From 2e4d765191c203f8b95f5e705175c1992929233b Mon Sep 17 00:00:00 2001 From: David Simansky Date: Wed, 23 Feb 2022 16:42:58 +0100 Subject: [PATCH] [release-v1.0.0] Add kn-plugin-func v0.21.0 (#977) --- go.mod | 22 +- go.sum | 912 +- openshift/release/kn.yaml | 14 +- pkg/kn/root/plugin_register.go | 1 + .../AlecAivazis/survey/v2/.travis.yml | 22 + .../AlecAivazis/survey/v2/CONTRIBUTING.md | 77 + .../AlecAivazis/survey/v2/Gopkg.lock | 78 + .../AlecAivazis/survey/v2/Gopkg.toml | 54 + .../github.com/AlecAivazis/survey/v2/LICENSE | 21 + .../AlecAivazis/survey/v2/README.md | 478 + .../AlecAivazis/survey/v2/_tasks.hcl | 19 + .../AlecAivazis/survey/v2/confirm.go | 153 + .../AlecAivazis/survey/v2/core/template.go | 91 + .../AlecAivazis/survey/v2/core/write.go | 356 + .../AlecAivazis/survey/v2/editor.go | 222 + .../AlecAivazis/survey/v2/filter.go | 1 + .../github.com/AlecAivazis/survey/v2/go.mod | 19 + .../github.com/AlecAivazis/survey/v2/go.sum | 35 + .../github.com/AlecAivazis/survey/v2/input.go | 225 + .../AlecAivazis/survey/v2/multiline.go | 110 + .../AlecAivazis/survey/v2/multiselect.go | 331 + .../AlecAivazis/survey/v2/password.go | 101 + .../AlecAivazis/survey/v2/renderer.go | 164 + .../AlecAivazis/survey/v2/select.go | 329 + .../AlecAivazis/survey/v2/survey.go | 413 + .../survey/v2/terminal/LICENSE.txt | 22 + .../AlecAivazis/survey/v2/terminal/README.md | 3 + .../survey/v2/terminal/buffered_reader.go | 22 + .../AlecAivazis/survey/v2/terminal/cursor.go | 190 + .../survey/v2/terminal/cursor_windows.go | 138 + .../AlecAivazis/survey/v2/terminal/display.go | 9 + .../survey/v2/terminal/display_posix.go | 11 + .../survey/v2/terminal/display_windows.go | 27 + .../AlecAivazis/survey/v2/terminal/error.go | 9 + .../AlecAivazis/survey/v2/terminal/output.go | 19 + .../survey/v2/terminal/output_windows.go | 227 + .../survey/v2/terminal/runereader.go | 373 + .../survey/v2/terminal/runereader_bsd.go | 13 + .../survey/v2/terminal/runereader_linux.go | 13 + .../survey/v2/terminal/runereader_posix.go | 111 + .../survey/v2/terminal/runereader_ppc64le.go | 7 + .../survey/v2/terminal/runereader_windows.go | 142 + .../survey/v2/terminal/sequences.go | 31 + .../AlecAivazis/survey/v2/terminal/stdio.go | 24 + .../survey/v2/terminal/syscall_windows.go | 39 + .../survey/v2/terminal/terminal.go | 8 + .../AlecAivazis/survey/v2/transform.go | 79 + .../AlecAivazis/survey/v2/validate.go | 87 + vendor/github.com/Azure/go-ansiterm/LICENSE | 21 + vendor/github.com/Azure/go-ansiterm/README.md | 12 + .../github.com/Azure/go-ansiterm/constants.go | 188 + .../github.com/Azure/go-ansiterm/context.go | 7 + .../Azure/go-ansiterm/csi_entry_state.go | 49 + .../Azure/go-ansiterm/csi_param_state.go | 38 + .../go-ansiterm/escape_intermediate_state.go | 36 + .../Azure/go-ansiterm/escape_state.go | 47 + .../Azure/go-ansiterm/event_handler.go | 90 + .../Azure/go-ansiterm/ground_state.go | 24 + .../Azure/go-ansiterm/osc_string_state.go | 31 + vendor/github.com/Azure/go-ansiterm/parser.go | 151 + .../go-ansiterm/parser_action_helpers.go | 99 + .../Azure/go-ansiterm/parser_actions.go | 119 + vendor/github.com/Azure/go-ansiterm/states.go | 71 + .../github.com/Azure/go-ansiterm/utilities.go | 21 + .../Azure/go-ansiterm/winterm/ansi.go | 183 + .../Azure/go-ansiterm/winterm/api.go | 327 + .../go-ansiterm/winterm/attr_translation.go | 100 + .../go-ansiterm/winterm/cursor_helpers.go | 101 + .../go-ansiterm/winterm/erase_helpers.go | 84 + .../go-ansiterm/winterm/scroll_helper.go | 118 + .../Azure/go-ansiterm/winterm/utilities.go | 9 + 
.../go-ansiterm/winterm/win_event_handler.go | 743 + vendor/github.com/BurntSushi/toml/.gitignore | 2 + vendor/github.com/BurntSushi/toml/COMPATIBLE | 1 + vendor/github.com/BurntSushi/toml/COPYING | 21 + vendor/github.com/BurntSushi/toml/README.md | 220 + vendor/github.com/BurntSushi/toml/decode.go | 511 + .../BurntSushi/toml/decode_go116.go | 18 + .../github.com/BurntSushi/toml/decode_meta.go | 123 + .../github.com/BurntSushi/toml/deprecated.go | 33 + vendor/github.com/BurntSushi/toml/doc.go | 13 + vendor/github.com/BurntSushi/toml/encode.go | 650 + vendor/github.com/BurntSushi/toml/go.mod | 3 + vendor/github.com/BurntSushi/toml/go.sum | 0 .../github.com/BurntSushi/toml/internal/tz.go | 36 + vendor/github.com/BurntSushi/toml/lex.go | 1225 ++ vendor/github.com/BurntSushi/toml/parse.go | 739 + .../github.com/BurntSushi/toml/type_check.go | 70 + .../github.com/BurntSushi/toml/type_fields.go | 242 + .../github.com/Masterminds/semver/.travis.yml | 29 + .../Masterminds/semver/CHANGELOG.md | 109 + .../github.com/Masterminds/semver/LICENSE.txt | 19 + vendor/github.com/Masterminds/semver/Makefile | 36 + .../github.com/Masterminds/semver/README.md | 194 + .../Masterminds/semver/appveyor.yml | 44 + .../Masterminds/semver/collection.go | 24 + .../Masterminds/semver/constraints.go | 423 + vendor/github.com/Masterminds/semver/doc.go | 115 + .../github.com/Masterminds/semver/version.go | 425 + .../Masterminds/semver/version_fuzz.go | 10 + .../github.com/Microsoft/go-winio/.gitignore | 1 + .../github.com/Microsoft/go-winio/CODEOWNERS | 1 + vendor/github.com/Microsoft/go-winio/LICENSE | 22 + .../github.com/Microsoft/go-winio/README.md | 37 + .../github.com/Microsoft/go-winio/backup.go | 280 + vendor/github.com/Microsoft/go-winio/ea.go | 137 + vendor/github.com/Microsoft/go-winio/file.go | 323 + .../github.com/Microsoft/go-winio/fileinfo.go | 73 + vendor/github.com/Microsoft/go-winio/go.mod | 9 + vendor/github.com/Microsoft/go-winio/go.sum | 14 + .../github.com/Microsoft/go-winio/hvsock.go | 307 + vendor/github.com/Microsoft/go-winio/pipe.go | 517 + .../Microsoft/go-winio/pkg/guid/guid.go | 237 + .../Microsoft/go-winio/privilege.go | 203 + .../github.com/Microsoft/go-winio/reparse.go | 128 + vendor/github.com/Microsoft/go-winio/sd.go | 98 + .../github.com/Microsoft/go-winio/syscall.go | 3 + .../Microsoft/go-winio/zsyscall_windows.go | 427 + vendor/github.com/Microsoft/hcsshim/LICENSE | 21 + .../hcsshim/osversion/osversion_windows.go | 50 + .../hcsshim/osversion/windowsbuilds.go | 38 + .../github.com/ProtonMail/go-crypto/AUTHORS | 3 + .../ProtonMail/go-crypto/CONTRIBUTORS | 3 + .../github.com/ProtonMail/go-crypto/LICENSE | 27 + .../github.com/ProtonMail/go-crypto/PATENTS | 22 + .../go-crypto/bitcurves/bitcurve.go | 381 + .../go-crypto/brainpool/brainpool.go | 134 + .../ProtonMail/go-crypto/brainpool/rcurve.go | 83 + .../ProtonMail/go-crypto/eax/eax.go | 162 + .../go-crypto/eax/eax_test_vectors.go | 58 + .../go-crypto/eax/random_vectors.go | 131 + .../go-crypto/internal/byteutil/byteutil.go | 92 + .../ProtonMail/go-crypto/ocb/ocb.go | 317 + .../go-crypto/ocb/random_vectors.go | 136 + .../ocb/rfc7253_test_vectors_suite_a.go | 78 + .../ocb/rfc7253_test_vectors_suite_b.go | 24 + .../go-crypto/openpgp/aes/keywrap/keywrap.go | 153 + .../go-crypto/openpgp/armor/armor.go | 224 + .../go-crypto/openpgp/armor/encode.go | 160 + .../go-crypto/openpgp/canonical_text.go | 65 + .../ProtonMail/go-crypto/openpgp/ecdh/ecdh.go | 165 + .../go-crypto/openpgp/ecdh/x25519.go | 157 + .../go-crypto/openpgp/elgamal/elgamal.go | 124 + 
.../go-crypto/openpgp/errors/errors.go | 116 + .../openpgp/internal/algorithm/aead.go | 65 + .../openpgp/internal/algorithm/cipher.go | 107 + .../openpgp/internal/algorithm/hash.go | 86 + .../openpgp/internal/ecc/curveInfo.go | 118 + .../openpgp/internal/ecc/curveType.go | 10 + .../openpgp/internal/encoding/encoding.go | 27 + .../openpgp/internal/encoding/mpi.go | 91 + .../openpgp/internal/encoding/oid.go | 88 + .../go-crypto/openpgp/key_generation.go | 375 + .../ProtonMail/go-crypto/openpgp/keys.go | 707 + .../go-crypto/openpgp/keys_test_data.go | 336 + .../go-crypto/openpgp/packet/aead_config.go | 56 + .../openpgp/packet/aead_encrypted.go | 364 + .../go-crypto/openpgp/packet/compressed.go | 123 + .../go-crypto/openpgp/packet/config.go | 167 + .../go-crypto/openpgp/packet/encrypted_key.go | 282 + .../go-crypto/openpgp/packet/literal.go | 91 + .../go-crypto/openpgp/packet/ocfb.go | 137 + .../openpgp/packet/one_pass_signature.go | 73 + .../go-crypto/openpgp/packet/opaque.go | 162 + .../go-crypto/openpgp/packet/packet.go | 522 + .../go-crypto/openpgp/packet/private_key.go | 780 ++ .../openpgp/packet/private_key_test_data.go | 12 + .../go-crypto/openpgp/packet/public_key.go | 825 ++ .../openpgp/packet/public_key_test_data.go | 24 + .../go-crypto/openpgp/packet/reader.go | 78 + .../go-crypto/openpgp/packet/signature.go | 964 ++ .../openpgp/packet/symmetric_key_encrypted.go | 267 + .../openpgp/packet/symmetrically_encrypted.go | 290 + .../go-crypto/openpgp/packet/userattribute.go | 94 + .../go-crypto/openpgp/packet/userid.go | 160 + .../ProtonMail/go-crypto/openpgp/read.go | 508 + .../go-crypto/openpgp/read_write_test_data.go | 173 + .../ProtonMail/go-crypto/openpgp/s2k/s2k.go | 367 + .../ProtonMail/go-crypto/openpgp/write.go | 559 + vendor/github.com/acomagu/bufpipe/README.md | 42 + vendor/github.com/acomagu/bufpipe/bufpipe.go | 128 + vendor/github.com/acomagu/bufpipe/doc.go | 2 + vendor/github.com/acomagu/bufpipe/go.mod | 5 + vendor/github.com/acomagu/bufpipe/go.sum | 2 + vendor/github.com/antlr/antlr4/LICENSE.txt | 52 + .../antlr/antlr4/runtime/Go/antlr/atn.go | 152 + .../antlr4/runtime/Go/antlr/atn_config.go | 295 + .../antlr4/runtime/Go/antlr/atn_config_set.go | 387 + .../Go/antlr/atn_deserialization_options.go | 25 + .../runtime/Go/antlr/atn_deserializer.go | 828 ++ .../antlr4/runtime/Go/antlr/atn_simulator.go | 50 + .../antlr4/runtime/Go/antlr/atn_state.go | 386 + .../antlr/antlr4/runtime/Go/antlr/atn_type.go | 11 + .../antlr4/runtime/Go/antlr/char_stream.go | 12 + .../runtime/Go/antlr/common_token_factory.go | 56 + .../runtime/Go/antlr/common_token_stream.go | 447 + .../antlr/antlr4/runtime/Go/antlr/dfa.go | 171 + .../antlr4/runtime/Go/antlr/dfa_serializer.go | 152 + .../antlr4/runtime/Go/antlr/dfa_state.go | 166 + .../Go/antlr/diagnostic_error_listener.go | 111 + .../antlr4/runtime/Go/antlr/error_listener.go | 108 + .../antlr4/runtime/Go/antlr/error_strategy.go | 758 + .../antlr/antlr4/runtime/Go/antlr/errors.go | 241 + .../antlr4/runtime/Go/antlr/file_stream.go | 49 + .../antlr4/runtime/Go/antlr/input_stream.go | 113 + .../antlr4/runtime/Go/antlr/int_stream.go | 16 + .../antlr4/runtime/Go/antlr/interval_set.go | 296 + .../antlr/antlr4/runtime/Go/antlr/lexer.go | 418 + .../antlr4/runtime/Go/antlr/lexer_action.go | 431 + .../runtime/Go/antlr/lexer_action_executor.go | 170 + .../runtime/Go/antlr/lexer_atn_simulator.go | 658 + .../antlr4/runtime/Go/antlr/ll1_analyzer.go | 215 + .../antlr/antlr4/runtime/Go/antlr/parser.go | 718 + .../runtime/Go/antlr/parser_atn_simulator.go | 1473 ++ 
.../runtime/Go/antlr/parser_rule_context.go | 362 + .../runtime/Go/antlr/prediction_context.go | 756 + .../runtime/Go/antlr/prediction_mode.go | 553 + .../antlr4/runtime/Go/antlr/recognizer.go | 217 + .../antlr4/runtime/Go/antlr/rule_context.go | 114 + .../runtime/Go/antlr/semantic_context.go | 455 + .../antlr/antlr4/runtime/Go/antlr/token.go | 210 + .../antlr4/runtime/Go/antlr/token_source.go | 17 + .../antlr4/runtime/Go/antlr/token_stream.go | 20 + .../runtime/Go/antlr/tokenstream_rewriter.go | 649 + .../antlr4/runtime/Go/antlr/trace_listener.go | 32 + .../antlr4/runtime/Go/antlr/transition.go | 421 + .../antlr/antlr4/runtime/Go/antlr/tree.go | 256 + .../antlr/antlr4/runtime/Go/antlr/trees.go | 137 + .../antlr/antlr4/runtime/Go/antlr/utils.go | 417 + vendor/github.com/apex/log/.gitignore | 1 + vendor/github.com/apex/log/History.md | 75 + vendor/github.com/apex/log/LICENSE | 22 + vendor/github.com/apex/log/Makefile | 2 + vendor/github.com/apex/log/Readme.md | 61 + vendor/github.com/apex/log/context.go | 19 + vendor/github.com/apex/log/default.go | 45 + vendor/github.com/apex/log/doc.go | 10 + vendor/github.com/apex/log/entry.go | 182 + vendor/github.com/apex/log/go.mod | 32 + vendor/github.com/apex/log/go.sum | 117 + vendor/github.com/apex/log/interface.go | 22 + vendor/github.com/apex/log/levels.go | 81 + vendor/github.com/apex/log/logger.go | 156 + vendor/github.com/apex/log/pkg.go | 108 + vendor/github.com/apex/log/stack.go | 8 + .../bits-and-blooms/bitset/.gitignore | 26 + .../bits-and-blooms/bitset/.travis.yml | 37 + .../github.com/bits-and-blooms/bitset/LICENSE | 27 + .../bits-and-blooms/bitset/README.md | 93 + .../bitset/azure-pipelines.yml | 39 + .../bits-and-blooms/bitset/bitset.go | 952 ++ .../github.com/bits-and-blooms/bitset/go.mod | 3 + .../github.com/bits-and-blooms/bitset/go.sum | 0 .../bits-and-blooms/bitset/popcnt.go | 53 + .../bits-and-blooms/bitset/popcnt_19.go | 45 + .../bits-and-blooms/bitset/popcnt_amd64.go | 68 + .../bits-and-blooms/bitset/popcnt_amd64.s | 104 + .../bits-and-blooms/bitset/popcnt_generic.go | 24 + .../bitset/trailing_zeros_18.go | 14 + .../bitset/trailing_zeros_19.go | 9 + .../github.com/buildpacks/imgutil/.gitignore | 18 + .../github.com/buildpacks/imgutil/CODEOWNERS | 1 + vendor/github.com/buildpacks/imgutil/LICENSE | 201 + vendor/github.com/buildpacks/imgutil/Makefile | 42 + .../github.com/buildpacks/imgutil/README.md | 19 + vendor/github.com/buildpacks/imgutil/go.mod | 13 + vendor/github.com/buildpacks/imgutil/go.sum | 1170 ++ .../buildpacks/imgutil/golangci.yaml | 32 + vendor/github.com/buildpacks/imgutil/image.go | 74 + .../imgutil/layer/bcdhive_generated.go | 31 + .../imgutil/layer/pax_permissions.go | 12 + .../imgutil/layer/windows_baselayer.go | 84 + .../imgutil/layer/windows_writer.go | 130 + .../buildpacks/imgutil/local/identifier.go | 9 + .../buildpacks/imgutil/local/local.go | 885 ++ .../buildpacks/imgutil/remote/identifier.go | 13 + .../buildpacks/imgutil/remote/remote.go | 734 + .../buildpacks/lifecycle/.dockerignore | 1 + .../buildpacks/lifecycle/.gitignore | 10 + .../buildpacks/lifecycle/.gitpod.yml | 19 + .../buildpacks/lifecycle/CODEOWNERS | 1 + .../buildpacks/lifecycle/CONTRIBUTING.md | 42 + .../buildpacks/lifecycle/DEVELOPMENT.md | 92 + .../github.com/buildpacks/lifecycle/IMAGE.md | 39 + .../github.com/buildpacks/lifecycle/LICENSE | 202 + .../github.com/buildpacks/lifecycle/Makefile | 315 + .../github.com/buildpacks/lifecycle/README.md | 61 + .../buildpacks/lifecycle/RELEASE.md | 22 + .../buildpacks/lifecycle/analyzer.go | 95 + 
.../buildpacks/lifecycle/api/apis.go | 87 + .../buildpacks/lifecycle/api/version.go | 129 + .../buildpacks/lifecycle/archive/archive.go | 72 + .../buildpacks/lifecycle/archive/extract.go | 117 + .../buildpacks/lifecycle/archive/reader.go | 75 + .../buildpacks/lifecycle/archive/tar_unix.go | 21 + .../lifecycle/archive/tar_windows.go | 56 + .../buildpacks/lifecycle/archive/writer.go | 71 + .../buildpacks/lifecycle/auth/env_keychain.go | 196 + .../buildpacks/lifecycle/builder.go | 202 + .../buildpacks/lifecycle/buildpack/build.go | 409 + .../lifecycle/buildpack/descriptor.go | 72 + .../buildpacks/lifecycle/buildpack/detect.go | 117 + .../buildpacks/lifecycle/buildpack/error.go | 26 + .../buildpacks/lifecycle/buildpack/files.go | 248 + .../lifecycle/buildpack/layertypes/types.go | 8 + .../buildpacks/lifecycle/buildpack/store.go | 38 + .../lifecycle/buildpack/v05/encoderdecoder.go | 42 + .../lifecycle/buildpack/v06/encoderdecoder.go | 58 + .../github.com/buildpacks/lifecycle/cache.go | 75 + .../buildpacks/lifecycle/cmd/command.go | 62 + .../buildpacks/lifecycle/cmd/exit.go | 87 + .../buildpacks/lifecycle/cmd/flags.go | 262 + .../buildpacks/lifecycle/cmd/flags_unix.go | 8 + .../buildpacks/lifecycle/cmd/flags_windows.go | 6 + .../buildpacks/lifecycle/cmd/logs.go | 87 + .../buildpacks/lifecycle/cmd/version.go | 106 + .../buildpacks/lifecycle/codecov.yml | 18 + .../buildpacks/lifecycle/cosign.pub | 5 + .../buildpacks/lifecycle/detector.go | 415 + .../buildpacks/lifecycle/env/build.go | 79 + .../buildpacks/lifecycle/env/env.go | 195 + .../buildpacks/lifecycle/env/launch.go | 51 + .../buildpacks/lifecycle/env/vars.go | 59 + .../buildpacks/lifecycle/exporter.go | 470 + vendor/github.com/buildpacks/lifecycle/go.mod | 25 + vendor/github.com/buildpacks/lifecycle/go.sum | 1219 ++ .../buildpacks/lifecycle/golangci.yaml | 34 + .../buildpacks/lifecycle/launch/bash.go | 59 + .../buildpacks/lifecycle/launch/cmd.go | 25 + .../buildpacks/lifecycle/launch/exec_d.go | 64 + .../lifecycle/launch/exec_d_unix.go | 13 + .../lifecycle/launch/exec_d_windows.go | 21 + .../buildpacks/lifecycle/launch/launch.go | 53 + .../buildpacks/lifecycle/launch/launcher.go | 209 + .../lifecycle/launch/launcher_unix.go | 16 + .../lifecycle/launch/launcher_windows.go | 25 + .../buildpacks/lifecycle/launch/process.go | 77 + .../buildpacks/lifecycle/launch/shell.go | 126 + .../lifecycle/layer_metadata_restorer.go | 209 + .../github.com/buildpacks/lifecycle/layers.go | 205 + .../buildpacks/lifecycle/layers/digest.go | 46 + .../buildpacks/lifecycle/layers/dir.go | 29 + .../buildpacks/lifecycle/layers/extract.go | 33 + .../buildpacks/lifecycle/layers/factory.go | 99 + .../buildpacks/lifecycle/layers/launcher.go | 125 + .../buildpacks/lifecycle/layers/slices.go | 251 + .../buildpacks/lifecycle/layers/writer.go | 46 + .../buildpacks/lifecycle/lifecycle.toml | 16 + .../github.com/buildpacks/lifecycle/logger.go | 15 + .../buildpacks/lifecycle/platform/cache.go | 14 + .../lifecycle/platform/common/platform.go | 10 + .../buildpacks/lifecycle/platform/files.go | 245 + .../buildpacks/lifecycle/platform/labels.go | 9 + .../buildpacks/lifecycle/platform/platform.go | 32 + .../lifecycle/platform/pre06/exit.go | 38 + .../lifecycle/platform/pre06/platform.go | 20 + .../buildpacks/lifecycle/platform/v06/exit.go | 38 + .../lifecycle/platform/v06/platform.go | 22 + .../buildpacks/lifecycle/platform/v07/exit.go | 9 + .../lifecycle/platform/v07/platform.go | 22 + .../buildpacks/lifecycle/rebaser.go | 126 + .../buildpacks/lifecycle/restorer.go | 120 + 
.../github.com/buildpacks/lifecycle/save.go | 95 + .../github.com/buildpacks/lifecycle/utils.go | 149 + vendor/github.com/buildpacks/pack/.gitignore | 16 + vendor/github.com/buildpacks/pack/.gitpod.yml | 14 + vendor/github.com/buildpacks/pack/CODEOWNERS | 1 + .../buildpacks/pack/CONTRIBUTING.md | 84 + .../github.com/buildpacks/pack/DEVELOPMENT.md | 114 + vendor/github.com/buildpacks/pack/LICENSE | 201 + vendor/github.com/buildpacks/pack/Makefile | 158 + vendor/github.com/buildpacks/pack/README.md | 45 + vendor/github.com/buildpacks/pack/RELEASE.md | 63 + .../buildpacks/pack/builder/config_reader.go | 113 + .../pack/builder/detection_order.go | 18 + .../github.com/buildpacks/pack/buildpack.yml | 9 + .../pack/buildpackage/config_reader.go | 116 + vendor/github.com/buildpacks/pack/codecov.yml | 18 + vendor/github.com/buildpacks/pack/go.mod | 42 + vendor/github.com/buildpacks/pack/go.sum | 1369 ++ .../github.com/buildpacks/pack/golangci.yaml | 32 + .../pack/internal/build/container_ops.go | 283 + .../internal/build/lifecycle_execution.go | 555 + .../pack/internal/build/lifecycle_executor.go | 108 + .../pack/internal/build/mount_paths.go | 59 + .../buildpacks/pack/internal/build/phase.go | 58 + .../internal/build/phase_config_provider.go | 233 + .../pack/internal/build/phase_factory.go | 39 + .../pack/internal/builder/builder.go | 785 ++ .../pack/internal/builder/descriptor.go | 111 + .../builder/detection_order_calculator.go | 106 + .../internal/builder/image_fetcher_wrapper.go | 34 + .../pack/internal/builder/inspect.go | 158 + .../pack/internal/builder/label_manager.go | 91 + .../builder/label_manager_provider.go | 11 + .../pack/internal/builder/lifecycle.go | 121 + .../pack/internal/builder/metadata.go | 36 + .../internal/builder/suggested_builder.go | 40 + .../pack/internal/builder/version.go | 47 + .../buildpacks/pack/internal/cache/consts.go | 8 + .../pack/internal/cache/image_cache.go | 39 + .../pack/internal/cache/volume_cache.go | 52 + .../buildpacks/pack/internal/config/config.go | 131 + .../pack/internal/config/config_helpers.go | 36 + .../buildpacks/pack/internal/container/run.go | 67 + .../buildpacks/pack/internal/layer/layer.go | 11 + .../pack/internal/layer/writer_factory.go | 32 + .../buildpacks/pack/internal/name/name.go | 49 + .../buildpacks/pack/internal/paths/paths.go | 135 + .../pack/internal/registry/buildpack.go | 45 + .../buildpacks/pack/internal/registry/git.go | 31 + .../pack/internal/registry/github.go | 73 + .../pack/internal/registry/index.go | 57 + .../pack/internal/registry/registry_cache.go | 357 + .../buildpacks/pack/internal/stack/merge.go | 62 + .../buildpacks/pack/internal/stack/mixins.go | 55 + .../pack/internal/stringset/stringset.go | 37 + .../buildpacks/pack/internal/style/style.go | 62 + .../buildpacks/pack/internal/term/term.go | 25 + .../buildpacks/pack/internal/termui/detect.go | 82 + .../buildpacks/pack/internal/termui/logger.go | 45 + .../buildpacks/pack/internal/termui/termui.go | 110 + .../buildpacks/pack/pkg/archive/archive.go | 382 + .../pack/pkg/archive/tar_builder.go | 94 + .../buildpacks/pack/pkg/blob/blob.go | 80 + .../buildpacks/pack/pkg/blob/downloader.go | 196 + .../buildpacks/pack/pkg/buildpack/builder.go | 295 + .../pack/pkg/buildpack/buildpack.go | 242 + .../pack/pkg/buildpack/buildpackage.go | 13 + .../pack/pkg/buildpack/downloader.go | 175 + .../pack/pkg/buildpack/locator_type.go | 147 + .../pack/pkg/buildpack/oci_layout_package.go | 193 + .../buildpacks/pack/pkg/buildpack/package.go | 87 + .../pack/pkg/buildpack/parse_name.go | 60 
+ .../buildpacks/pack/pkg/client/build.go | 931 ++ .../buildpacks/pack/pkg/client/client.go | 275 + .../buildpacks/pack/pkg/client/common.go | 127 + .../pack/pkg/client/create_builder.go | 272 + .../buildpacks/pack/pkg/client/errors.go | 25 + .../pack/pkg/client/inspect_builder.go | 106 + .../pack/pkg/client/inspect_buildpack.go | 167 + .../pack/pkg/client/inspect_image.go | 175 + .../pack/pkg/client/new_buildpack.go | 137 + .../pack/pkg/client/package_buildpack.go | 157 + .../pack/pkg/client/pull_buildpack.go | 65 + .../buildpacks/pack/pkg/client/rebase.go | 95 + .../pack/pkg/client/register_buildpack.go | 119 + .../buildpacks/pack/pkg/client/version.go | 6 + .../pack/pkg/client/yank_buildpack.go | 56 + .../buildpacks/pack/pkg/dist/buildpack.go | 39 + .../pack/pkg/dist/buildpack_descriptor.go | 60 + .../buildpacks/pack/pkg/dist/dist.go | 69 + .../buildpacks/pack/pkg/dist/distribution.go | 3 + .../buildpacks/pack/pkg/dist/image.go | 42 + .../buildpacks/pack/pkg/dist/layers.go | 45 + .../buildpacks/pack/pkg/image/fetcher.go | 187 + .../buildpacks/pack/pkg/image/pull_policy.go | 41 + .../pack/pkg/logging/logger_simple.go | 66 + .../pack/pkg/logging/logger_writers.go | 215 + .../buildpacks/pack/pkg/logging/logging.go | 66 + .../pack/pkg/logging/prefix_writer.go | 126 + .../pack/pkg/project/types/types.go | 50 + .../github.com/buildpacks/pack/project.toml | 3 + vendor/github.com/buildpacks/pack/version.go | 6 + vendor/github.com/cespare/xxhash/LICENSE.txt | 22 + vendor/github.com/cespare/xxhash/README.md | 50 + vendor/github.com/cespare/xxhash/go.mod | 6 + vendor/github.com/cespare/xxhash/go.sum | 4 + vendor/github.com/cespare/xxhash/rotate.go | 14 + vendor/github.com/cespare/xxhash/rotate19.go | 14 + vendor/github.com/cespare/xxhash/xxhash.go | 168 + .../github.com/cespare/xxhash/xxhash_amd64.go | 12 + .../github.com/cespare/xxhash/xxhash_amd64.s | 233 + .../github.com/cespare/xxhash/xxhash_other.go | 75 + .../github.com/cespare/xxhash/xxhash_safe.go | 10 + .../cespare/xxhash/xxhash_unsafe.go | 30 + .../github.com/cloudevents/sdk-go/v2/alias.go | 5 + .../sdk-go/v2/binding/binary_writer.go | 5 + .../cloudevents/sdk-go/v2/binding/doc.go | 5 + .../cloudevents/sdk-go/v2/binding/encoding.go | 5 + .../sdk-go/v2/binding/event_message.go | 5 + .../sdk-go/v2/binding/finish_message.go | 5 + .../sdk-go/v2/binding/format/doc.go | 5 + .../sdk-go/v2/binding/format/format.go | 5 + .../cloudevents/sdk-go/v2/binding/message.go | 5 + .../sdk-go/v2/binding/spec/attributes.go | 5 + .../cloudevents/sdk-go/v2/binding/spec/doc.go | 5 + .../v2/binding/spec/match_exact_version.go | 5 + .../sdk-go/v2/binding/spec/spec.go | 5 + .../sdk-go/v2/binding/structured_writer.go | 5 + .../cloudevents/sdk-go/v2/binding/to_event.go | 5 + .../sdk-go/v2/binding/transformer.go | 5 + .../cloudevents/sdk-go/v2/binding/write.go | 5 + .../cloudevents/sdk-go/v2/client/client.go | 5 + .../sdk-go/v2/client/client_http.go | 5 + .../sdk-go/v2/client/client_observed.go | 5 + .../sdk-go/v2/client/defaulters.go | 5 + .../cloudevents/sdk-go/v2/client/doc.go | 5 + .../sdk-go/v2/client/http_receiver.go | 5 + .../cloudevents/sdk-go/v2/client/invoker.go | 5 + .../sdk-go/v2/client/observability.go | 5 + .../cloudevents/sdk-go/v2/client/options.go | 5 + .../cloudevents/sdk-go/v2/client/receiver.go | 5 + .../cloudevents/sdk-go/v2/context/context.go | 5 + .../cloudevents/sdk-go/v2/context/doc.go | 5 + .../cloudevents/sdk-go/v2/context/logger.go | 5 + .../cloudevents/sdk-go/v2/context/retry.go | 5 + .../sdk-go/v2/event/content_type.go | 5 + 
.../sdk-go/v2/event/data_content_encoding.go | 5 + .../sdk-go/v2/event/datacodec/codec.go | 5 + .../sdk-go/v2/event/datacodec/doc.go | 5 + .../sdk-go/v2/event/datacodec/json/data.go | 5 + .../sdk-go/v2/event/datacodec/json/doc.go | 5 + .../sdk-go/v2/event/datacodec/text/data.go | 5 + .../sdk-go/v2/event/datacodec/text/doc.go | 5 + .../sdk-go/v2/event/datacodec/xml/data.go | 5 + .../sdk-go/v2/event/datacodec/xml/doc.go | 5 + .../cloudevents/sdk-go/v2/event/doc.go | 5 + .../cloudevents/sdk-go/v2/event/event.go | 5 + .../cloudevents/sdk-go/v2/event/event_data.go | 5 + .../sdk-go/v2/event/event_interface.go | 5 + .../sdk-go/v2/event/event_marshal.go | 5 + .../sdk-go/v2/event/event_reader.go | 5 + .../sdk-go/v2/event/event_unmarshal.go | 5 + .../sdk-go/v2/event/event_validation.go | 5 + .../sdk-go/v2/event/event_writer.go | 5 + .../sdk-go/v2/event/eventcontext.go | 5 + .../sdk-go/v2/event/eventcontext_v03.go | 5 + .../v2/event/eventcontext_v03_reader.go | 5 + .../v2/event/eventcontext_v03_writer.go | 5 + .../sdk-go/v2/event/eventcontext_v1.go | 5 + .../sdk-go/v2/event/eventcontext_v1_reader.go | 5 + .../sdk-go/v2/event/eventcontext_v1_writer.go | 5 + .../cloudevents/sdk-go/v2/event/extensions.go | 5 + .../github.com/cloudevents/sdk-go/v2/go.mod | 2 +- .../github.com/cloudevents/sdk-go/v2/go.sum | 5 +- .../cloudevents/sdk-go/v2/protocol/doc.go | 5 + .../cloudevents/sdk-go/v2/protocol/error.go | 5 + .../v2/protocol/http/abuse_protection.go | 5 + .../sdk-go/v2/protocol/http/doc.go | 5 + .../sdk-go/v2/protocol/http/headers.go | 5 + .../sdk-go/v2/protocol/http/message.go | 5 + .../sdk-go/v2/protocol/http/options.go | 5 + .../sdk-go/v2/protocol/http/protocol.go | 22 +- .../v2/protocol/http/protocol_lifecycle.go | 5 + .../sdk-go/v2/protocol/http/protocol_retry.go | 5 + .../sdk-go/v2/protocol/http/result.go | 5 + .../sdk-go/v2/protocol/http/retries_result.go | 5 + .../sdk-go/v2/protocol/http/write_request.go | 5 + .../v2/protocol/http/write_responsewriter.go | 5 + .../cloudevents/sdk-go/v2/protocol/inbound.go | 5 + .../sdk-go/v2/protocol/lifecycle.go | 5 + .../sdk-go/v2/protocol/outbound.go | 5 + .../cloudevents/sdk-go/v2/protocol/result.go | 5 + .../cloudevents/sdk-go/v2/types/allocate.go | 5 + .../cloudevents/sdk-go/v2/types/doc.go | 5 + .../cloudevents/sdk-go/v2/types/timestamp.go | 5 + .../cloudevents/sdk-go/v2/types/uri.go | 5 + .../cloudevents/sdk-go/v2/types/uriref.go | 5 + .../cloudevents/sdk-go/v2/types/value.go | 5 + .../github.com/containerd/containerd/LICENSE | 191 + .../github.com/containerd/containerd/NOTICE | 16 + .../containerd/containerd/errdefs/errors.go | 93 + .../containerd/containerd/errdefs/grpc.go | 147 + .../containerd/containerd/log/context.go | 68 + .../containerd/platforms/compare.go | 193 + .../containerd/platforms/cpuinfo.go | 131 + .../containerd/platforms/database.go | 114 + .../containerd/platforms/defaults.go | 43 + .../containerd/platforms/defaults_unix.go | 24 + .../containerd/platforms/defaults_windows.go | 81 + .../containerd/platforms/platforms.go | 278 + .../stargz-snapshotter/estargz/LICENSE | 202 + .../stargz-snapshotter/estargz/build.go | 628 + .../estargz/errorutil/errors.go | 40 + .../stargz-snapshotter/estargz/estargz.go | 1026 ++ .../stargz-snapshotter/estargz/go.mod | 11 + .../stargz-snapshotter/estargz/go.sum | 22 + .../stargz-snapshotter/estargz/gzip.go | 216 + .../stargz-snapshotter/estargz/testutil.go | 2009 +++ .../stargz-snapshotter/estargz/types.go | 316 + vendor/github.com/containers/image/v5/LICENSE | 189 + .../image/v5/docker/reference/README.md 
| 2 + .../image/v5/docker/reference/helpers.go | 42 + .../image/v5/docker/reference/normalize.go | 181 + .../image/v5/docker/reference/reference.go | 433 + .../image/v5/docker/reference/regexp.go | 143 + .../image/v5/internal/pkg/keyctl/key.go | 73 + .../image/v5/internal/pkg/keyctl/keyring.go | 117 + .../image/v5/internal/pkg/keyctl/perm.go | 33 + .../image/v5/internal/pkg/keyctl/sys_linux.go | 25 + .../v5/pkg/compression/internal/types.go | 57 + .../image/v5/pkg/compression/types/types.go | 13 + .../image/v5/pkg/docker/config/config.go | 502 + .../v5/pkg/docker/config/config_linux.go | 115 + .../pkg/docker/config/config_unsupported.go | 20 + .../containers/image/v5/types/types.go | 680 + vendor/github.com/containers/storage/AUTHORS | 1522 ++ vendor/github.com/containers/storage/LICENSE | 191 + vendor/github.com/containers/storage/NOTICE | 19 + .../storage/pkg/homedir/homedir_others.go | 35 + .../storage/pkg/homedir/homedir_unix.go | 140 + .../storage/pkg/homedir/homedir_windows.go | 27 + .../containers/storage/pkg/idtools/idtools.go | 325 + .../storage/pkg/idtools/idtools_unix.go | 210 + .../storage/pkg/idtools/idtools_windows.go | 23 + .../containers/storage/pkg/idtools/parser.go | 59 + .../storage/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + .../storage/pkg/idtools/utils_unix.go | 32 + .../containers/storage/pkg/mount/flags.go | 149 + .../storage/pkg/mount/flags_linux.go | 87 + .../storage/pkg/mount/flags_unsupported.go | 31 + .../containers/storage/pkg/mount/mount.go | 105 + .../storage/pkg/mount/mounter_freebsd.go | 54 + .../storage/pkg/mount/mounter_linux.go | 74 + .../storage/pkg/mount/mounter_unsupported.go | 7 + .../containers/storage/pkg/mount/mountinfo.go | 13 + .../storage/pkg/mount/mountinfo_linux.go | 5 + .../storage/pkg/mount/sharedsubtree_linux.go | 64 + .../storage/pkg/mount/unmount_unix.go | 22 + .../storage/pkg/mount/unmount_unsupported.go | 7 + .../containers/storage/pkg/reexec/README.md | 5 + .../storage/pkg/reexec/command_linux.go | 32 + .../storage/pkg/reexec/command_unix.go | 30 + .../storage/pkg/reexec/command_unsupported.go | 18 + .../storage/pkg/reexec/command_windows.go | 32 + .../containers/storage/pkg/reexec/reexec.go | 47 + .../containers/storage/pkg/system/chmod.go | 17 + .../containers/storage/pkg/system/chtimes.go | 35 + .../storage/pkg/system/chtimes_unix.go | 14 + .../storage/pkg/system/chtimes_windows.go | 28 + .../containers/storage/pkg/system/errors.go | 10 + .../containers/storage/pkg/system/exitcode.go | 33 + .../containers/storage/pkg/system/init.go | 22 + .../storage/pkg/system/init_windows.go | 17 + .../containers/storage/pkg/system/lchown.go | 20 + .../storage/pkg/system/lcow_unix.go | 8 + .../storage/pkg/system/lcow_windows.go | 6 + .../storage/pkg/system/lstat_unix.go | 20 + .../storage/pkg/system/lstat_windows.go | 14 + .../containers/storage/pkg/system/meminfo.go | 17 + .../storage/pkg/system/meminfo_linux.go | 65 + .../storage/pkg/system/meminfo_solaris.go | 129 + .../storage/pkg/system/meminfo_unsupported.go | 8 + .../storage/pkg/system/meminfo_windows.go | 45 + .../containers/storage/pkg/system/mknod.go | 22 + .../storage/pkg/system/mknod_windows.go | 13 + .../containers/storage/pkg/system/path.go | 21 + .../storage/pkg/system/path_unix.go | 9 + .../storage/pkg/system/path_windows.go | 33 + .../storage/pkg/system/process_unix.go | 24 + .../containers/storage/pkg/system/rm.go | 79 + .../storage/pkg/system/stat_darwin.go | 13 + .../storage/pkg/system/stat_freebsd.go | 13 + 
.../storage/pkg/system/stat_linux.go | 19 + .../storage/pkg/system/stat_openbsd.go | 13 + .../storage/pkg/system/stat_solaris.go | 13 + .../storage/pkg/system/stat_unix.go | 74 + .../storage/pkg/system/stat_windows.go | 63 + .../storage/pkg/system/syscall_unix.go | 17 + .../storage/pkg/system/syscall_windows.go | 122 + .../containers/storage/pkg/system/umask.go | 13 + .../storage/pkg/system/umask_windows.go | 9 + .../storage/pkg/system/utimes_freebsd.go | 24 + .../storage/pkg/system/utimes_linux.go | 25 + .../storage/pkg/system/utimes_unsupported.go | 10 + .../storage/pkg/system/xattrs_linux.go | 84 + .../storage/pkg/system/xattrs_unsupported.go | 28 + .../storage/pkg/unshare/getenv_linux_cgo.go | 22 + .../storage/pkg/unshare/getenv_linux_nocgo.go | 11 + .../containers/storage/pkg/unshare/unshare.c | 378 + .../containers/storage/pkg/unshare/unshare.go | 34 + .../storage/pkg/unshare/unshare_cgo.go | 10 + .../storage/pkg/unshare/unshare_gccgo.go | 25 + .../storage/pkg/unshare/unshare_linux.go | 605 + .../pkg/unshare/unshare_unsupported.go | 45 + .../pkg/unshare/unshare_unsupported_cgo.go | 10 + vendor/github.com/coreos/go-semver/LICENSE | 202 + vendor/github.com/coreos/go-semver/NOTICE | 5 + .../coreos/go-semver/semver/semver.go | 296 + .../coreos/go-semver/semver/sort.go | 38 + .../dgraph-io/ristretto/.deepsource.toml | 17 + vendor/github.com/dgraph-io/ristretto/LICENSE | 176 + .../github.com/dgraph-io/ristretto/README.md | 209 + .../github.com/dgraph-io/ristretto/cache.go | 488 + vendor/github.com/dgraph-io/ristretto/go.mod | 8 + vendor/github.com/dgraph-io/ristretto/go.sum | 8 + .../github.com/dgraph-io/ristretto/policy.go | 356 + vendor/github.com/dgraph-io/ristretto/ring.go | 91 + .../github.com/dgraph-io/ristretto/sketch.go | 155 + .../github.com/dgraph-io/ristretto/store.go | 181 + .../github.com/dgraph-io/ristretto/z/LICENSE | 64 + .../dgraph-io/ristretto/z/README.md | 129 + .../dgraph-io/ristretto/z/bbloom.go | 202 + .../dgraph-io/ristretto/z/rtutil.go | 64 + .../github.com/dgraph-io/ristretto/z/rtutil.s | 0 vendor/github.com/dgraph-io/ristretto/z/z.go | 56 + vendor/github.com/docker/cli/AUTHORS | 771 ++ vendor/github.com/docker/cli/LICENSE | 191 + vendor/github.com/docker/cli/NOTICE | 19 + .../docker/cli/cli/config/config.go | 163 + .../docker/cli/cli/config/configfile/file.go | 415 + .../cli/cli/config/configfile/file_unix.go | 35 + .../cli/cli/config/configfile/file_windows.go | 5 + .../cli/cli/config/credentials/credentials.go | 17 + .../cli/config/credentials/default_store.go | 21 + .../credentials/default_store_darwin.go | 5 + .../config/credentials/default_store_linux.go | 13 + .../credentials/default_store_unsupported.go | 7 + .../credentials/default_store_windows.go | 5 + .../cli/cli/config/credentials/file_store.go | 81 + .../cli/config/credentials/native_store.go | 143 + .../docker/cli/cli/config/types/authconfig.go | 22 + .../cli/connhelper/commandconn/commandconn.go | 283 + .../connhelper/commandconn/pdeathsig_linux.go | 10 + .../commandconn/pdeathsig_nolinux.go | 10 + .../connhelper/commandconn/session_unix.go | 13 + .../connhelper/commandconn/session_windows.go | 8 + .../docker/cli/cli/connhelper/connhelper.go | 68 + .../docker/cli/cli/connhelper/ssh/ssh.go | 64 + vendor/github.com/docker/distribution/LICENSE | 202 + .../docker/distribution/digestset/set.go | 247 + .../docker/distribution/reference/helpers.go | 42 + .../distribution/reference/normalize.go | 170 + .../distribution/reference/reference.go | 433 + .../docker/distribution/reference/regexp.go | 143 + 
.../registry/api/errcode/errors.go | 267 + .../registry/api/errcode/handler.go | 40 + .../registry/api/errcode/register.go | 138 + .../registry/client/auth/challenge/addr.go | 27 + .../client/auth/challenge/authchallenge.go | 237 + .../docker/docker-credential-helpers/LICENSE | 20 + .../client/client.go | 121 + .../client/command.go | 57 + .../credentials/credentials.go | 186 + .../credentials/error.go | 102 + .../credentials/helper.go | 14 + .../credentials/version.go | 4 + vendor/github.com/docker/docker/AUTHORS | 2175 +++ vendor/github.com/docker/docker/LICENSE | 191 + vendor/github.com/docker/docker/NOTICE | 19 + vendor/github.com/docker/docker/api/README.md | 42 + vendor/github.com/docker/docker/api/common.go | 11 + .../docker/docker/api/common_unix.go | 6 + .../docker/docker/api/common_windows.go | 8 + .../docker/docker/api/swagger-gen.yaml | 12 + .../github.com/docker/docker/api/swagger.yaml | 11425 ++++++++++++++++ .../docker/docker/api/types/auth.go | 22 + .../docker/docker/api/types/blkiodev/blkio.go | 23 + .../docker/docker/api/types/client.go | 419 + .../docker/docker/api/types/configs.go | 66 + .../docker/api/types/container/config.go | 69 + .../api/types/container/container_changes.go | 20 + .../api/types/container/container_create.go | 20 + .../api/types/container/container_top.go | 22 + .../api/types/container/container_update.go | 16 + .../api/types/container/container_wait.go | 28 + .../docker/api/types/container/host_config.go | 447 + .../api/types/container/hostconfig_unix.go | 41 + .../api/types/container/hostconfig_windows.go | 40 + .../api/types/container/waitcondition.go | 22 + .../docker/docker/api/types/error_response.go | 13 + .../docker/api/types/error_response_ext.go | 6 + .../docker/docker/api/types/events/events.go | 54 + .../docker/docker/api/types/filters/parse.go | 324 + .../docker/api/types/graph_driver_data.go | 17 + .../docker/docker/api/types/id_response.go | 13 + .../docker/api/types/image/image_history.go | 36 + .../api/types/image_delete_response_item.go | 15 + .../docker/docker/api/types/image_summary.go | 49 + .../docker/docker/api/types/mount/mount.go | 131 + .../docker/api/types/network/network.go | 126 + .../docker/docker/api/types/plugin.go | 203 + .../docker/docker/api/types/plugin_device.go | 25 + .../docker/docker/api/types/plugin_env.go | 25 + .../docker/api/types/plugin_interface_type.go | 21 + .../docker/docker/api/types/plugin_mount.go | 37 + .../docker/api/types/plugin_responses.go | 71 + .../docker/docker/api/types/port.go | 23 + .../docker/api/types/registry/authenticate.go | 21 + .../docker/api/types/registry/registry.go | 119 + .../api/types/service_update_response.go | 12 + .../docker/docker/api/types/stats.go | 181 + .../docker/api/types/strslice/strslice.go | 30 + .../docker/docker/api/types/swarm/common.go | 40 + .../docker/docker/api/types/swarm/config.go | 40 + .../docker/api/types/swarm/container.go | 80 + .../docker/docker/api/types/swarm/network.go | 121 + .../docker/docker/api/types/swarm/node.go | 115 + .../docker/docker/api/types/swarm/runtime.go | 27 + .../docker/api/types/swarm/runtime/gen.go | 3 + .../api/types/swarm/runtime/plugin.pb.go | 754 + .../api/types/swarm/runtime/plugin.proto | 21 + .../docker/docker/api/types/swarm/secret.go | 36 + .../docker/docker/api/types/swarm/service.go | 202 + .../docker/docker/api/types/swarm/swarm.go | 227 + .../docker/docker/api/types/swarm/task.go | 206 + .../docker/api/types/time/duration_convert.go | 12 + .../docker/docker/api/types/time/timestamp.go | 129 + 
.../docker/docker/api/types/types.go | 635 + .../docker/api/types/versions/README.md | 14 + .../docker/api/types/versions/compare.go | 62 + .../docker/docker/api/types/volume.go | 72 + .../docker/api/types/volume/volume_create.go | 31 + .../docker/api/types/volume/volume_list.go | 23 + .../github.com/docker/docker/client/README.md | 35 + .../docker/docker/client/build_cancel.go | 16 + .../docker/docker/client/build_prune.go | 45 + .../docker/docker/client/checkpoint_create.go | 14 + .../docker/docker/client/checkpoint_delete.go | 20 + .../docker/docker/client/checkpoint_list.go | 28 + .../github.com/docker/docker/client/client.go | 310 + .../docker/docker/client/client_deprecated.go | 23 + .../docker/docker/client/client_unix.go | 9 + .../docker/docker/client/client_windows.go | 7 + .../docker/docker/client/config_create.go | 25 + .../docker/docker/client/config_inspect.go | 36 + .../docker/docker/client/config_list.go | 38 + .../docker/docker/client/config_remove.go | 13 + .../docker/docker/client/config_update.go | 21 + .../docker/docker/client/container_attach.go | 57 + .../docker/docker/client/container_commit.go | 55 + .../docker/docker/client/container_copy.go | 103 + .../docker/docker/client/container_create.go | 63 + .../docker/docker/client/container_diff.go | 23 + .../docker/docker/client/container_exec.go | 54 + .../docker/docker/client/container_export.go | 19 + .../docker/docker/client/container_inspect.go | 53 + .../docker/docker/client/container_kill.go | 16 + .../docker/docker/client/container_list.go | 57 + .../docker/docker/client/container_logs.go | 80 + .../docker/docker/client/container_pause.go | 10 + .../docker/docker/client/container_prune.go | 36 + .../docker/docker/client/container_remove.go | 27 + .../docker/docker/client/container_rename.go | 15 + .../docker/docker/client/container_resize.go | 29 + .../docker/docker/client/container_restart.go | 22 + .../docker/docker/client/container_start.go | 23 + .../docker/docker/client/container_stats.go | 42 + .../docker/docker/client/container_stop.go | 26 + .../docker/docker/client/container_top.go | 28 + .../docker/docker/client/container_unpause.go | 10 + .../docker/docker/client/container_update.go | 21 + .../docker/docker/client/container_wait.go | 83 + .../docker/docker/client/disk_usage.go | 26 + .../docker/client/distribution_inspect.go | 38 + .../github.com/docker/docker/client/errors.go | 138 + .../github.com/docker/docker/client/events.go | 102 + .../github.com/docker/docker/client/hijack.go | 145 + .../docker/docker/client/image_build.go | 146 + .../docker/docker/client/image_create.go | 37 + .../docker/docker/client/image_history.go | 22 + .../docker/docker/client/image_import.go | 40 + .../docker/docker/client/image_inspect.go | 32 + .../docker/docker/client/image_list.go | 46 + .../docker/docker/client/image_load.go | 29 + .../docker/docker/client/image_prune.go | 36 + .../docker/docker/client/image_pull.go | 64 + .../docker/docker/client/image_push.go | 54 + .../docker/docker/client/image_remove.go | 31 + .../docker/docker/client/image_save.go | 21 + .../docker/docker/client/image_search.go | 51 + .../docker/docker/client/image_tag.go | 37 + .../github.com/docker/docker/client/info.go | 26 + .../docker/docker/client/interface.go | 201 + .../docker/client/interface_experimental.go | 18 + .../docker/docker/client/interface_stable.go | 10 + .../github.com/docker/docker/client/login.go | 25 + .../docker/docker/client/network_connect.go | 19 + .../docker/docker/client/network_create.go | 25 + 
.../docker/client/network_disconnect.go | 15 + .../docker/docker/client/network_inspect.go | 49 + .../docker/docker/client/network_list.go | 32 + .../docker/docker/client/network_prune.go | 36 + .../docker/docker/client/network_remove.go | 10 + .../docker/docker/client/node_inspect.go | 32 + .../docker/docker/client/node_list.go | 36 + .../docker/docker/client/node_remove.go | 20 + .../docker/docker/client/node_update.go | 18 + .../docker/docker/client/options.go | 172 + .../github.com/docker/docker/client/ping.go | 66 + .../docker/docker/client/plugin_create.go | 23 + .../docker/docker/client/plugin_disable.go | 19 + .../docker/docker/client/plugin_enable.go | 19 + .../docker/docker/client/plugin_inspect.go | 31 + .../docker/docker/client/plugin_install.go | 113 + .../docker/docker/client/plugin_list.go | 33 + .../docker/docker/client/plugin_push.go | 16 + .../docker/docker/client/plugin_remove.go | 20 + .../docker/docker/client/plugin_set.go | 12 + .../docker/docker/client/plugin_upgrade.go | 39 + .../docker/docker/client/request.go | 269 + .../docker/docker/client/secret_create.go | 25 + .../docker/docker/client/secret_inspect.go | 36 + .../docker/docker/client/secret_list.go | 38 + .../docker/docker/client/secret_remove.go | 13 + .../docker/docker/client/secret_update.go | 21 + .../docker/docker/client/service_create.go | 178 + .../docker/docker/client/service_inspect.go | 37 + .../docker/docker/client/service_list.go | 39 + .../docker/docker/client/service_logs.go | 52 + .../docker/docker/client/service_remove.go | 10 + .../docker/docker/client/service_update.go | 75 + .../docker/client/swarm_get_unlock_key.go | 21 + .../docker/docker/client/swarm_init.go | 21 + .../docker/docker/client/swarm_inspect.go | 21 + .../docker/docker/client/swarm_join.go | 14 + .../docker/docker/client/swarm_leave.go | 17 + .../docker/docker/client/swarm_unlock.go | 14 + .../docker/docker/client/swarm_update.go | 22 + .../docker/docker/client/task_inspect.go | 32 + .../docker/docker/client/task_list.go | 35 + .../docker/docker/client/task_logs.go | 51 + .../docker/docker/client/transport.go | 17 + .../github.com/docker/docker/client/utils.go | 34 + .../docker/docker/client/version.go | 21 + .../docker/docker/client/volume_create.go | 21 + .../docker/docker/client/volume_inspect.go | 38 + .../docker/docker/client/volume_list.go | 33 + .../docker/docker/client/volume_prune.go | 36 + .../docker/docker/client/volume_remove.go | 21 + .../github.com/docker/docker/errdefs/defs.go | 69 + .../github.com/docker/docker/errdefs/doc.go | 8 + .../docker/docker/errdefs/helpers.go | 279 + .../docker/docker/errdefs/http_helpers.go | 191 + vendor/github.com/docker/docker/errdefs/is.go | 107 + .../docker/pkg/homedir/homedir_linux.go | 93 + .../docker/pkg/homedir/homedir_others.go | 27 + .../docker/docker/pkg/homedir/homedir_unix.go | 38 + .../docker/pkg/homedir/homedir_windows.go | 24 + .../docker/docker/pkg/idtools/idtools.go | 241 + .../docker/docker/pkg/idtools/idtools_unix.go | 295 + .../docker/pkg/idtools/idtools_windows.go | 25 + .../docker/pkg/idtools/usergroupadd_linux.go | 164 + .../pkg/idtools/usergroupadd_unsupported.go | 12 + .../docker/docker/pkg/idtools/utils_unix.go | 31 + .../docker/docker/pkg/ioutils/buffer.go | 51 + .../docker/docker/pkg/ioutils/bytespipe.go | 187 + .../docker/docker/pkg/ioutils/fswriters.go | 162 + .../docker/docker/pkg/ioutils/readers.go | 157 + .../docker/docker/pkg/ioutils/temp_unix.go | 10 + .../docker/docker/pkg/ioutils/temp_windows.go | 16 + 
.../docker/docker/pkg/ioutils/writeflusher.go | 92 + .../docker/docker/pkg/ioutils/writers.go | 66 + .../docker/pkg/jsonmessage/jsonmessage.go | 283 + .../docker/docker/pkg/longpath/longpath.go | 26 + .../docker/docker/pkg/stdcopy/stdcopy.go | 190 + .../docker/docker/pkg/stringid/README.md | 1 + .../docker/docker/pkg/stringid/stringid.go | 63 + .../docker/docker/pkg/system/args_windows.go | 16 + .../docker/docker/pkg/system/chtimes.go | 31 + .../docker/pkg/system/chtimes_nowindows.go | 14 + .../docker/pkg/system/chtimes_windows.go | 26 + .../docker/docker/pkg/system/errors.go | 13 + .../docker/docker/pkg/system/exitcode.go | 19 + .../docker/docker/pkg/system/filesys_unix.go | 67 + .../docker/pkg/system/filesys_windows.go | 292 + .../docker/docker/pkg/system/init.go | 22 + .../docker/docker/pkg/system/init_windows.go | 29 + .../docker/docker/pkg/system/lcow.go | 48 + .../docker/pkg/system/lcow_unsupported.go | 28 + .../docker/docker/pkg/system/lstat_unix.go | 20 + .../docker/docker/pkg/system/lstat_windows.go | 14 + .../docker/docker/pkg/system/meminfo.go | 17 + .../docker/docker/pkg/system/meminfo_linux.go | 71 + .../docker/pkg/system/meminfo_unsupported.go | 8 + .../docker/pkg/system/meminfo_windows.go | 45 + .../docker/docker/pkg/system/mknod.go | 22 + .../docker/docker/pkg/system/mknod_windows.go | 11 + .../docker/docker/pkg/system/path.go | 64 + .../docker/docker/pkg/system/path_unix.go | 10 + .../docker/docker/pkg/system/path_windows.go | 27 + .../docker/docker/pkg/system/process_unix.go | 44 + .../docker/pkg/system/process_windows.go | 18 + .../github.com/docker/docker/pkg/system/rm.go | 78 + .../docker/docker/pkg/system/rm_windows.go | 6 + .../docker/docker/pkg/system/stat_bsd.go | 15 + .../docker/docker/pkg/system/stat_darwin.go | 13 + .../docker/docker/pkg/system/stat_linux.go | 20 + .../docker/docker/pkg/system/stat_openbsd.go | 13 + .../docker/docker/pkg/system/stat_solaris.go | 13 + .../docker/docker/pkg/system/stat_unix.go | 66 + .../docker/docker/pkg/system/stat_windows.go | 49 + .../docker/docker/pkg/system/syscall_unix.go | 11 + .../docker/pkg/system/syscall_windows.go | 136 + .../docker/docker/pkg/system/umask.go | 13 + .../docker/docker/pkg/system/umask_windows.go | 7 + .../docker/docker/pkg/system/utimes_unix.go | 24 + .../docker/pkg/system/utimes_unsupported.go | 10 + .../docker/docker/pkg/system/xattrs_linux.go | 37 + .../docker/pkg/system/xattrs_unsupported.go | 13 + .../docker/volume/mounts/lcow_parser.go | 34 + .../docker/volume/mounts/linux_parser.go | 423 + .../docker/docker/volume/mounts/mounts.go | 181 + .../docker/docker/volume/mounts/parser.go | 47 + .../docker/docker/volume/mounts/validate.go | 28 + .../docker/volume/mounts/volume_copy.go | 23 + .../docker/volume/mounts/volume_unix.go | 18 + .../docker/volume/mounts/volume_windows.go | 8 + .../docker/volume/mounts/windows_parser.go | 456 + .../github.com/docker/docker/volume/volume.go | 69 + .../github.com/docker/go-connections/LICENSE | 191 + .../docker/go-connections/nat/nat.go | 242 + .../docker/go-connections/nat/parse.go | 57 + .../docker/go-connections/nat/sort.go | 96 + .../docker/go-connections/sockets/README.md | 0 .../go-connections/sockets/inmem_socket.go | 81 + .../docker/go-connections/sockets/proxy.go | 51 + .../docker/go-connections/sockets/sockets.go | 38 + .../go-connections/sockets/sockets_unix.go | 35 + .../go-connections/sockets/sockets_windows.go | 27 + .../go-connections/sockets/tcp_socket.go | 22 + .../go-connections/sockets/unix_socket.go | 32 + 
.../go-connections/tlsconfig/certpool_go17.go | 18 + .../tlsconfig/certpool_other.go | 13 + .../docker/go-connections/tlsconfig/config.go | 254 + .../tlsconfig/config_client_ciphers.go | 17 + .../tlsconfig/config_legacy_client_ciphers.go | 15 + .../docker/go-units/CONTRIBUTING.md | 67 + vendor/github.com/docker/go-units/LICENSE | 191 + vendor/github.com/docker/go-units/MAINTAINERS | 46 + vendor/github.com/docker/go-units/README.md | 16 + vendor/github.com/docker/go-units/circle.yml | 11 + vendor/github.com/docker/go-units/duration.go | 35 + vendor/github.com/docker/go-units/size.go | 108 + vendor/github.com/docker/go-units/ulimit.go | 123 + vendor/github.com/emirpasic/gods/LICENSE | 41 + .../emirpasic/gods/containers/containers.go | 35 + .../emirpasic/gods/containers/enumerable.go | 61 + .../emirpasic/gods/containers/iterator.go | 109 + .../gods/containers/serialization.go | 17 + .../gods/lists/arraylist/arraylist.go | 228 + .../gods/lists/arraylist/enumerable.go | 79 + .../gods/lists/arraylist/iterator.go | 83 + .../gods/lists/arraylist/serialization.go | 29 + .../github.com/emirpasic/gods/lists/lists.go | 33 + .../gods/trees/binaryheap/binaryheap.go | 163 + .../gods/trees/binaryheap/iterator.go | 84 + .../gods/trees/binaryheap/serialization.go | 22 + .../github.com/emirpasic/gods/trees/trees.go | 21 + .../emirpasic/gods/utils/comparator.go | 251 + .../github.com/emirpasic/gods/utils/sort.go | 29 + .../github.com/emirpasic/gods/utils/utils.go | 47 + .../github.com/evanphx/json-patch/.travis.yml | 19 - .../github.com/evanphx/json-patch/README.md | 19 + vendor/github.com/evanphx/json-patch/merge.go | 33 +- vendor/github.com/evanphx/json-patch/patch.go | 4 + vendor/github.com/fatih/color/LICENSE.md | 20 + vendor/github.com/fatih/color/README.md | 178 + vendor/github.com/fatih/color/color.go | 618 + vendor/github.com/fatih/color/doc.go | 135 + vendor/github.com/fatih/color/go.mod | 8 + vendor/github.com/fatih/color/go.sum | 9 + .../github.com/gdamore/encoding/.appveyor.yml | 13 + .../github.com/gdamore/encoding/.travis.yml | 7 + vendor/github.com/gdamore/encoding/LICENSE | 202 + vendor/github.com/gdamore/encoding/README.md | 19 + vendor/github.com/gdamore/encoding/ascii.go | 36 + vendor/github.com/gdamore/encoding/charmap.go | 196 + vendor/github.com/gdamore/encoding/doc.go | 17 + vendor/github.com/gdamore/encoding/ebcdic.go | 273 + vendor/github.com/gdamore/encoding/go.mod | 5 + vendor/github.com/gdamore/encoding/go.sum | 2 + vendor/github.com/gdamore/encoding/latin1.go | 33 + vendor/github.com/gdamore/encoding/latin5.go | 35 + vendor/github.com/gdamore/encoding/utf8.go | 35 + .../github.com/gdamore/tcell/v2/.appveyor.yml | 13 + vendor/github.com/gdamore/tcell/v2/.gitignore | 1 + .../github.com/gdamore/tcell/v2/.travis.yml | 18 + vendor/github.com/gdamore/tcell/v2/AUTHORS | 4 + .../github.com/gdamore/tcell/v2/CHANGESv2.md | 82 + vendor/github.com/gdamore/tcell/v2/LICENSE | 202 + vendor/github.com/gdamore/tcell/v2/README.md | 272 + .../github.com/gdamore/tcell/v2/TUTORIAL.md | 293 + vendor/github.com/gdamore/tcell/v2/attr.go | 33 + vendor/github.com/gdamore/tcell/v2/cell.go | 177 + .../gdamore/tcell/v2/charset_stub.go | 21 + .../gdamore/tcell/v2/charset_unix.go | 49 + .../gdamore/tcell/v2/charset_windows.go | 21 + vendor/github.com/gdamore/tcell/v2/color.go | 1081 ++ .../github.com/gdamore/tcell/v2/colorfit.go | 52 + .../gdamore/tcell/v2/console_stub.go | 23 + .../gdamore/tcell/v2/console_win.go | 1269 ++ vendor/github.com/gdamore/tcell/v2/doc.go | 48 + 
.../github.com/gdamore/tcell/v2/encoding.go | 139 + vendor/github.com/gdamore/tcell/v2/errors.go | 73 + vendor/github.com/gdamore/tcell/v2/event.go | 53 + vendor/github.com/gdamore/tcell/v2/go.mod | 12 + vendor/github.com/gdamore/tcell/v2/go.sum | 14 + .../github.com/gdamore/tcell/v2/interrupt.go | 41 + vendor/github.com/gdamore/tcell/v2/key.go | 470 + vendor/github.com/gdamore/tcell/v2/mouse.go | 103 + .../gdamore/tcell/v2/nonblock_bsd.go | 42 + .../gdamore/tcell/v2/nonblock_unix.go | 40 + vendor/github.com/gdamore/tcell/v2/paste.go | 48 + vendor/github.com/gdamore/tcell/v2/resize.go | 42 + vendor/github.com/gdamore/tcell/v2/runes.go | 111 + vendor/github.com/gdamore/tcell/v2/screen.go | 260 + .../github.com/gdamore/tcell/v2/simulation.go | 554 + .../github.com/gdamore/tcell/v2/stdin_unix.go | 177 + vendor/github.com/gdamore/tcell/v2/style.go | 137 + .../gdamore/tcell/v2/terminfo/.gitignore | 1 + .../gdamore/tcell/v2/terminfo/README.md | 25 + .../gdamore/tcell/v2/terminfo/TERMINALS.md | 7 + .../tcell/v2/terminfo/a/aixterm/term.go | 83 + .../tcell/v2/terminfo/a/alacritty/term.go | 69 + .../gdamore/tcell/v2/terminfo/a/ansi/term.go | 43 + .../tcell/v2/terminfo/b/beterm/term.go | 57 + .../gdamore/tcell/v2/terminfo/base/base.go | 32 + .../tcell/v2/terminfo/c/cygwin/term.go | 66 + .../tcell/v2/terminfo/d/dtterm/term.go | 69 + .../tcell/v2/terminfo/dynamic/dynamic.go | 427 + .../gdamore/tcell/v2/terminfo/e/emacs/term.go | 63 + .../tcell/v2/terminfo/extended/extended.go | 58 + .../gdamore/tcell/v2/terminfo/f/foot/foot.go | 69 + .../gdamore/tcell/v2/terminfo/g/gnome/term.go | 130 + .../gdamore/tcell/v2/terminfo/gen.sh | 18 + .../tcell/v2/terminfo/h/hpterm/term.go | 51 + .../tcell/v2/terminfo/k/konsole/term.go | 130 + .../gdamore/tcell/v2/terminfo/k/kterm/term.go | 68 + .../gdamore/tcell/v2/terminfo/l/linux/term.go | 71 + .../gdamore/tcell/v2/terminfo/models.txt | 31 + .../tcell/v2/terminfo/p/pcansi/term.go | 41 + .../gdamore/tcell/v2/terminfo/r/rxvt/term.go | 485 + .../tcell/v2/terminfo/s/screen/term.go | 128 + .../tcell/v2/terminfo/s/simpleterm/term.go | 136 + .../gdamore/tcell/v2/terminfo/s/sun/term.go | 112 + .../tcell/v2/terminfo/t/termite/term.go | 67 + .../gdamore/tcell/v2/terminfo/t/tmux/term.go | 71 + .../gdamore/tcell/v2/terminfo/terminfo.go | 827 ++ .../gdamore/tcell/v2/terminfo/v/vt100/term.go | 49 + .../gdamore/tcell/v2/terminfo/v/vt102/term.go | 48 + .../gdamore/tcell/v2/terminfo/v/vt220/term.go | 59 + .../gdamore/tcell/v2/terminfo/v/vt320/term.go | 64 + .../gdamore/tcell/v2/terminfo/v/vt400/term.go | 48 + .../gdamore/tcell/v2/terminfo/v/vt420/term.go | 54 + .../gdamore/tcell/v2/terminfo/v/vt52/term.go | 29 + .../gdamore/tcell/v2/terminfo/w/wy50/term.go | 60 + .../gdamore/tcell/v2/terminfo/w/wy60/term.go | 64 + .../tcell/v2/terminfo/w/wy99_ansi/term.go | 116 + .../gdamore/tcell/v2/terminfo/x/xfce/term.go | 67 + .../tcell/v2/terminfo/x/xterm/direct.go | 92 + .../gdamore/tcell/v2/terminfo/x/xterm/term.go | 192 + .../tcell/v2/terminfo/x/xterm_kitty/term.go | 69 + .../tcell/v2/terminfo/x/xterm_termite/term.go | 66 + .../gdamore/tcell/v2/terms_default.go | 23 + .../gdamore/tcell/v2/terms_dynamic.go | 37 + .../gdamore/tcell/v2/terms_static.go | 27 + vendor/github.com/gdamore/tcell/v2/tscreen.go | 1727 +++ .../gdamore/tcell/v2/tscreen_stub.go | 25 + .../gdamore/tcell/v2/tscreen_unix.go | 31 + vendor/github.com/gdamore/tcell/v2/tty.go | 56 + .../github.com/gdamore/tcell/v2/tty_unix.go | 190 + vendor/github.com/go-git/gcfg/LICENSE | 28 + vendor/github.com/go-git/gcfg/README | 4 + 
vendor/github.com/go-git/gcfg/doc.go | 145 + vendor/github.com/go-git/gcfg/errors.go | 41 + vendor/github.com/go-git/gcfg/go1_0.go | 7 + vendor/github.com/go-git/gcfg/go1_2.go | 9 + vendor/github.com/go-git/gcfg/read.go | 273 + .../github.com/go-git/gcfg/scanner/errors.go | 121 + .../github.com/go-git/gcfg/scanner/scanner.go | 342 + vendor/github.com/go-git/gcfg/set.go | 332 + .../github.com/go-git/gcfg/token/position.go | 435 + .../github.com/go-git/gcfg/token/serialize.go | 56 + vendor/github.com/go-git/gcfg/token/token.go | 83 + vendor/github.com/go-git/gcfg/types/bool.go | 23 + vendor/github.com/go-git/gcfg/types/doc.go | 4 + vendor/github.com/go-git/gcfg/types/enum.go | 44 + vendor/github.com/go-git/gcfg/types/int.go | 86 + vendor/github.com/go-git/gcfg/types/scan.go | 23 + .../github.com/go-git/go-billy/v5/.gitignore | 4 + vendor/github.com/go-git/go-billy/v5/LICENSE | 201 + .../github.com/go-git/go-billy/v5/README.md | 73 + vendor/github.com/go-git/go-billy/v5/fs.go | 202 + vendor/github.com/go-git/go-billy/v5/go.mod | 10 + vendor/github.com/go-git/go-billy/v5/go.sum | 14 + .../go-billy/v5/helper/chroot/chroot.go | 242 + .../go-billy/v5/helper/polyfill/polyfill.go | 105 + .../go-git/go-billy/v5/memfs/memory.go | 410 + .../go-git/go-billy/v5/memfs/storage.go | 229 + .../github.com/go-git/go-billy/v5/osfs/os.go | 144 + .../go-git/go-billy/v5/osfs/os_js.go | 21 + .../go-git/go-billy/v5/osfs/os_plan9.go | 85 + .../go-git/go-billy/v5/osfs/os_posix.go | 27 + .../go-git/go-billy/v5/osfs/os_windows.go | 61 + .../go-git/go-billy/v5/util/glob.go | 111 + .../go-git/go-billy/v5/util/util.go | 282 + vendor/github.com/go-git/go-git/v5/.gitignore | 4 + .../go-git/go-git/v5/CODE_OF_CONDUCT.md | 74 + .../go-git/go-git/v5/COMPATIBILITY.md | 111 + .../go-git/go-git/v5/CONTRIBUTING.md | 46 + vendor/github.com/go-git/go-git/v5/LICENSE | 201 + vendor/github.com/go-git/go-git/v5/Makefile | 38 + vendor/github.com/go-git/go-git/v5/README.md | 131 + vendor/github.com/go-git/go-git/v5/blame.go | 302 + vendor/github.com/go-git/go-git/v5/common.go | 20 + .../go-git/go-git/v5/config/branch.go | 90 + .../go-git/go-git/v5/config/config.go | 659 + .../go-git/go-git/v5/config/modules.go | 139 + .../go-git/go-git/v5/config/refspec.go | 155 + .../github.com/go-git/go-git/v5/config/url.go | 81 + vendor/github.com/go-git/go-git/v5/doc.go | 10 + vendor/github.com/go-git/go-git/v5/go.mod | 31 + vendor/github.com/go-git/go-git/v5/go.sum | 103 + .../go-git/v5/internal/revision/parser.go | 622 + .../go-git/v5/internal/revision/scanner.go | 117 + .../go-git/v5/internal/revision/token.go | 28 + .../go-git/go-git/v5/internal/url/url.go | 37 + .../go-git/go-git/v5/object_walker.go | 104 + vendor/github.com/go-git/go-git/v5/options.go | 634 + .../go-git/v5/plumbing/cache/buffer_lru.go | 98 + .../go-git/go-git/v5/plumbing/cache/common.go | 39 + .../go-git/v5/plumbing/cache/object_lru.go | 101 + .../go-git/go-git/v5/plumbing/color/color.go | 38 + .../go-git/go-git/v5/plumbing/error.go | 35 + .../go-git/v5/plumbing/filemode/filemode.go | 188 + .../v5/plumbing/format/config/common.go | 109 + .../v5/plumbing/format/config/decoder.go | 37 + .../go-git/v5/plumbing/format/config/doc.go | 122 + .../v5/plumbing/format/config/encoder.go | 77 + .../v5/plumbing/format/config/option.go | 127 + .../v5/plumbing/format/config/section.go | 181 + .../v5/plumbing/format/diff/colorconfig.go | 97 + .../go-git/v5/plumbing/format/diff/patch.go | 58 + .../plumbing/format/diff/unified_encoder.go | 395 + .../v5/plumbing/format/gitignore/dir.go | 135 + 
.../v5/plumbing/format/gitignore/doc.go | 70 +
.../v5/plumbing/format/gitignore/matcher.go | 30 +
.../v5/plumbing/format/gitignore/pattern.go | 153 +
.../v5/plumbing/format/idxfile/decoder.go | 177 +
.../go-git/v5/plumbing/format/idxfile/doc.go | 128 +
.../v5/plumbing/format/idxfile/encoder.go | 142 +
.../v5/plumbing/format/idxfile/idxfile.go | 346 +
.../v5/plumbing/format/idxfile/writer.go | 186 +
.../v5/plumbing/format/index/decoder.go | 479 +
.../go-git/v5/plumbing/format/index/doc.go | 360 +
.../v5/plumbing/format/index/encoder.go | 150 +
.../go-git/v5/plumbing/format/index/index.go | 213 +
.../go-git/v5/plumbing/format/index/match.go | 186 +
.../go-git/v5/plumbing/format/objfile/doc.go | 2 +
.../v5/plumbing/format/objfile/reader.go | 114 +
.../v5/plumbing/format/objfile/writer.go | 109 +
.../v5/plumbing/format/packfile/common.go | 78 +
.../plumbing/format/packfile/delta_index.go | 297 +
.../format/packfile/delta_selector.go | 369 +
.../v5/plumbing/format/packfile/diff_delta.go | 204 +
.../go-git/v5/plumbing/format/packfile/doc.go | 39 +
.../v5/plumbing/format/packfile/encoder.go | 225 +
.../v5/plumbing/format/packfile/error.go | 30 +
.../v5/plumbing/format/packfile/fsobject.go | 116 +
.../plumbing/format/packfile/object_pack.go | 164 +
.../v5/plumbing/format/packfile/packfile.go | 565 +
.../v5/plumbing/format/packfile/parser.go | 495 +
.../plumbing/format/packfile/patch_delta.go | 252 +
.../v5/plumbing/format/packfile/scanner.go | 466 +
.../v5/plumbing/format/pktline/encoder.go | 122 +
.../v5/plumbing/format/pktline/scanner.go | 134 +
.../go-git/go-git/v5/plumbing/hash.go | 83 +
.../go-git/go-git/v5/plumbing/memory.go | 72 +
.../go-git/go-git/v5/plumbing/object.go | 111 +
.../go-git/go-git/v5/plumbing/object/blob.go | 144 +
.../go-git/v5/plumbing/object/change.go | 159 +
.../v5/plumbing/object/change_adaptor.go | 61 +
.../go-git/v5/plumbing/object/commit.go | 442 +
.../v5/plumbing/object/commit_walker.go | 327 +
.../v5/plumbing/object/commit_walker_bfs.go | 100 +
.../object/commit_walker_bfs_filtered.go | 175 +
.../v5/plumbing/object/commit_walker_ctime.go | 103 +
.../v5/plumbing/object/commit_walker_limit.go | 65 +
.../v5/plumbing/object/commit_walker_path.go | 160 +
.../go-git/v5/plumbing/object/common.go | 12 +
.../go-git/v5/plumbing/object/difftree.go | 98 +
.../go-git/go-git/v5/plumbing/object/file.go | 137 +
.../go-git/v5/plumbing/object/merge_base.go | 210 +
.../go-git/v5/plumbing/object/object.go | 239 +
.../go-git/go-git/v5/plumbing/object/patch.go | 354 +
.../go-git/v5/plumbing/object/rename.go | 813 ++
.../go-git/go-git/v5/plumbing/object/tag.go | 357 +
.../go-git/go-git/v5/plumbing/object/tree.go | 525 +
.../go-git/v5/plumbing/object/treenoder.go | 136 +
.../v5/plumbing/protocol/packp/advrefs.go | 211 +
.../plumbing/protocol/packp/advrefs_decode.go | 288 +
.../plumbing/protocol/packp/advrefs_encode.go | 176 +
.../protocol/packp/capability/capability.go | 259 +
.../protocol/packp/capability/list.go | 193 +
.../v5/plumbing/protocol/packp/common.go | 70 +
.../go-git/v5/plumbing/protocol/packp/doc.go | 724 +
.../plumbing/protocol/packp/report_status.go | 165 +
.../v5/plumbing/protocol/packp/shallowupd.go | 92 +
.../protocol/packp/sideband/common.go | 33 +
.../plumbing/protocol/packp/sideband/demux.go | 148 +
.../plumbing/protocol/packp/sideband/doc.go | 31 +
.../plumbing/protocol/packp/sideband/muxer.go | 65 +
.../v5/plumbing/protocol/packp/srvresp.go | 127 +
.../v5/plumbing/protocol/packp/ulreq.go | 168 +
.../plumbing/protocol/packp/ulreq_decode.go | 257 +
.../plumbing/protocol/packp/ulreq_encode.go | 145 +
.../v5/plumbing/protocol/packp/updreq.go | 122 +
.../plumbing/protocol/packp/updreq_decode.go | 250 +
.../plumbing/protocol/packp/updreq_encode.go | 75 +
.../v5/plumbing/protocol/packp/uppackreq.go | 98 +
.../v5/plumbing/protocol/packp/uppackresp.go | 109 +
.../go-git/go-git/v5/plumbing/reference.go | 209 +
.../go-git/go-git/v5/plumbing/revision.go | 11 +
.../go-git/v5/plumbing/revlist/revlist.go | 230 +
.../go-git/go-git/v5/plumbing/storer/doc.go | 2 +
.../go-git/go-git/v5/plumbing/storer/index.go | 9 +
.../go-git/v5/plumbing/storer/object.go | 288 +
.../go-git/v5/plumbing/storer/reference.go | 240 +
.../go-git/v5/plumbing/storer/shallow.go | 10 +
.../go-git/v5/plumbing/storer/storer.go | 15 +
.../v5/plumbing/transport/client/client.go | 83 +
.../go-git/v5/plumbing/transport/common.go | 283 +
.../v5/plumbing/transport/file/client.go | 157 +
.../v5/plumbing/transport/file/server.go | 53 +
.../v5/plumbing/transport/git/common.go | 109 +
.../v5/plumbing/transport/http/common.go | 282 +
.../plumbing/transport/http/receive_pack.go | 110 +
.../v5/plumbing/transport/http/upload_pack.go | 127 +
.../transport/internal/common/common.go | 478 +
.../transport/internal/common/server.go | 73 +
.../v5/plumbing/transport/server/loader.go | 64 +
.../v5/plumbing/transport/server/server.go | 432 +
.../v5/plumbing/transport/ssh/auth_method.go | 308 +
.../v5/plumbing/transport/ssh/common.go | 235 +
vendor/github.com/go-git/go-git/v5/prune.go | 66 +
.../github.com/go-git/go-git/v5/references.go | 264 +
vendor/github.com/go-git/go-git/v5/remote.go | 1262 ++
.../github.com/go-git/go-git/v5/repository.go | 1722 +++
vendor/github.com/go-git/go-git/v5/status.go | 79 +
.../go-git/v5/storage/filesystem/config.go | 48 +
.../v5/storage/filesystem/deltaobject.go | 37 +
.../v5/storage/filesystem/dotgit/dotgit.go | 1188 ++
.../dotgit/dotgit_rewrite_packed_refs.go | 81 +
.../filesystem/dotgit/dotgit_setref.go | 90 +
.../dotgit/repository_filesystem.go | 111 +
.../v5/storage/filesystem/dotgit/writers.go | 284 +
.../go-git/v5/storage/filesystem/index.go | 54 +
.../go-git/v5/storage/filesystem/module.go | 20 +
.../go-git/v5/storage/filesystem/object.go | 848 ++
.../go-git/v5/storage/filesystem/reference.go | 44 +
.../go-git/v5/storage/filesystem/shallow.go | 54 +
.../go-git/v5/storage/filesystem/storage.go | 73 +
.../go-git/v5/storage/memory/storage.go | 320 +
.../go-git/go-git/v5/storage/storer.go | 30 +
.../github.com/go-git/go-git/v5/submodule.go | 397 +
.../go-git/go-git/v5/utils/binary/read.go | 180 +
.../go-git/go-git/v5/utils/binary/write.go | 50 +
.../go-git/go-git/v5/utils/diff/diff.go | 61 +
.../go-git/go-git/v5/utils/ioutil/common.go | 180 +
.../go-git/go-git/v5/utils/ioutil/pipe.go | 9 +
.../go-git/go-git/v5/utils/ioutil/pipe_js.go | 9 +
.../go-git/v5/utils/merkletrie/change.go | 149 +
.../go-git/v5/utils/merkletrie/difftree.go | 428 +
.../go-git/go-git/v5/utils/merkletrie/doc.go | 34 +
.../go-git/v5/utils/merkletrie/doubleiter.go | 187 +
.../v5/utils/merkletrie/filesystem/node.go | 195 +
.../go-git/v5/utils/merkletrie/index/node.go | 90 +
.../utils/merkletrie/internal/frame/frame.go | 91 +
.../go-git/go-git/v5/utils/merkletrie/iter.go | 216 +
.../go-git/v5/utils/merkletrie/noder/noder.go | 59 +
.../go-git/v5/utils/merkletrie/noder/path.go | 90 +
.../github.com/go-git/go-git/v5/worktree.go | 965 ++
.../go-git/go-git/v5/worktree_bsd.go | 26 +
.../go-git/go-git/v5/worktree_commit.go | 238 +
.../go-git/go-git/v5/worktree_js.go | 26 +
.../go-git/go-git/v5/worktree_linux.go | 26 +
.../go-git/go-git/v5/worktree_plan9.go | 31 +
.../go-git/go-git/v5/worktree_status.go | 708 +
.../go-git/go-git/v5/worktree_unix_other.go | 26 +
.../go-git/go-git/v5/worktree_windows.go | 35 +
vendor/github.com/gobuffalo/here/.gitignore | 28 +
.../github.com/gobuffalo/here/.goreleaser.yml | 45 +
vendor/github.com/gobuffalo/here/LICENSE | 21 +
vendor/github.com/gobuffalo/here/Makefile | 50 +
vendor/github.com/gobuffalo/here/README.md | 277 +
vendor/github.com/gobuffalo/here/SHOULDERS.md | 24 +
vendor/github.com/gobuffalo/here/current.go | 31 +
vendor/github.com/gobuffalo/here/dir.go | 83 +
vendor/github.com/gobuffalo/here/go.mod | 11 +
vendor/github.com/gobuffalo/here/go.sum | 19 +
vendor/github.com/gobuffalo/here/here.go | 57 +
vendor/github.com/gobuffalo/here/info.go | 39 +
vendor/github.com/gobuffalo/here/info_map.go | 32 +
vendor/github.com/gobuffalo/here/module.go | 29 +
vendor/github.com/gobuffalo/here/parse.go | 63 +
vendor/github.com/gobuffalo/here/path.go | 20 +
vendor/github.com/gobuffalo/here/pkg.go | 57 +
vendor/github.com/gobuffalo/here/version.go | 4 +
vendor/github.com/google/cel-go/LICENSE | 202 +
vendor/github.com/google/cel-go/cel/cel.go | 19 +
vendor/github.com/google/cel-go/cel/env.go | 466 +
vendor/github.com/google/cel-go/cel/io.go | 89 +
.../github.com/google/cel-go/cel/library.go | 77 +
.../github.com/google/cel-go/cel/options.go | 379 +
.../github.com/google/cel-go/cel/program.go | 318 +
.../google/cel-go/checker/checker.go | 637 +
.../google/cel-go/checker/decls/decls.go | 231 +
.../google/cel-go/checker/decls/scopes.go | 115 +
.../github.com/google/cel-go/checker/env.go | 354 +
.../google/cel-go/checker/errors.go | 123 +
.../google/cel-go/checker/mapping.go | 62 +
.../google/cel-go/checker/printer.go | 71 +
.../google/cel-go/checker/standard.go | 440 +
.../github.com/google/cel-go/checker/types.go | 507 +
.../cel-go/common/containers/container.go | 316 +
.../google/cel-go/common/debug/debug.go | 305 +
vendor/github.com/google/cel-go/common/doc.go | 17 +
.../github.com/google/cel-go/common/error.go | 70 +
.../github.com/google/cel-go/common/errors.go | 74 +
.../google/cel-go/common/location.go | 51 +
.../cel-go/common/operators/operators.go | 145 +
.../cel-go/common/overloads/overloads.go | 293 +
.../github.com/google/cel-go/common/source.go | 182 +
.../google/cel-go/common/types/any_value.go | 24 +
.../google/cel-go/common/types/bool.go | 144 +
.../google/cel-go/common/types/bytes.go | 135 +
.../google/cel-go/common/types/doc.go | 17 +
.../google/cel-go/common/types/double.go | 201 +
.../google/cel-go/common/types/duration.go | 180 +
.../google/cel-go/common/types/err.go | 107 +
.../google/cel-go/common/types/int.go | 276 +
.../google/cel-go/common/types/iterator.go | 55 +
.../google/cel-go/common/types/json_value.go | 28 +
.../google/cel-go/common/types/list.go | 434 +
.../google/cel-go/common/types/map.go | 787 ++
.../google/cel-go/common/types/null.go | 101 +
.../google/cel-go/common/types/object.go | 159 +
.../google/cel-go/common/types/pb/checked.go | 93 +
.../google/cel-go/common/types/pb/enum.go | 44 +
.../google/cel-go/common/types/pb/file.go | 141 +
.../google/cel-go/common/types/pb/pb.go | 196 +
.../google/cel-go/common/types/pb/type.go | 532 +
.../google/cel-go/common/types/provider.go | 534 +
.../cel-go/common/types/ref/provider.go | 103 +
.../cel-go/common/types/ref/reference.go | 59 +
.../google/cel-go/common/types/string.go | 218 +
.../google/cel-go/common/types/timestamp.go | 294 +
.../cel-go/common/types/traits/comparer.go | 33 +
.../cel-go/common/types/traits/container.go | 23 +
.../common/types/traits/field_tester.go | 30 +
.../cel-go/common/types/traits/indexer.go | 25 +
.../cel-go/common/types/traits/iterator.go | 36 +
.../cel-go/common/types/traits/lister.go | 27 +
.../cel-go/common/types/traits/mapper.go | 33 +
.../cel-go/common/types/traits/matcher.go | 23 +
.../google/cel-go/common/types/traits/math.go | 62 +
.../cel-go/common/types/traits/receiver.go | 24 +
.../cel-go/common/types/traits/sizer.go | 25 +
.../cel-go/common/types/traits/traits.go | 64 +
.../google/cel-go/common/types/type.go | 104 +
.../google/cel-go/common/types/uint.go | 220 +
.../google/cel-go/common/types/unknown.go | 61 +
.../google/cel-go/common/types/util.go | 38 +
.../google/cel-go/interpreter/activation.go | 197 +
.../cel-go/interpreter/attribute_patterns.go | 383 +
.../google/cel-go/interpreter/attributes.go | 1032 ++
.../google/cel-go/interpreter/coster.go | 31 +
.../google/cel-go/interpreter/decorators.go | 201 +
.../google/cel-go/interpreter/dispatcher.go | 100 +
.../google/cel-go/interpreter/evalstate.go | 75 +
.../cel-go/interpreter/functions/functions.go | 58 +
.../cel-go/interpreter/functions/standard.go | 272 +
.../cel-go/interpreter/interpretable.go | 1204 ++
.../google/cel-go/interpreter/interpreter.go | 137 +
.../google/cel-go/interpreter/planner.go | 773 ++
.../google/cel-go/interpreter/prune.go | 392 +
.../github.com/google/cel-go/parser/errors.go | 42 +
.../google/cel-go/parser/gen/CEL.g4 | 186 +
.../google/cel-go/parser/gen/CEL.tokens | 64 +
.../google/cel-go/parser/gen/CELLexer.tokens | 64 +
.../cel-go/parser/gen/cel_base_listener.go | 195 +
.../cel-go/parser/gen/cel_base_visitor.go | 124 +
.../google/cel-go/parser/gen/cel_lexer.go | 319 +
.../google/cel-go/parser/gen/cel_listener.go | 183 +
.../google/cel-go/parser/gen/cel_parser.go | 4097 ++++++
.../google/cel-go/parser/gen/cel_visitor.go | 96 +
.../google/cel-go/parser/gen/doc.go | 16 +
.../github.com/google/cel-go/parser/helper.go | 392 +
.../github.com/google/cel-go/parser/macro.go | 386 +
.../github.com/google/cel-go/parser/parser.go | 650 +
.../google/cel-go/parser/unescape.go | 237 +
.../google/cel-go/parser/unparser.go | 442 +
.../internal/and/and_closer.go | 48 +
.../internal/estargz/estargz.go | 55 +
.../go-containerregistry/internal/gzip/zip.go | 117 +
.../internal/redact/redact.go | 35 +
.../internal/retry/retry.go | 77 +
.../wait/kubernetes_apimachinery_wait.go | 123 +
.../internal/verify/verify.go | 107 +
.../go-containerregistry/pkg/authn/README.md | 242 +
.../go-containerregistry/pkg/authn/anon.go | 26 +
.../go-containerregistry/pkg/authn/auth.go | 30 +
.../go-containerregistry/pkg/authn/authn.go | 36 +
.../go-containerregistry/pkg/authn/basic.go | 29 +
.../go-containerregistry/pkg/authn/bearer.go | 27 +
.../go-containerregistry/pkg/authn/doc.go | 17 +
.../pkg/authn/keychain.go | 94 +
.../pkg/authn/multikeychain.go | 41 +
.../go-containerregistry/pkg/logs/logs.go | 39 +
.../go-containerregistry/pkg/v1/config.go | 133 +
.../pkg/v1/daemon/README.md | 11 +
.../go-containerregistry/pkg/v1/daemon/doc.go | 17 +
.../pkg/v1/daemon/image.go | 89 +
.../pkg/v1/daemon/options.go | 102 +
.../pkg/v1/daemon/write.go | 61 +
.../google/go-containerregistry/pkg/v1/doc.go | 18 +
.../pkg/v1/empty/README.md | 8 +
.../go-containerregistry/pkg/v1/empty/doc.go | 16 +
.../pkg/v1/empty/image.go | 52 +
.../pkg/v1/empty/index.go | 63 +
.../go-containerregistry/pkg/v1/hash.go | 123 +
.../go-containerregistry/pkg/v1/image.go | 59 +
.../go-containerregistry/pkg/v1/index.go | 43 +
.../go-containerregistry/pkg/v1/layer.go | 42 +
.../pkg/v1/layout/README.md | 5 +
.../pkg/v1/layout/blob.go | 38 +
.../go-containerregistry/pkg/v1/layout/doc.go | 19 +
.../pkg/v1/layout/image.go | 141 +
.../pkg/v1/layout/index.go | 161 +
.../pkg/v1/layout/layoutpath.go | 25 +
.../pkg/v1/layout/options.go | 57 +
.../pkg/v1/layout/read.go | 32 +
.../pkg/v1/layout/write.go | 436 +
.../go-containerregistry/pkg/v1/manifest.go | 68 +
.../pkg/v1/match/match.go | 90 +
.../pkg/v1/mutate/README.md | 56 +
.../go-containerregistry/pkg/v1/mutate/doc.go | 16 +
.../pkg/v1/mutate/image.go | 286 +
.../pkg/v1/mutate/index.go | 209 +
.../pkg/v1/mutate/mutate.go | 464 +
.../pkg/v1/mutate/rebase.go | 144 +
.../pkg/v1/partial/README.md | 82 +
.../pkg/v1/partial/compressed.go | 163 +
.../pkg/v1/partial/doc.go | 17 +
.../pkg/v1/partial/image.go | 28 +
.../pkg/v1/partial/index.go | 85 +
.../pkg/v1/partial/uncompressed.go | 223 +
.../pkg/v1/partial/with.go | 389 +
.../go-containerregistry/pkg/v1/platform.go | 59 +
.../go-containerregistry/pkg/v1/progress.go | 25 +
.../pkg/v1/remote/README.md | 117 +
.../pkg/v1/remote/catalog.go | 153 +
.../pkg/v1/remote/check.go | 59 +
.../pkg/v1/remote/delete.go | 57 +
.../pkg/v1/remote/descriptor.go | 430 +
.../go-containerregistry/pkg/v1/remote/doc.go | 17 +
.../pkg/v1/remote/image.go | 248 +
.../pkg/v1/remote/index.go | 273 +
.../pkg/v1/remote/layer.go | 94 +
.../pkg/v1/remote/list.go | 140 +
.../pkg/v1/remote/mount.go | 95 +
.../pkg/v1/remote/multi_write.go | 298 +
.../pkg/v1/remote/options.go | 195 +
.../pkg/v1/remote/transport/README.md | 129 +
.../pkg/v1/remote/transport/basic.go | 62 +
.../pkg/v1/remote/transport/bearer.go | 311 +
.../pkg/v1/remote/transport/doc.go | 18 +
.../pkg/v1/remote/transport/error.go | 197 +
.../pkg/v1/remote/transport/logger.go | 91 +
.../pkg/v1/remote/transport/ping.go | 148 +
.../pkg/v1/remote/transport/retry.go | 88 +
.../pkg/v1/remote/transport/schemer.go | 44 +
.../pkg/v1/remote/transport/scope.go | 24 +
.../pkg/v1/remote/transport/transport.go | 103 +
.../pkg/v1/remote/transport/useragent.go | 94 +
.../pkg/v1/remote/write.go | 907 ++
.../pkg/v1/stream/README.md | 68 +
.../pkg/v1/stream/layer.go | 242 +
.../pkg/v1/tarball/README.md | 280 +
.../pkg/v1/tarball/doc.go | 17 +
.../pkg/v1/tarball/image.go | 423 +
.../pkg/v1/tarball/layer.go | 282 +
.../pkg/v1/tarball/write.go | 455 +
.../pkg/v1/types/types.go | 71 +
.../pkg/v1/zz_deepcopy_generated.go | 323 +
.../gnostic/extensions/extension.pb.go | 26 +-
.../gnostic/extensions/extension.proto | 5 +-
.../googleapis/gnostic/openapiv2/OpenAPIv2.go | 5 +-
.../gnostic/openapiv2/OpenAPIv2.pb.go | 27 +-
.../gnostic/openapiv2/OpenAPIv2.proto | 2 +-
.../github.com/gregjones/httpcache/README.md | 4 +
vendor/github.com/hako/durafmt/.gitignore | 24 +
vendor/github.com/hako/durafmt/.travis.yml | 21 +
.../hako/durafmt/CODE_OF_CONDUCT.md | 46 +
.../github.com/hako/durafmt/CONTRIBUTING.md | 9 +
vendor/github.com/hako/durafmt/LICENSE | 21 +
vendor/github.com/hako/durafmt/README.md | 126 +
vendor/github.com/hako/durafmt/durafmt.go | 157 +
vendor/github.com/hashicorp/errwrap/LICENSE | 354 +
vendor/github.com/hashicorp/errwrap/README.md | 89 +
.../github.com/hashicorp/errwrap/errwrap.go | 178 +
vendor/github.com/hashicorp/errwrap/go.mod | 1 +
.../hashicorp/go-multierror/LICENSE | 353 +
.../hashicorp/go-multierror/Makefile | 31 +
.../hashicorp/go-multierror/README.md | 150 +
.../hashicorp/go-multierror/append.go | 43 +
.../hashicorp/go-multierror/flatten.go | 26 +
.../hashicorp/go-multierror/format.go | 27 +
.../github.com/hashicorp/go-multierror/go.mod | 5 +
.../github.com/hashicorp/go-multierror/go.sum | 2 +
.../hashicorp/go-multierror/group.go | 38 +
.../hashicorp/go-multierror/multierror.go | 121 +
.../hashicorp/go-multierror/prefix.go | 37 +
.../hashicorp/go-multierror/sort.go | 16 +
vendor/github.com/heroku/color/.gitignore | 16 +
vendor/github.com/heroku/color/.golangcli.yml | 26 +
vendor/github.com/heroku/color/LICENSE | 21 +
vendor/github.com/heroku/color/Makefile | 21 +
vendor/github.com/heroku/color/README.md | 109 +
vendor/github.com/heroku/color/attributes.go | 175 +
vendor/github.com/heroku/color/cache.go | 53 +
vendor/github.com/heroku/color/color.go | 271 +
vendor/github.com/heroku/color/console.go | 167 +
vendor/github.com/heroku/color/go.mod | 5 +
vendor/github.com/heroku/color/go.sum | 6 +
vendor/github.com/jbenet/go-context/LICENSE | 21 +
.../github.com/jbenet/go-context/io/ctxio.go | 120 +
.../jonboulle/clockwork/.editorconfig | 12 +
.../github.com/jonboulle/clockwork/.gitignore | 27 +
vendor/github.com/jonboulle/clockwork/LICENSE | 201 +
.../github.com/jonboulle/clockwork/README.md | 80 +
.../jonboulle/clockwork/clockwork.go | 195 +
vendor/github.com/jonboulle/clockwork/go.mod | 3 +
.../github.com/jonboulle/clockwork/ticker.go | 72 +
.../github.com/kballard/go-shellquote/LICENSE | 19 +
.../github.com/kballard/go-shellquote/README | 36 +
.../github.com/kballard/go-shellquote/doc.go | 3 +
.../kballard/go-shellquote/quote.go | 102 +
.../kballard/go-shellquote/unquote.go | 156 +
.../kevinburke/ssh_config/.gitattributes | 1 +
.../kevinburke/ssh_config/.gitignore | 0 +
.../github.com/kevinburke/ssh_config/.mailmap | 1 +
.../kevinburke/ssh_config/.travis.yml | 20 +
.../kevinburke/ssh_config/AUTHORS.txt | 6 +
.../github.com/kevinburke/ssh_config/LICENSE | 49 +
.../github.com/kevinburke/ssh_config/Makefile | 30 +
.../kevinburke/ssh_config/README.md | 81 +
.../kevinburke/ssh_config/config.go | 649 +
.../github.com/kevinburke/ssh_config/lexer.go | 240 +
.../kevinburke/ssh_config/parser.go | 191 +
.../kevinburke/ssh_config/position.go | 25 +
.../github.com/kevinburke/ssh_config/token.go | 49 +
.../kevinburke/ssh_config/validators.go | 162 +
.../lucasb-eyer/go-colorful/.gitignore | 101 +
.../lucasb-eyer/go-colorful/CHANGELOG.md | 42 +
.../lucasb-eyer/go-colorful/LICENSE | 7 +
.../lucasb-eyer/go-colorful/README.md | 482 +
.../lucasb-eyer/go-colorful/colorgens.go | 55 +
.../lucasb-eyer/go-colorful/colors.go | 979 ++
.../github.com/lucasb-eyer/go-colorful/go.mod | 3 +
.../go-colorful/happy_palettegen.go | 25 +
.../lucasb-eyer/go-colorful/hexcolor.go | 67 +
.../go-colorful/hsluv-snapshot-rev4.json | 1 +
.../lucasb-eyer/go-colorful/hsluv.go | 207 +
.../go-colorful/soft_palettegen.go | 185 +
.../go-colorful/warm_palettegen.go | 25 +
vendor/github.com/markbates/pkger/.gitignore | 35 +
.../markbates/pkger/.goreleaser.yml | 45 +
vendor/github.com/markbates/pkger/LICENSE | 21 +
vendor/github.com/markbates/pkger/Makefile | 54 +
vendor/github.com/markbates/pkger/README.md | 432 +
.../github.com/markbates/pkger/SHOULDERS.md | 24 +
vendor/github.com/markbates/pkger/apply.go | 19 +
vendor/github.com/markbates/pkger/go.mod | 8 +
vendor/github.com/markbates/pkger/go.sum | 22 +
.../github.com/markbates/pkger/here/here.go | 14 +
.../markbates/pkger/internal/maps/files.go | 124 +
.../markbates/pkger/internal/maps/infos.go | 103 +
vendor/github.com/markbates/pkger/pkger.go | 95 +
.../markbates/pkger/pkging/embed/embed.go | 55 +
.../markbates/pkger/pkging/embed/file.go | 14 +
.../markbates/pkger/pkging/faces.go | 7 +
.../github.com/markbates/pkger/pkging/file.go | 44 +
.../markbates/pkger/pkging/file_info.go | 65 +
.../markbates/pkger/pkging/mem/add.go | 104 +
.../markbates/pkger/pkging/mem/embed.go | 92 +
.../markbates/pkger/pkging/mem/file.go | 203 +
.../markbates/pkger/pkging/mem/mem.go | 252 +
.../markbates/pkger/pkging/mod_time.go | 24 +
.../markbates/pkger/pkging/pkger.go | 43 +
.../markbates/pkger/pkging/stdos/file.go | 73 +
.../markbates/pkger/pkging/stdos/json.go | 1 +
.../markbates/pkger/pkging/stdos/stdos.go | 223 +
.../github.com/markbates/pkger/pkging/wrap.go | 145 +
vendor/github.com/markbates/pkger/version.go | 4 +
vendor/github.com/mattn/go-colorable/LICENSE | 21 +
.../github.com/mattn/go-colorable/README.md | 48 +
.../mattn/go-colorable/colorable_appengine.go | 38 +
.../mattn/go-colorable/colorable_others.go | 38 +
.../mattn/go-colorable/colorable_windows.go | 1047 ++
vendor/github.com/mattn/go-colorable/go.mod | 8 +
vendor/github.com/mattn/go-colorable/go.sum | 5 +
.../github.com/mattn/go-colorable/go.test.sh | 12 +
.../mattn/go-colorable/noncolorable.go | 59 +
vendor/github.com/mattn/go-isatty/LICENSE | 9 +
vendor/github.com/mattn/go-isatty/README.md | 50 +
vendor/github.com/mattn/go-isatty/doc.go | 2 +
vendor/github.com/mattn/go-isatty/go.mod | 5 +
vendor/github.com/mattn/go-isatty/go.sum | 2 +
vendor/github.com/mattn/go-isatty/go.test.sh | 12 +
.../github.com/mattn/go-isatty/isatty_bsd.go | 19 +
.../mattn/go-isatty/isatty_others.go | 16 +
.../mattn/go-isatty/isatty_plan9.go | 23 +
.../mattn/go-isatty/isatty_solaris.go | 21 +
.../mattn/go-isatty/isatty_tcgets.go | 19 +
.../mattn/go-isatty/isatty_windows.go | 125 +
.../github.com/mattn/go-runewidth/.travis.yml | 16 +
vendor/github.com/mattn/go-runewidth/LICENSE | 21 +
.../github.com/mattn/go-runewidth/README.md | 27 +
vendor/github.com/mattn/go-runewidth/go.mod | 5 +
vendor/github.com/mattn/go-runewidth/go.sum | 2 +
.../github.com/mattn/go-runewidth/go.test.sh | 12 +
.../mattn/go-runewidth/runewidth.go | 241 +
.../mattn/go-runewidth/runewidth_appengine.go | 8 +
.../mattn/go-runewidth/runewidth_js.go | 9 +
.../mattn/go-runewidth/runewidth_posix.go | 82 +
.../mattn/go-runewidth/runewidth_table.go | 437 +
.../mattn/go-runewidth/runewidth_windows.go | 28 +
vendor/github.com/mgutz/ansi/.gitignore | 1 +
vendor/github.com/mgutz/ansi/LICENSE | 9 +
vendor/github.com/mgutz/ansi/README.md | 121 +
vendor/github.com/mgutz/ansi/ansi.go | 285 +
vendor/github.com/mgutz/ansi/doc.go | 65 +
vendor/github.com/mgutz/ansi/print.go | 57 +
.../github.com/mitchellh/ioprogress/LICENSE | 21 +
.../github.com/mitchellh/ioprogress/README.md | 42 +
.../github.com/mitchellh/ioprogress/draw.go | 104 +
.../github.com/mitchellh/ioprogress/reader.go | 107 +
.../moby/spdystream/CONTRIBUTING.md | 13 +
vendor/github.com/moby/spdystream/LICENSE | 202 +
vendor/github.com/moby/spdystream/MAINTAINERS | 40 +
vendor/github.com/moby/spdystream/NOTICE | 5 +
vendor/github.com/moby/spdystream/README.md | 77 +
.../github.com/moby/spdystream/connection.go | 972 ++
vendor/github.com/moby/spdystream/go.mod | 5 +
vendor/github.com/moby/spdystream/go.sum | 2 +
vendor/github.com/moby/spdystream/handlers.go | 52 +
vendor/github.com/moby/spdystream/priority.go | 114 +
.../moby/spdystream/spdy/dictionary.go | 203 +
.../github.com/moby/spdystream/spdy/read.go | 364 +
.../github.com/moby/spdystream/spdy/types.go | 291 +
.../github.com/moby/spdystream/spdy/write.go | 334 +
vendor/github.com/moby/spdystream/stream.go | 343 +
vendor/github.com/moby/spdystream/utils.go | 32 +
vendor/github.com/moby/sys/mount/LICENSE | 202 +
vendor/github.com/moby/sys/mount/doc.go | 4 +
vendor/github.com/moby/sys/mount/flags_bsd.go | 45 +
.../github.com/moby/sys/mount/flags_linux.go | 87 +
.../github.com/moby/sys/mount/flags_unix.go | 139 +
vendor/github.com/moby/sys/mount/go.mod | 8 +
vendor/github.com/moby/sys/mount/go.sum | 5 +
.../github.com/moby/sys/mount/mount_errors.go | 46 +
.../github.com/moby/sys/mount/mount_unix.go | 87 +
.../github.com/moby/sys/mount/mounter_bsd.go | 61 +
.../moby/sys/mount/mounter_linux.go | 73 +
.../moby/sys/mount/mounter_unsupported.go | 7 +
.../moby/sys/mount/sharedsubtree_linux.go | 73 +
vendor/github.com/moby/sys/mountinfo/LICENSE | 202 +
vendor/github.com/moby/sys/mountinfo/doc.go | 44 +
vendor/github.com/moby/sys/mountinfo/go.mod | 5 +
vendor/github.com/moby/sys/mountinfo/go.sum | 2 +
.../moby/sys/mountinfo/mounted_linux.go | 58 +
.../moby/sys/mountinfo/mounted_unix.go | 66 +
.../moby/sys/mountinfo/mountinfo.go | 66 +
.../moby/sys/mountinfo/mountinfo_bsd.go | 67 +
.../moby/sys/mountinfo/mountinfo_filters.go | 63 +
.../moby/sys/mountinfo/mountinfo_linux.go | 221 +
.../sys/mountinfo/mountinfo_unsupported.go | 18 +
.../moby/sys/mountinfo/mountinfo_windows.go | 10 +
vendor/github.com/moby/term/.gitignore | 8 +
vendor/github.com/moby/term/LICENSE | 191 +
vendor/github.com/moby/term/README.md | 36 +
vendor/github.com/moby/term/ascii.go | 66 +
vendor/github.com/moby/term/go.mod | 12 +
vendor/github.com/moby/term/go.sum | 23 +
vendor/github.com/moby/term/proxy.go | 88 +
vendor/github.com/moby/term/tc.go | 19 +
vendor/github.com/moby/term/term.go | 120 +
vendor/github.com/moby/term/term_windows.go | 231 +
vendor/github.com/moby/term/termios.go | 35 +
vendor/github.com/moby/term/termios_bsd.go | 12 +
vendor/github.com/moby/term/termios_nonbsd.go | 12 +
.../moby/term/windows/ansi_reader.go | 252 +
.../moby/term/windows/ansi_writer.go | 56 +
.../github.com/moby/term/windows/console.go | 39 +
vendor/github.com/moby/term/windows/doc.go | 5 +
vendor/github.com/moby/term/winsize.go | 20 +
vendor/github.com/morikuni/aec/LICENSE | 21 +
vendor/github.com/morikuni/aec/README.md | 178 +
vendor/github.com/morikuni/aec/aec.go | 137 +
vendor/github.com/morikuni/aec/ansi.go | 59 +
vendor/github.com/morikuni/aec/builder.go | 388 +
vendor/github.com/morikuni/aec/sample.gif | Bin 0 -> 12548 bytes
vendor/github.com/morikuni/aec/sgr.go | 202 +
.../opencontainers/go-digest/.mailmap | 4 +
.../opencontainers/go-digest/.pullapprove.yml | 28 +
.../opencontainers/go-digest/.travis.yml | 5 +
.../opencontainers/go-digest/CONTRIBUTING.md | 72 +
.../opencontainers/go-digest/LICENSE | 192 +
.../opencontainers/go-digest/LICENSE.docs | 425 +
.../opencontainers/go-digest/MAINTAINERS | 5 +
.../opencontainers/go-digest/README.md | 96 +
.../opencontainers/go-digest/algorithm.go | 193 +
.../opencontainers/go-digest/digest.go | 157 +
.../opencontainers/go-digest/digester.go | 40 +
.../opencontainers/go-digest/doc.go | 62 +
.../opencontainers/go-digest/go.mod | 3 +
.../opencontainers/go-digest/verifiers.go | 46 +
.../opencontainers/image-spec/LICENSE | 191 +
.../image-spec/specs-go/v1/annotations.go | 62 +
.../image-spec/specs-go/v1/config.go | 114 +
.../image-spec/specs-go/v1/descriptor.go | 64 +
.../image-spec/specs-go/v1/index.go | 32 +
.../image-spec/specs-go/v1/layout.go | 28 +
.../image-spec/specs-go/v1/manifest.go | 35 +
.../image-spec/specs-go/v1/mediatype.go | 57 +
.../image-spec/specs-go/version.go | 32 +
.../image-spec/specs-go/versioned.go | 23 +
vendor/github.com/opencontainers/runc/LICENSE | 191 +
vendor/github.com/opencontainers/runc/NOTICE | 17 +
.../runc/libcontainer/user/lookup_unix.go | 156 +
.../runc/libcontainer/user/user.go | 604 +
.../runc/libcontainer/user/user_fuzzer.go | 42 +
.../opencontainers/runtime-spec/LICENSE | 191 +
.../runtime-spec/specs-go/config.go | 700 +
.../runtime-spec/specs-go/state.go | 56 +
.../runtime-spec/specs-go/version.go | 18 +
.../github.com/opencontainers/selinux/LICENSE | 201 +
.../opencontainers/selinux/go-selinux/doc.go | 14 +
.../selinux/go-selinux/label/label.go | 97 +
.../selinux/go-selinux/label/label_linux.go | 193 +
.../selinux/go-selinux/label/label_stub.go | 49 +
.../selinux/go-selinux/selinux.go | 284 +
.../selinux/go-selinux/selinux_linux.go | 1212 ++
.../selinux/go-selinux/selinux_stub.go | 154 +
.../selinux/go-selinux/xattrs_linux.go | 38 +
.../selinux/pkg/pwalk/README.md | 42 +
.../opencontainers/selinux/pkg/pwalk/pwalk.go | 104 +
vendor/github.com/ory/viper/.editorconfig | 15 +
vendor/github.com/ory/viper/.gitignore | 4 +
vendor/github.com/ory/viper/.golangci.yml | 27 +
vendor/github.com/ory/viper/.travis.yml | 31 +
vendor/github.com/ory/viper/LICENSE | 21 +
vendor/github.com/ory/viper/Makefile | 76 +
vendor/github.com/ory/viper/README.md | 808 ++
vendor/github.com/ory/viper/flags.go | 57 +
vendor/github.com/ory/viper/go.mod | 27 +
vendor/github.com/ory/viper/go.sum | 83 +
vendor/github.com/ory/viper/util.go | 246 +
vendor/github.com/ory/viper/viper.go | 2277 +++
.../github.com/rivo/tview/CODE_OF_CONDUCT.md | 73 +
vendor/github.com/rivo/tview/CONTRIBUTING.md | 35 +
vendor/github.com/rivo/tview/LICENSE.txt | 21 +
vendor/github.com/rivo/tview/README.md | 116 +
vendor/github.com/rivo/tview/ansi.go | 258 +
vendor/github.com/rivo/tview/application.go | 766 ++
vendor/github.com/rivo/tview/borders.go | 45 +
vendor/github.com/rivo/tview/box.go | 414 +
vendor/github.com/rivo/tview/button.go | 157 +
vendor/github.com/rivo/tview/checkbox.go | 240 +
vendor/github.com/rivo/tview/doc.go | 179 +
vendor/github.com/rivo/tview/dropdown.go | 564 +
vendor/github.com/rivo/tview/flex.go | 239 +
vendor/github.com/rivo/tview/form.go | 679 +
vendor/github.com/rivo/tview/frame.go | 192 +
vendor/github.com/rivo/tview/go.mod | 13 +
vendor/github.com/rivo/tview/go.sum | 26 +
vendor/github.com/rivo/tview/grid.go | 693 +
vendor/github.com/rivo/tview/inputfield.go | 646 +
vendor/github.com/rivo/tview/list.go | 705 +
vendor/github.com/rivo/tview/modal.go | 201 +
vendor/github.com/rivo/tview/pages.go | 315 +
vendor/github.com/rivo/tview/primitive.go | 58 +
vendor/github.com/rivo/tview/semigraphics.go | 294 +
vendor/github.com/rivo/tview/styles.go | 35 +
vendor/github.com/rivo/tview/table.go | 1298 ++
vendor/github.com/rivo/tview/textview.go | 1261 ++
vendor/github.com/rivo/tview/treeview.go | 826 ++
vendor/github.com/rivo/tview/tview.gif | Bin 0 -> 2226085 bytes
vendor/github.com/rivo/tview/util.go | 658 +
vendor/github.com/rivo/uniseg/LICENSE.txt | 21 +
vendor/github.com/rivo/uniseg/README.md | 62 +
vendor/github.com/rivo/uniseg/doc.go | 8 +
vendor/github.com/rivo/uniseg/go.mod | 3 +
vendor/github.com/rivo/uniseg/grapheme.go | 268 +
vendor/github.com/rivo/uniseg/properties.go | 1658 +++
.../sabhiram/go-gitignore/.gitignore | 28 +
.../sabhiram/go-gitignore/.travis.yml | 18 +
.../github.com/sabhiram/go-gitignore/LICENSE | 22 +
.../sabhiram/go-gitignore/README.md | 15 +
.../sabhiram/go-gitignore/ignore.go | 225 +
.../sabhiram/go-gitignore/version_gen.go | 12 +
vendor/github.com/sergi/go-diff/AUTHORS | 25 +
vendor/github.com/sergi/go-diff/CONTRIBUTORS | 32 +
vendor/github.com/sergi/go-diff/LICENSE | 20 +
.../sergi/go-diff/diffmatchpatch/diff.go | 1352 ++
.../go-diff/diffmatchpatch/diffmatchpatch.go | 46 +
.../sergi/go-diff/diffmatchpatch/match.go | 160 +
.../sergi/go-diff/diffmatchpatch/mathutil.go | 23 +
.../diffmatchpatch/operation_string.go | 17 +
.../sergi/go-diff/diffmatchpatch/patch.go | 556 +
.../go-diff/diffmatchpatch/stringutil.go | 106 +
vendor/github.com/sirupsen/logrus/.gitignore | 4 +
.../github.com/sirupsen/logrus/.golangci.yml | 40 +
vendor/github.com/sirupsen/logrus/.travis.yml | 15 +
.../github.com/sirupsen/logrus/CHANGELOG.md | 259 +
vendor/github.com/sirupsen/logrus/LICENSE | 21 +
vendor/github.com/sirupsen/logrus/README.md | 513 +
vendor/github.com/sirupsen/logrus/alt_exit.go | 76 +
.../github.com/sirupsen/logrus/appveyor.yml | 14 +
.../github.com/sirupsen/logrus/buffer_pool.go | 52 +
vendor/github.com/sirupsen/logrus/doc.go | 26 +
vendor/github.com/sirupsen/logrus/entry.go | 431 +
vendor/github.com/sirupsen/logrus/exported.go | 270 +
.../github.com/sirupsen/logrus/formatter.go | 78 +
vendor/github.com/sirupsen/logrus/go.mod | 10 +
vendor/github.com/sirupsen/logrus/go.sum | 8 +
vendor/github.com/sirupsen/logrus/hooks.go | 34 +
.../sirupsen/logrus/json_formatter.go | 128 +
vendor/github.com/sirupsen/logrus/logger.go | 404 +
vendor/github.com/sirupsen/logrus/logrus.go | 186 +
.../logrus/terminal_check_appengine.go | 11 +
.../sirupsen/logrus/terminal_check_bsd.go | 13 +
.../sirupsen/logrus/terminal_check_js.go | 7 +
.../logrus/terminal_check_no_terminal.go | 11 +
.../logrus/terminal_check_notappengine.go | 17 +
.../sirupsen/logrus/terminal_check_solaris.go | 11 +
.../sirupsen/logrus/terminal_check_unix.go | 13 +
.../sirupsen/logrus/terminal_check_windows.go | 27 +
.../sirupsen/logrus/text_formatter.go | 339 +
vendor/github.com/sirupsen/logrus/writer.go | 70 +
vendor/github.com/src-d/gcfg/LICENSE | 28 +
vendor/github.com/src-d/gcfg/README | 4 +
vendor/github.com/src-d/gcfg/doc.go | 145 +
vendor/github.com/src-d/gcfg/errors.go | 41 +
vendor/github.com/src-d/gcfg/go1_0.go | 7 +
vendor/github.com/src-d/gcfg/go1_2.go | 9 +
vendor/github.com/src-d/gcfg/read.go | 273 +
.../github.com/src-d/gcfg/scanner/errors.go | 121 +
.../github.com/src-d/gcfg/scanner/scanner.go | 342 +
vendor/github.com/src-d/gcfg/set.go | 332 +
.../github.com/src-d/gcfg/token/position.go | 435 +
.../github.com/src-d/gcfg/token/serialize.go | 56 +
vendor/github.com/src-d/gcfg/token/token.go | 83 +
vendor/github.com/src-d/gcfg/types/bool.go | 23 +
vendor/github.com/src-d/gcfg/types/doc.go | 4 +
vendor/github.com/src-d/gcfg/types/enum.go | 44 +
vendor/github.com/src-d/gcfg/types/int.go | 86 +
vendor/github.com/src-d/gcfg/types/scan.go | 23 +
.../github.com/stoewer/go-strcase/.gitignore | 17 +
.../stoewer/go-strcase/.golangci.yml | 26 +
vendor/github.com/stoewer/go-strcase/LICENSE | 21 +
.../github.com/stoewer/go-strcase/README.md | 50 +
vendor/github.com/stoewer/go-strcase/camel.go | 37 +
vendor/github.com/stoewer/go-strcase/doc.go | 8 +
vendor/github.com/stoewer/go-strcase/go.mod | 5 +
vendor/github.com/stoewer/go-strcase/go.sum | 11 +
.../github.com/stoewer/go-strcase/helper.go | 71 +
vendor/github.com/stoewer/go-strcase/kebab.go | 14 +
vendor/github.com/stoewer/go-strcase/snake.go | 58 +
vendor/github.com/syndtr/gocapability/LICENSE | 24 +
.../gocapability/capability/capability.go | 133 +
.../capability/capability_linux.go | 642 +
.../capability/capability_noop.go | 19 +
.../syndtr/gocapability/capability/enum.go | 309 +
.../gocapability/capability/enum_gen.go | 138 +
.../gocapability/capability/syscall_linux.go | 154 +
vendor/github.com/tektoncd/cli/LICENSE | 202 +
.../tektoncd/cli/pkg/actions/create.go | 36 +
.../tektoncd/cli/pkg/actions/delete.go | 38 +
.../tektoncd/cli/pkg/actions/get.go | 51 +
.../tektoncd/cli/pkg/actions/gvr.go | 55 +
.../tektoncd/cli/pkg/actions/list.go | 51 +
.../tektoncd/cli/pkg/actions/patch.go | 38 +
.../tektoncd/cli/pkg/actions/watch.go | 38 +
.../tektoncd/cli/pkg/cli/interface.go | 64 +
.../github.com/tektoncd/cli/pkg/cli/params.go | 195 +
.../tektoncd/cli/pkg/formatted/address.go | 23 +
.../tektoncd/cli/pkg/formatted/color.go | 176 +
.../tektoncd/cli/pkg/formatted/completion.go | 31 +
.../tektoncd/cli/pkg/formatted/conditions.go | 33 +
.../tektoncd/cli/pkg/formatted/description.go | 24 +
.../tektoncd/cli/pkg/formatted/k8s.go | 83 +
.../tektoncd/cli/pkg/formatted/param.go | 91 +
.../tektoncd/cli/pkg/formatted/results.go | 23 +
.../tektoncd/cli/pkg/formatted/task.go | 26 +
.../tektoncd/cli/pkg/formatted/time.go | 47 +
.../tektoncd/cli/pkg/formatted/version.go | 22 +
.../tektoncd/cli/pkg/formatted/workspace.go | 91 +
.../cli/pkg/pipelinerun/pipelinerun.go | 211 +
.../cli/pkg/pipelinerun/sort/by_namespace.go | 48 +
.../cli/pkg/pipelinerun/sort/by_start_time.go | 39 +
.../tektoncd/cli/pkg/pipelinerun/tracker.go | 166 +
.../tektoncd/cli/pkg/printer/printer.go | 30 +
.../github.com/tektoncd/cli/pkg/task/task.go | 211 +
.../tektoncd/cli/pkg/task/tasklastrun.go | 74 +
.../tektoncd/cli/pkg/taskrun/create.go | 127 +
.../tektoncd/cli/pkg/taskrun/get.go | 77 +
.../tektoncd/cli/pkg/taskrun/list/list.go | 89 +
.../tektoncd/cli/pkg/taskrun/patch.go | 52 +
.../cli/pkg/taskrun/sort/by_namespace.go | 48 +
.../cli/pkg/taskrun/sort/by_start_time.go | 39 +
.../tektoncd/cli/pkg/taskrun/taskrun.go | 109 +
.../tektoncd/cli/pkg/taskrun/watch.go | 17 +
vendor/github.com/tektoncd/pipeline/LICENSE | 202 +
.../pkg/apis/config/artifact_bucket.go | 112 +
.../pipeline/pkg/apis/config/artifact_pvc.go | 86 +
.../pipeline/pkg/apis/config/default.go | 134 +
.../tektoncd/pipeline/pkg/apis/config/doc.go | 17 +
.../pipeline/pkg/apis/config/feature_flags.go | 170 +
.../pipeline/pkg/apis/config/metrics.go | 148 +
.../pipeline/pkg/apis/config/store.go | 133 +
.../pkg/apis/config/zz_generated.deepcopy.go | 111 +
.../pipeline/pkg/apis/pipeline/controller.go | 39 +
.../pipeline/pkg/apis/pipeline/images.go | 74 +
.../pipeline/pkg/apis/pipeline/options.go | 23 +
.../pipeline/pkg/apis/pipeline/paths.go | 31 +
.../pipeline/pkg/apis/pipeline/pod/doc.go | 20 +
.../pkg/apis/pipeline/pod/template.go | 125 +
.../pipeline/pod/zz_generated.deepcopy.go | 115 +
.../pipeline/pkg/apis/pipeline/register.go | 99 +
.../pkg/apis/pipeline/storagetypes.go | 25 +
.../v1alpha1/cluster_task_conversion.go | 49 +
.../v1alpha1/cluster_task_defaults.go | 30 +
.../pipeline/v1alpha1/cluster_task_types.go | 64 +
.../v1alpha1/cluster_task_validation.go | 37 +
.../pipeline/v1alpha1/condition_defaults.go | 37 +
.../apis/pipeline/v1alpha1/condition_types.go | 101 +
.../pipeline/v1alpha1/condition_validation.go | 59 +
.../v1alpha1/container_replacements.go | 74 +
.../pipeline/v1alpha1/conversion_error.go | 31 +
.../pkg/apis/pipeline/v1alpha1/doc.go | 23 +
.../pkg/apis/pipeline/v1alpha1/param_types.go | 49 +
.../pipeline/v1alpha1/pipeline_conversion.go | 177 +
.../pipeline/v1alpha1/pipeline_defaults.go | 47 +
.../pipeline/v1alpha1/pipeline_interface.go | 26 +
.../v1alpha1/pipeline_resource_types.go | 74 +
.../apis/pipeline/v1alpha1/pipeline_types.go | 259 +
.../pipeline/v1alpha1/pipeline_validation.go | 335 +
.../v1alpha1/pipelinerun_conversion.go | 116 +
.../pipeline/v1alpha1/pipelinerun_defaults.go | 55 +
.../pipeline/v1alpha1/pipelinerun_types.go | 223 +
.../v1alpha1/pipelinerun_validation.go | 85 +
.../pkg/apis/pipeline/v1alpha1/pod.go | 8 +
.../pkg/apis/pipeline/v1alpha1/register.go | 68 +
.../apis/pipeline/v1alpha1/resource_paths.go | 40 +
.../apis/pipeline/v1alpha1/resource_types.go | 109 +
.../apis/pipeline/v1alpha1/run_defaults.go | 45 +
.../pkg/apis/pipeline/v1alpha1/run_types.go | 232 +
.../apis/pipeline/v1alpha1/run_validation.go | 74 +
.../pipeline/v1alpha1/step_replacements.go | 27 +
.../pipeline/v1alpha1/storage_resource.go | 24 +
.../apis/pipeline/v1alpha1/task_conversion.go | 127 +
.../apis/pipeline/v1alpha1/task_defaults.go | 47 +
.../apis/pipeline/v1alpha1/task_interface.go | 26 +
.../pkg/apis/pipeline/v1alpha1/task_types.go | 143 +
.../apis/pipeline/v1alpha1/task_validation.go | 437 +
.../pipeline/v1alpha1/taskrun_conversion.go | 149 +
.../pipeline/v1alpha1/taskrun_defaults.go | 74 +
.../apis/pipeline/v1alpha1/taskrun_types.go | 265 +
.../pipeline/v1alpha1/taskrun_validation.go | 174 +
.../apis/pipeline/v1alpha1/workspace_types.go | 35 +
.../pipeline/v1alpha1/workspace_validation.go | 17 +
.../v1alpha1/zz_generated.deepcopy.go | 1024 ++
.../v1beta1/cluster_task_conversion.go | 36 +
.../pipeline/v1beta1/cluster_task_defaults.go | 30 +
.../pipeline/v1beta1/cluster_task_types.go | 75 +
.../v1beta1/cluster_task_validation.go | 35 +
.../apis/pipeline/v1beta1/condition_types.go | 75 +
.../v1beta1/container_replacements.go | 74 +
.../apis/pipeline/v1beta1/conversion_error.go | 28 +
.../pipeline/pkg/apis/pipeline/v1beta1/doc.go | 23 +
.../pkg/apis/pipeline/v1beta1/merge.go | 91 +
.../pipeline/v1beta1/openapi_generated.go | 4738 +++++++
.../apis/pipeline/v1beta1/param_context.go | 202 +
.../pkg/apis/pipeline/v1beta1/param_types.go | 179 +
.../pipeline/v1beta1/pipeline_conversion.go | 36 +
.../pipeline/v1beta1/pipeline_defaults.go | 75 +
.../pipeline/v1beta1/pipeline_interface.go | 30 +
.../apis/pipeline/v1beta1/pipeline_types.go | 509 +
.../pipeline/v1beta1/pipeline_validation.go | 548 +
.../v1beta1/pipelinerun_conversion.go | 36 +
.../pipeline/v1beta1/pipelinerun_defaults.go | 64 +
.../pipeline/v1beta1/pipelinerun_types.go | 532 +
.../v1beta1/pipelinerun_validation.go | 187 +
.../pipeline/pkg/apis/pipeline/v1beta1/pod.go | 22 +
.../pkg/apis/pipeline/v1beta1/register.go | 65 +
.../apis/pipeline/v1beta1/resource_paths.go | 40 +
.../apis/pipeline/v1beta1/resource_types.go | 281 +
.../v1beta1/resource_types_validation.go | 102 +
.../pkg/apis/pipeline/v1beta1/resultref.go | 155 +
.../pipeline/v1beta1/sidecar_replacements.go | 27 +
.../pipeline/v1beta1/step_replacements.go | 27 +
.../pkg/apis/pipeline/v1beta1/swagger.json | 2641 ++++
.../apis/pipeline/v1beta1/task_conversion.go | 36 +
.../apis/pipeline/v1beta1/task_defaults.go | 41 +
.../apis/pipeline/v1beta1/task_interface.go | 30 +
.../pkg/apis/pipeline/v1beta1/task_types.go | 221 +
.../apis/pipeline/v1beta1/task_validation.go | 400 +
.../pipeline/v1beta1/taskrun_conversion.go | 36 +
.../apis/pipeline/v1beta1/taskrun_defaults.go | 138 +
.../apis/pipeline/v1beta1/taskrun_types.go | 438 +
.../pipeline/v1beta1/taskrun_validation.go | 146 +
.../pipeline/v1beta1/version_validation.go | 38 +
.../pkg/apis/pipeline/v1beta1/when_types.go | 109 +
.../apis/pipeline/v1beta1/when_validation.go | 92 +
.../apis/pipeline/v1beta1/workspace_types.go | 123 +
.../pipeline/v1beta1/workspace_validation.go | 92 +
.../pipeline/v1beta1/zz_generated.deepcopy.go | 1995 +++
.../pkg/apis/resource/v1alpha1/doc.go | 23 +
.../v1alpha1/pipeline_resource_defaults.go | 34 +
.../v1alpha1/pipeline_resource_types.go | 153 +
.../v1alpha1/pipelineresource_validation.go | 157 +
.../pkg/apis/resource/v1alpha1/register.go | 54 +
.../v1alpha1/zz_generated.deepcopy.go | 181 +
.../pkg/apis/run/v1alpha1/runstatus_types.go | 148 +
.../run/v1alpha1/zz_generated.deepcopy.go | 77 +
.../pipeline/pkg/apis/validate/metadata.go | 48 +
.../client/clientset/versioned/clientset.go | 111 +
.../pkg/client/clientset/versioned/doc.go | 20 +
.../client/clientset/versioned/scheme/doc.go | 20 +
.../clientset/versioned/scheme/register.go | 58 +
.../typed/pipeline/v1alpha1/clustertask.go | 168 +
.../typed/pipeline/v1alpha1/condition.go | 178 +
.../versioned/typed/pipeline/v1alpha1/doc.go | 20 +
.../pipeline/v1alpha1/generated_expansion.go | 33 +
.../typed/pipeline/v1alpha1/pipeline.go | 178 +
.../pipeline/v1alpha1/pipeline_client.go | 119 +
.../typed/pipeline/v1alpha1/pipelinerun.go | 195 +
.../versioned/typed/pipeline/v1alpha1/run.go | 195 +
.../versioned/typed/pipeline/v1alpha1/task.go | 178 +
.../typed/pipeline/v1alpha1/taskrun.go | 195 +
.../typed/pipeline/v1beta1/clustertask.go | 168 +
.../versioned/typed/pipeline/v1beta1/doc.go | 20 +
.../pipeline/v1beta1/generated_expansion.go | 29 +
.../typed/pipeline/v1beta1/pipeline.go | 178 +
.../typed/pipeline/v1beta1/pipeline_client.go | 109 +
.../typed/pipeline/v1beta1/pipelinerun.go | 195 +
.../versioned/typed/pipeline/v1beta1/task.go | 178 +
.../typed/pipeline/v1beta1/taskrun.go | 195 +
.../informers/externalversions/factory.go | 180 +
.../informers/externalversions/generic.go | 87 +
.../internalinterfaces/factory_interfaces.go | 40 +
.../externalversions/pipeline/interface.go | 54 +
.../pipeline/v1alpha1/clustertask.go | 89 +
.../pipeline/v1alpha1/condition.go | 90 +
.../pipeline/v1alpha1/interface.go | 87 +
.../pipeline/v1alpha1/pipeline.go | 90 +
.../pipeline/v1alpha1/pipelinerun.go | 90 +
.../externalversions/pipeline/v1alpha1/run.go | 90 +
.../pipeline/v1alpha1/task.go | 90 +
.../pipeline/v1alpha1/taskrun.go | 90 +
.../pipeline/v1beta1/clustertask.go | 89 +
.../pipeline/v1beta1/interface.go | 73 +
.../pipeline/v1beta1/pipeline.go | 90 +
.../pipeline/v1beta1/pipelinerun.go | 90 +
.../externalversions/pipeline/v1beta1/task.go | 90 +
.../pipeline/v1beta1/taskrun.go | 90 +
.../listers/pipeline/v1alpha1/clustertask.go | 68 +
.../listers/pipeline/v1alpha1/condition.go | 99 +
.../pipeline/v1alpha1/expansion_generated.go | 71 +
.../listers/pipeline/v1alpha1/pipeline.go | 99 +
.../listers/pipeline/v1alpha1/pipelinerun.go | 99 +
.../client/listers/pipeline/v1alpha1/run.go | 99 +
.../client/listers/pipeline/v1alpha1/task.go | 99 +
.../listers/pipeline/v1alpha1/taskrun.go | 99 +
.../listers/pipeline/v1beta1/clustertask.go | 68 +
.../pipeline/v1beta1/expansion_generated.go | 55 +
.../listers/pipeline/v1beta1/pipeline.go | 99 +
.../listers/pipeline/v1beta1/pipelinerun.go | 99 +
.../client/listers/pipeline/v1beta1/task.go | 99 +
.../listers/pipeline/v1beta1/taskrun.go | 99 +
.../resource/clientset/versioned/clientset.go | 97 +
.../resource/clientset/versioned/doc.go | 20 +
.../clientset/versioned/scheme/doc.go | 20 +
.../clientset/versioned/scheme/register.go | 56 +
.../versioned/typed/resource/v1alpha1/doc.go | 20 +
.../resource/v1alpha1/generated_expansion.go | 21 +
.../resource/v1alpha1/pipelineresource.go | 178 +
.../resource/v1alpha1/resource_client.go | 89 +
.../tektoncd/pipeline/pkg/list/diff.go | 52 +
.../pkg/reconciler/pipeline/dag/dag.go | 218 +
.../pipeline/pkg/substitution/substitution.go | 209 +
vendor/github.com/tektoncd/triggers/LICENSE | 201 +
.../triggers/pkg/apis/config/allowed_types.go | 30 +
.../triggers/pkg/apis/config/default.go | 74 +
.../tektoncd/triggers/pkg/apis/config/doc.go | 17 +
.../triggers/pkg/apis/config/feature_flags.go | 103 +
.../triggers/pkg/apis/config/store.go | 106 +
.../pkg/apis/config/zz_generated.deepcopy.go | 53 +
.../pkg/apis/triggers/contexts/contexts.go | 38 +
.../triggers/pkg/apis/triggers/register.go | 18 +
.../v1alpha1/cluster_interceptor_defaults.go | 34 +
.../v1alpha1/cluster_interceptor_types.go | 125 +
.../cluster_interceptor_validation.go | 46 +
.../cluster_trigger_binding_defaults.go | 24 +
.../v1alpha1/cluster_trigger_binding_types.go | 69 +
.../cluster_trigger_binding_validation.go | 34 +
.../pkg/apis/triggers/v1alpha1/doc.go | 23 +
.../v1alpha1/event_listener_defaults.go | 55 +
.../triggers/v1alpha1/event_listener_types.go | 320 +
.../v1alpha1/event_listener_validation.go | 299 +
.../triggers/v1alpha1/interceptor_types.go | 88 +
.../pkg/apis/triggers/v1alpha1/param.go | 21 +
.../pkg/apis/triggers/v1alpha1/register.go | 64 +
.../v1alpha1/trigger_binding_defaults.go | 24 +
.../v1alpha1/trigger_binding_interface.go | 26 +
.../v1alpha1/trigger_binding_types.go | 76 +
.../v1alpha1/trigger_binding_validation.go | 98 +
.../triggers/v1alpha1/trigger_defaults.go | 54 +
.../v1alpha1/trigger_template_defaults.go | 24 +
.../v1alpha1/trigger_template_types.go | 67 +
.../v1alpha1/trigger_template_validation.go | 109 +
.../apis/triggers/v1alpha1/trigger_types.go | 284 +
.../v1alpha1/trigger_types_convert.go | 41 +
.../triggers/v1alpha1/trigger_validation.go | 171 +
.../v1alpha1/zz_generated.deepcopy.go | 1193 ++
.../cluster_trigger_binding_defaults.go | 24 +
.../v1beta1/cluster_trigger_binding_types.go | 69 +
.../cluster_trigger_binding_validation.go | 34 +
.../triggers/pkg/apis/triggers/v1beta1/doc.go | 23 +
.../v1beta1/event_listener_defaults.go | 49 +
.../triggers/v1beta1/event_listener_types.go | 346 +
.../v1beta1/event_listener_validation.go | 322 +
.../triggers/v1beta1/interceptor_types.go | 88 +
.../pkg/apis/triggers/v1beta1/param.go | 21 +
.../pkg/apis/triggers/v1beta1/register.go | 62 +
.../v1beta1/trigger_binding_defaults.go | 24 +
.../v1beta1/trigger_binding_interface.go | 26 +
.../triggers/v1beta1/trigger_binding_types.go | 76 +
.../v1beta1/trigger_binding_validation.go | 97 +
.../apis/triggers/v1beta1/trigger_defaults.go | 47 +
.../v1beta1/trigger_template_defaults.go | 24 +
.../v1beta1/trigger_template_types.go | 67 +
.../v1beta1/trigger_template_validation.go | 109 +
.../apis/triggers/v1beta1/trigger_types.go | 193 +
.../triggers/v1beta1/trigger_types_convert.go | 41 +
.../triggers/v1beta1/trigger_validation.go | 136 +
.../triggers/v1beta1/version_validation.go | 38 +
.../triggers/v1beta1/zz_generated.deepcopy.go | 1117 ++
.../triggers/pkg/apis/triggers/validation.go | 39 +
.../client/clientset/versioned/clientset.go | 111 +
.../pkg/client/clientset/versioned/doc.go | 20 +
.../client/clientset/versioned/scheme/doc.go | 20 +
.../clientset/versioned/scheme/register.go | 58 +
.../triggers/v1alpha1/clusterinterceptor.go | 184 +
.../v1alpha1/clustertriggerbinding.go | 184 +
.../versioned/typed/triggers/v1alpha1/doc.go | 20 +
.../typed/triggers/v1alpha1/eventlistener.go | 195 +
.../triggers/v1alpha1/generated_expansion.go | 31 +
.../typed/triggers/v1alpha1/trigger.go | 178 +
.../typed/triggers/v1alpha1/triggerbinding.go | 195 +
.../triggers/v1alpha1/triggers_client.go | 114 +
.../triggers/v1alpha1/triggertemplate.go | 195 +
.../triggers/v1beta1/clustertriggerbinding.go | 184 +
.../versioned/typed/triggers/v1beta1/doc.go | 20 +
.../typed/triggers/v1beta1/eventlistener.go | 195 +
.../triggers/v1beta1/generated_expansion.go | 29 +
.../typed/triggers/v1beta1/trigger.go | 178 +
.../typed/triggers/v1beta1/triggerbinding.go | 195 +
.../typed/triggers/v1beta1/triggers_client.go | 109 +
.../typed/triggers/v1beta1/triggertemplate.go | 195 +
vendor/github.com/vbatts/tar-split/LICENSE | 28 +
.../vbatts/tar-split/archive/tar/common.go | 723 +
.../vbatts/tar-split/archive/tar/format.go | 303 +
.../vbatts/tar-split/archive/tar/reader.go | 923 ++
.../tar-split/archive/tar/stat_actime1.go | 20 +
.../tar-split/archive/tar/stat_actime2.go | 20 +
.../vbatts/tar-split/archive/tar/stat_unix.go | 96 +
.../vbatts/tar-split/archive/tar/strconv.go | 326 +
.../vbatts/tar-split/archive/tar/writer.go | 653 +
vendor/github.com/whilp/git-urls/.travis.yml | 7 +
vendor/github.com/whilp/git-urls/LICENSE | 21 +
vendor/github.com/whilp/git-urls/Makefile | 16 +
vendor/github.com/whilp/git-urls/README.md | 3 +
vendor/github.com/whilp/git-urls/go.mod | 3 +
vendor/github.com/whilp/git-urls/urls.go | 150 +
vendor/github.com/xanzy/ssh-agent/.gitignore | 24 +
vendor/github.com/xanzy/ssh-agent/LICENSE | 202 +
vendor/github.com/xanzy/ssh-agent/README.md | 23 +
vendor/github.com/xanzy/ssh-agent/go.mod | 6 +
vendor/github.com/xanzy/ssh-agent/go.sum | 16 +
.../xanzy/ssh-agent/pageant_windows.go | 143 +
vendor/github.com/xanzy/ssh-agent/sshagent.go | 49 +
.../xanzy/ssh-agent/sshagent_windows.go | 103 +
vendor/go.uber.org/multierr/.travis.yml | 23 -
vendor/go.uber.org/multierr/CHANGELOG.md | 6 +
vendor/go.uber.org/multierr/LICENSE.txt | 2 +-
vendor/go.uber.org/multierr/Makefile | 6 +-
vendor/go.uber.org/multierr/README.md | 8 +-
vendor/go.uber.org/multierr/error.go | 208 +-
vendor/go.uber.org/multierr/go.mod | 5 +-
vendor/go.uber.org/multierr/go.sum | 9 +-
vendor/go.uber.org/multierr/go113.go | 52 -
vendor/golang.org/x/crypto/blowfish/block.go | 159 +
vendor/golang.org/x/crypto/blowfish/cipher.go | 99 +
vendor/golang.org/x/crypto/blowfish/const.go | 199 +
vendor/golang.org/x/crypto/cast5/cast5.go | 533 +
.../x/crypto/chacha20/chacha_arm64.go | 17 +
.../x/crypto/chacha20/chacha_arm64.s | 308 +
.../x/crypto/chacha20/chacha_generic.go | 398 +
.../x/crypto/chacha20/chacha_noasm.go | 14 +
.../x/crypto/chacha20/chacha_ppc64le.go | 17 +
.../x/crypto/chacha20/chacha_ppc64le.s | 450 +
.../x/crypto/chacha20/chacha_s390x.go | 27 +
.../x/crypto/chacha20/chacha_s390x.s | 225 +
vendor/golang.org/x/crypto/chacha20/xor.go | 42 +
.../x/crypto/curve25519/curve25519.go | 145 +
.../x/crypto/curve25519/internal/field/README | 7 +
.../x/crypto/curve25519/internal/field/fe.go | 416 +
.../curve25519/internal/field/fe_amd64.go | 13 +
.../curve25519/internal/field/fe_amd64.s | 379 +
.../internal/field/fe_amd64_noasm.go | 12 +
.../curve25519/internal/field/fe_arm64.go | 16 +
.../curve25519/internal/field/fe_arm64.s | 43 +
.../internal/field/fe_arm64_noasm.go | 12 +
.../curve25519/internal/field/fe_generic.go | 264 +
.../curve25519/internal/field/sync.checkpoint | 1 +
.../crypto/curve25519/internal/field/sync.sh | 19 +
vendor/golang.org/x/crypto/ed25519/ed25519.go | 223 +
.../x/crypto/ed25519/ed25519_go113.go | 74 +
.../ed25519/internal/edwards25519/const.go | 1422 ++
.../internal/edwards25519/edwards25519.go | 1793 +++
.../x/crypto/internal/poly1305/bits_compat.go | 40 +
.../x/crypto/internal/poly1305/bits_go1.13.go | 22 +
.../x/crypto/internal/poly1305/mac_noasm.go | 10 +
.../x/crypto/internal/poly1305/poly1305.go | 99 +
.../x/crypto/internal/poly1305/sum_amd64.go | 48 +
.../x/crypto/internal/poly1305/sum_amd64.s | 109 +
.../x/crypto/internal/poly1305/sum_generic.go | 310 +
.../x/crypto/internal/poly1305/sum_ppc64le.go | 48 +
.../x/crypto/internal/poly1305/sum_ppc64le.s | 182 +
.../x/crypto/internal/poly1305/sum_s390x.go | 76 +
.../x/crypto/internal/poly1305/sum_s390x.s | 504 +
.../x/crypto/internal/subtle/aliasing.go | 33 +
.../crypto/internal/subtle/aliasing_purego.go | 36 +
.../x/crypto/openpgp/armor/armor.go | 230 +
.../x/crypto/openpgp/armor/encode.go | 160 +
.../x/crypto/openpgp/canonical_text.go | 59 +
.../x/crypto/openpgp/elgamal/elgamal.go | 130 +
.../x/crypto/openpgp/errors/errors.go | 78 +
vendor/golang.org/x/crypto/openpgp/keys.go | 693 +
.../x/crypto/openpgp/packet/compressed.go | 123 +
.../x/crypto/openpgp/packet/config.go | 91 +
.../x/crypto/openpgp/packet/encrypted_key.go | 208 +
.../x/crypto/openpgp/packet/literal.go | 89 +
.../x/crypto/openpgp/packet/ocfb.go | 143 +
.../openpgp/packet/one_pass_signature.go | 73 +
.../x/crypto/openpgp/packet/opaque.go | 162 +
.../x/crypto/openpgp/packet/packet.go | 590 +
.../x/crypto/openpgp/packet/private_key.go | 385 +
.../x/crypto/openpgp/packet/public_key.go | 753 +
.../x/crypto/openpgp/packet/public_key_v3.go | 279 +
.../x/crypto/openpgp/packet/reader.go | 76 +
.../x/crypto/openpgp/packet/signature.go | 731 +
.../x/crypto/openpgp/packet/signature_v3.go | 146 +
.../openpgp/packet/symmetric_key_encrypted.go | 155 +
.../openpgp/packet/symmetrically_encrypted.go | 290 +
.../x/crypto/openpgp/packet/userattribute.go | 91 +
.../x/crypto/openpgp/packet/userid.go | 160 +
vendor/golang.org/x/crypto/openpgp/read.go | 448 +
vendor/golang.org/x/crypto/openpgp/s2k/s2k.go | 279 +
vendor/golang.org/x/crypto/openpgp/write.go | 418 +
.../golang.org/x/crypto/ssh/agent/client.go | 813 ++
.../golang.org/x/crypto/ssh/agent/forward.go | 103 +
.../golang.org/x/crypto/ssh/agent/keyring.go | 241 +
.../golang.org/x/crypto/ssh/agent/server.go | 570 +
vendor/golang.org/x/crypto/ssh/buffer.go | 97 +
vendor/golang.org/x/crypto/ssh/certs.go | 546 +
vendor/golang.org/x/crypto/ssh/channel.go | 633 +
vendor/golang.org/x/crypto/ssh/cipher.go | 781 ++
vendor/golang.org/x/crypto/ssh/client.go | 278 +
vendor/golang.org/x/crypto/ssh/client_auth.go | 641 +
vendor/golang.org/x/crypto/ssh/common.go | 404 +
vendor/golang.org/x/crypto/ssh/connection.go | 143 +
vendor/golang.org/x/crypto/ssh/doc.go | 21 +
vendor/golang.org/x/crypto/ssh/handshake.go | 647 +
.../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 +
vendor/golang.org/x/crypto/ssh/kex.go | 782 ++
vendor/golang.org/x/crypto/ssh/keys.go | 1474 ++
.../x/crypto/ssh/knownhosts/knownhosts.go | 540 +
vendor/golang.org/x/crypto/ssh/mac.go | 61 +
vendor/golang.org/x/crypto/ssh/messages.go | 866 ++
vendor/golang.org/x/crypto/ssh/mux.go | 351 +
vendor/golang.org/x/crypto/ssh/server.go | 720 +
vendor/golang.org/x/crypto/ssh/session.go | 647 +
vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 +
vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 +
vendor/golang.org/x/crypto/ssh/tcpip.go | 474 +
vendor/golang.org/x/crypto/ssh/transport.go | 353 +
vendor/golang.org/x/net/http2/server.go | 10 +-
vendor/golang.org/x/net/http2/transport.go | 132 +-
vendor/golang.org/x/net/http2/writesched.go | 4 +-
.../x/net/http2/writesched_random.go | 6 +-
vendor/golang.org/x/net/idna/go118.go | 14 +
vendor/golang.org/x/net/idna/idna10.0.0.go | 6 +-
vendor/golang.org/x/net/idna/idna9.0.0.go | 4 +-
vendor/golang.org/x/net/idna/pre_go118.go | 12 +
vendor/golang.org/x/net/idna/punycode.go | 36 +-
vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s | 18 +
vendor/golang.org/x/sys/cpu/byteorder.go | 65 +
vendor/golang.org/x/sys/cpu/cpu.go | 287 +
vendor/golang.org/x/sys/cpu/cpu_aix.go | 34 +
vendor/golang.org/x/sys/cpu/cpu_arm.go | 73 +
vendor/golang.org/x/sys/cpu/cpu_arm64.go | 172 +
vendor/golang.org/x/sys/cpu/cpu_arm64.s | 32 +
.../cpu/cpu_gc_arm64.go} | 10 +-
vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go | 22 +
vendor/golang.org/x/sys/cpu/cpu_gc_x86.go | 21 +
.../golang.org/x/sys/cpu/cpu_gccgo_arm64.go | 12 +
.../golang.org/x/sys/cpu/cpu_gccgo_s390x.go | 23 +
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c | 43 +
vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go | 33 +
vendor/golang.org/x/sys/cpu/cpu_linux.go | 16 +
vendor/golang.org/x/sys/cpu/cpu_linux_arm.go | 39 +
.../golang.org/x/sys/cpu/cpu_linux_arm64.go | 71 +
.../golang.org/x/sys/cpu/cpu_linux_mips64x.go | 24 +
.../golang.org/x/sys/cpu/cpu_linux_noinit.go | 10 +
.../golang.org/x/sys/cpu/cpu_linux_ppc64x.go | 32 +
.../golang.org/x/sys/cpu/cpu_linux_s390x.go | 40 +
vendor/golang.org/x/sys/cpu/cpu_mips64x.go | 16 +
vendor/golang.org/x/sys/cpu/cpu_mipsx.go | 12 +
.../golang.org/x/sys/cpu/cpu_netbsd_arm64.go | 173 +
vendor/golang.org/x/sys/cpu/cpu_other_arm.go | 10 +
.../cpu/cpu_other_arm64.go} | 8 +-
.../golang.org/x/sys/cpu/cpu_other_mips64x.go | 13 +
vendor/golang.org/x/sys/cpu/cpu_ppc64x.go | 17 +
vendor/golang.org/x/sys/cpu/cpu_riscv64.go | 12 +
vendor/golang.org/x/sys/cpu/cpu_s390x.go | 172 +
vendor/golang.org/x/sys/cpu/cpu_s390x.s | 58 +
vendor/golang.org/x/sys/cpu/cpu_wasm.go | 18 +
vendor/golang.org/x/sys/cpu/cpu_x86.go | 145 +
vendor/golang.org/x/sys/cpu/cpu_x86.s | 28 +
vendor/golang.org/x/sys/cpu/cpu_zos.go | 10 +
vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go | 25 +
vendor/golang.org/x/sys/cpu/hwcap_linux.go | 56 +
.../golang.org/x/sys/cpu/syscall_aix_gccgo.go | 27 +
.../x/sys/cpu/syscall_aix_ppc64_gc.go | 36 +
vendor/golang.org/x/term/go.mod | 4 +-
vendor/golang.org/x/term/go.sum | 4 +-
vendor/golang.org/x/term/term.go | 4 +-
vendor/golang.org/x/term/term_solaris.go | 111 -
vendor/golang.org/x/term/term_unix.go | 4 +-
.../{term_unix_zos.go => term_unix_other.go} | 5 +-
.../api/expr/v1alpha1/checked.pb.go | 1657 +++
.../googleapis/api/expr/v1alpha1/eval.pb.go | 579 +
.../api/expr/v1alpha1/explain.pb.go | 275 +
.../googleapis/api/expr/v1alpha1/syntax.pb.go | 1685 +++
.../googleapis/api/expr/v1alpha1/value.pb.go | 720 +
.../protobuf/types/dynamicpb/dynamic.go | 713 +
.../protobuf/types/known/emptypb/empty.pb.go | 168 +
.../types/known/structpb/struct.pb.go | 810 ++
vendor/gopkg.in/natefinch/npipe.v2/.gitignore | 22 +
.../gopkg.in/natefinch/npipe.v2/LICENSE.txt | 8 +
vendor/gopkg.in/natefinch/npipe.v2/README.md | 308 +
vendor/gopkg.in/natefinch/npipe.v2/doc.go | 50 +
.../natefinch/npipe.v2/npipe_windows.go | 531 +
.../natefinch/npipe.v2/znpipe_windows_386.go | 124 +
.../npipe.v2/znpipe_windows_amd64.go | 124 +
vendor/gopkg.in/src-d/go-billy.v4/.gitignore | 4 +
vendor/gopkg.in/src-d/go-billy.v4/.travis.yml | 17 +
vendor/gopkg.in/src-d/go-billy.v4/DCO | 25 +
vendor/gopkg.in/src-d/go-billy.v4/LICENSE | 201 +
vendor/gopkg.in/src-d/go-billy.v4/MAINTAINERS | 1 +
vendor/gopkg.in/src-d/go-billy.v4/Makefile | 25 + vendor/gopkg.in/src-d/go-billy.v4/README.md | 72 + .../gopkg.in/src-d/go-billy.v4/appveyor.yml | 15 + vendor/gopkg.in/src-d/go-billy.v4/fs.go | 202 + vendor/gopkg.in/src-d/go-billy.v4/go.mod | 8 + vendor/gopkg.in/src-d/go-billy.v4/go.sum | 12 + .../src-d/go-billy.v4/helper/chroot/chroot.go | 242 + .../go-billy.v4/helper/polyfill/polyfill.go | 105 + vendor/gopkg.in/src-d/go-billy.v4/osfs/os.go | 139 + .../src-d/go-billy.v4/osfs/os_posix.go | 21 + .../src-d/go-billy.v4/osfs/os_windows.go | 57 + .../gopkg.in/src-d/go-billy.v4/util/glob.go | 111 + .../gopkg.in/src-d/go-billy.v4/util/util.go | 224 + vendor/gopkg.in/src-d/go-git.v4/.gitignore | 4 + vendor/gopkg.in/src-d/go-git.v4/.travis.yml | 37 + .../src-d/go-git.v4/CODE_OF_CONDUCT.md | 74 + .../gopkg.in/src-d/go-git.v4/COMPATIBILITY.md | 111 + .../gopkg.in/src-d/go-git.v4/CONTRIBUTING.md | 59 + vendor/gopkg.in/src-d/go-git.v4/DCO | 36 + vendor/gopkg.in/src-d/go-git.v4/LICENSE | 201 + vendor/gopkg.in/src-d/go-git.v4/MAINTAINERS | 3 + vendor/gopkg.in/src-d/go-git.v4/Makefile | 52 + vendor/gopkg.in/src-d/go-git.v4/README.md | 123 + vendor/gopkg.in/src-d/go-git.v4/appveyor.yml | 21 + vendor/gopkg.in/src-d/go-git.v4/blame.go | 302 + vendor/gopkg.in/src-d/go-git.v4/common.go | 22 + .../gopkg.in/src-d/go-git.v4/config/branch.go | 90 + .../gopkg.in/src-d/go-git.v4/config/config.go | 407 + .../src-d/go-git.v4/config/modules.go | 139 + .../src-d/go-git.v4/config/refspec.go | 150 + vendor/gopkg.in/src-d/go-git.v4/doc.go | 10 + vendor/gopkg.in/src-d/go-git.v4/go.mod | 29 + vendor/gopkg.in/src-d/go-git.v4/go.sum | 92 + .../go-git.v4/internal/revision/parser.go | 622 + .../go-git.v4/internal/revision/scanner.go | 117 + .../go-git.v4/internal/revision/token.go | 28 + .../src-d/go-git.v4/internal/url/url.go | 37 + .../gopkg.in/src-d/go-git.v4/object_walker.go | 104 + vendor/gopkg.in/src-d/go-git.v4/options.go | 492 + .../go-git.v4/plumbing/cache/buffer_lru.go | 98 + .../src-d/go-git.v4/plumbing/cache/common.go | 39 + .../go-git.v4/plumbing/cache/object_lru.go | 101 + .../src-d/go-git.v4/plumbing/error.go | 35 + .../go-git.v4/plumbing/filemode/filemode.go | 188 + .../plumbing/format/config/common.go | 99 + .../plumbing/format/config/decoder.go | 37 + .../go-git.v4/plumbing/format/config/doc.go | 122 + .../plumbing/format/config/encoder.go | 77 + .../plumbing/format/config/option.go | 117 + .../plumbing/format/config/section.go | 146 + .../go-git.v4/plumbing/format/diff/patch.go | 58 + .../plumbing/format/diff/unified_encoder.go | 360 + .../plumbing/format/gitignore/dir.go | 136 + .../plumbing/format/gitignore/doc.go | 70 + .../plumbing/format/gitignore/matcher.go | 30 + .../plumbing/format/gitignore/pattern.go | 153 + .../plumbing/format/idxfile/decoder.go | 177 + .../go-git.v4/plumbing/format/idxfile/doc.go | 128 + .../plumbing/format/idxfile/encoder.go | 142 + .../plumbing/format/idxfile/idxfile.go | 346 + .../plumbing/format/idxfile/writer.go | 186 + .../plumbing/format/index/decoder.go | 477 + .../go-git.v4/plumbing/format/index/doc.go | 360 + .../plumbing/format/index/encoder.go | 150 + .../go-git.v4/plumbing/format/index/index.go | 213 + .../go-git.v4/plumbing/format/index/match.go | 186 + .../go-git.v4/plumbing/format/objfile/doc.go | 2 + .../plumbing/format/objfile/reader.go | 114 + .../plumbing/format/objfile/writer.go | 109 + .../plumbing/format/packfile/common.go | 78 + .../plumbing/format/packfile/delta_index.go | 297 + .../format/packfile/delta_selector.go | 369 + 
.../plumbing/format/packfile/diff_delta.go | 200 + .../go-git.v4/plumbing/format/packfile/doc.go | 39 + .../plumbing/format/packfile/encoder.go | 219 + .../plumbing/format/packfile/error.go | 30 + .../plumbing/format/packfile/fsobject.go | 116 + .../plumbing/format/packfile/object_pack.go | 164 + .../plumbing/format/packfile/packfile.go | 562 + .../plumbing/format/packfile/parser.go | 483 + .../plumbing/format/packfile/patch_delta.go | 229 + .../plumbing/format/packfile/scanner.go | 466 + .../plumbing/format/pktline/encoder.go | 122 + .../plumbing/format/pktline/scanner.go | 134 + .../gopkg.in/src-d/go-git.v4/plumbing/hash.go | 73 + .../src-d/go-git.v4/plumbing/memory.go | 61 + .../src-d/go-git.v4/plumbing/object.go | 111 + .../src-d/go-git.v4/plumbing/object/blob.go | 144 + .../src-d/go-git.v4/plumbing/object/change.go | 157 + .../plumbing/object/change_adaptor.go | 61 + .../src-d/go-git.v4/plumbing/object/commit.go | 430 + .../plumbing/object/commit_walker.go | 327 + .../plumbing/object/commit_walker_bfs.go | 100 + .../object/commit_walker_bfs_filtered.go | 176 + .../plumbing/object/commit_walker_ctime.go | 103 + .../plumbing/object/commit_walker_file.go | 145 + .../src-d/go-git.v4/plumbing/object/common.go | 12 + .../go-git.v4/plumbing/object/difftree.go | 37 + .../src-d/go-git.v4/plumbing/object/file.go | 137 + .../go-git.v4/plumbing/object/merge_base.go | 210 + .../src-d/go-git.v4/plumbing/object/object.go | 237 + .../src-d/go-git.v4/plumbing/object/patch.go | 346 + .../src-d/go-git.v4/plumbing/object/tag.go | 357 + .../src-d/go-git.v4/plumbing/object/tree.go | 520 + .../go-git.v4/plumbing/object/treenoder.go | 136 + .../plumbing/protocol/packp/advrefs.go | 203 + .../plumbing/protocol/packp/advrefs_decode.go | 288 + .../plumbing/protocol/packp/advrefs_encode.go | 176 + .../protocol/packp/capability/capability.go | 252 + .../protocol/packp/capability/list.go | 196 + .../plumbing/protocol/packp/common.go | 70 + .../go-git.v4/plumbing/protocol/packp/doc.go | 724 + .../plumbing/protocol/packp/report_status.go | 165 + .../plumbing/protocol/packp/shallowupd.go | 92 + .../protocol/packp/sideband/common.go | 33 + .../plumbing/protocol/packp/sideband/demux.go | 148 + .../plumbing/protocol/packp/sideband/doc.go | 31 + .../plumbing/protocol/packp/sideband/muxer.go | 65 + .../plumbing/protocol/packp/srvresp.go | 127 + .../plumbing/protocol/packp/ulreq.go | 168 + .../plumbing/protocol/packp/ulreq_decode.go | 257 + .../plumbing/protocol/packp/ulreq_encode.go | 145 + .../plumbing/protocol/packp/updreq.go | 122 + .../plumbing/protocol/packp/updreq_decode.go | 250 + .../plumbing/protocol/packp/updreq_encode.go | 75 + .../plumbing/protocol/packp/uppackreq.go | 98 + .../plumbing/protocol/packp/uppackresp.go | 109 + .../src-d/go-git.v4/plumbing/reference.go | 209 + .../src-d/go-git.v4/plumbing/revision.go | 11 + .../go-git.v4/plumbing/revlist/revlist.go | 230 + .../src-d/go-git.v4/plumbing/storer/doc.go | 2 + .../src-d/go-git.v4/plumbing/storer/index.go | 9 + .../src-d/go-git.v4/plumbing/storer/object.go | 288 + .../go-git.v4/plumbing/storer/reference.go | 240 + .../go-git.v4/plumbing/storer/shallow.go | 10 + .../src-d/go-git.v4/plumbing/storer/storer.go | 15 + .../plumbing/transport/client/client.go | 48 + .../go-git.v4/plumbing/transport/common.go | 274 + .../plumbing/transport/file/client.go | 156 + .../plumbing/transport/file/server.go | 53 + .../plumbing/transport/git/common.go | 109 + .../plumbing/transport/http/common.go | 281 + .../plumbing/transport/http/receive_pack.go | 106 + 
.../plumbing/transport/http/upload_pack.go | 123 + .../transport/internal/common/common.go | 467 + .../transport/internal/common/server.go | 73 + .../plumbing/transport/server/loader.go | 64 + .../plumbing/transport/server/server.go | 422 + .../plumbing/transport/ssh/auth_method.go | 322 + .../plumbing/transport/ssh/common.go | 228 + vendor/gopkg.in/src-d/go-git.v4/prune.go | 66 + vendor/gopkg.in/src-d/go-git.v4/references.go | 264 + vendor/gopkg.in/src-d/go-git.v4/remote.go | 1114 ++ vendor/gopkg.in/src-d/go-git.v4/repository.go | 1545 +++ vendor/gopkg.in/src-d/go-git.v4/status.go | 79 + .../go-git.v4/storage/filesystem/config.go | 61 + .../storage/filesystem/deltaobject.go | 37 + .../storage/filesystem/dotgit/dotgit.go | 1099 ++ .../dotgit/dotgit_rewrite_packed_refs.go | 81 + .../filesystem/dotgit/dotgit_setref.go | 90 + .../storage/filesystem/dotgit/writers.go | 284 + .../go-git.v4/storage/filesystem/index.go | 54 + .../go-git.v4/storage/filesystem/module.go | 20 + .../go-git.v4/storage/filesystem/object.go | 815 ++ .../go-git.v4/storage/filesystem/reference.go | 44 + .../go-git.v4/storage/filesystem/shallow.go | 54 + .../go-git.v4/storage/filesystem/storage.go | 73 + .../src-d/go-git.v4/storage/memory/storage.go | 320 + .../src-d/go-git.v4/storage/storer.go | 30 + vendor/gopkg.in/src-d/go-git.v4/submodule.go | 357 + .../src-d/go-git.v4/utils/binary/read.go | 180 + .../src-d/go-git.v4/utils/binary/write.go | 50 + .../src-d/go-git.v4/utils/diff/diff.go | 61 + .../src-d/go-git.v4/utils/ioutil/common.go | 170 + .../go-git.v4/utils/merkletrie/change.go | 149 + .../go-git.v4/utils/merkletrie/difftree.go | 424 + .../src-d/go-git.v4/utils/merkletrie/doc.go | 34 + .../go-git.v4/utils/merkletrie/doubleiter.go | 187 + .../utils/merkletrie/filesystem/node.go | 196 + .../go-git.v4/utils/merkletrie/index/node.go | 90 + .../utils/merkletrie/internal/frame/frame.go | 91 + .../src-d/go-git.v4/utils/merkletrie/iter.go | 216 + .../go-git.v4/utils/merkletrie/noder/noder.go | 59 + .../go-git.v4/utils/merkletrie/noder/path.go | 90 + vendor/gopkg.in/src-d/go-git.v4/worktree.go | 954 ++ .../gopkg.in/src-d/go-git.v4/worktree_bsd.go | 26 + .../src-d/go-git.v4/worktree_commit.go | 228 + .../src-d/go-git.v4/worktree_linux.go | 26 + .../src-d/go-git.v4/worktree_status.go | 660 + .../src-d/go-git.v4/worktree_unix_other.go | 26 + .../src-d/go-git.v4/worktree_windows.go | 35 + vendor/gopkg.in/warnings.v0/LICENSE | 24 + vendor/gopkg.in/warnings.v0/README | 77 + vendor/gopkg.in/warnings.v0/warnings.go | 194 + .../apimachinery/pkg/util/httpstream/doc.go | 19 + .../pkg/util/httpstream/httpstream.go | 157 + .../pkg/util/httpstream/spdy/connection.go | 187 + .../pkg/util/httpstream/spdy/roundtripper.go | 369 + .../pkg/util/httpstream/spdy/upgrade.go | 120 + .../k8s.io/apimachinery/pkg/util/rand/rand.go | 127 + .../pkg/util/remotecommand/constants.go | 53 + .../third_party/forked/golang/netutil/addr.go | 27 + .../client-go/tools/remotecommand/doc.go | 20 + .../tools/remotecommand/errorstream.go | 55 + .../client-go/tools/remotecommand/reader.go | 41 + .../tools/remotecommand/remotecommand.go | 142 + .../client-go/tools/remotecommand/resize.go | 33 + .../client-go/tools/remotecommand/v1.go | 160 + .../client-go/tools/remotecommand/v2.go | 200 + .../client-go/tools/remotecommand/v3.go | 111 + .../client-go/tools/remotecommand/v4.go | 119 + .../k8s.io/client-go/transport/spdy/spdy.go | 105 + vendor/k8s.io/client-go/util/exec/exec.go | 52 + vendor/k8s.io/gengo/args/args.go | 6 + .../deepcopy-gen/generators/deepcopy.go | 2 +- 
vendor/k8s.io/gengo/generator/execute.go | 21 +- vendor/k8s.io/gengo/generator/generator.go | 3 + vendor/k8s.io/gengo/parser/parse.go | 91 +- vendor/k8s.io/gengo/types/types.go | 10 +- vendor/k8s.io/klog/.travis.yml | 16 - vendor/k8s.io/klog/CONTRIBUTING.md | 22 - vendor/k8s.io/klog/LICENSE | 191 - vendor/k8s.io/klog/README.md | 97 - vendor/k8s.io/klog/RELEASE.md | 9 - vendor/k8s.io/klog/SECURITY_CONTACTS | 20 - vendor/k8s.io/klog/code-of-conduct.md | 3 - vendor/k8s.io/klog/go.mod | 5 - vendor/k8s.io/klog/go.sum | 2 - vendor/k8s.io/klog/klog.go | 1308 -- vendor/k8s.io/klog/klog_file.go | 139 - .../k8s.io/kube-openapi/pkg/common/common.go | 2 +- .../kube-openapi/pkg/generators/openapi.go | 2 +- .../pkg/validation/spec/.gitignore | 2 + .../kube-openapi/pkg/validation/spec/LICENSE | 202 + .../pkg/validation/spec/contact_info.go | 24 + .../pkg/validation/spec/external_docs.go | 24 + .../pkg/validation/spec/header.go | 71 + .../kube-openapi/pkg/validation/spec/info.go | 155 + .../kube-openapi/pkg/validation/spec/items.go | 109 + .../pkg/validation/spec/license.go | 23 + .../pkg/validation/spec/operation.go | 96 + .../pkg/validation/spec/parameter.go | 111 + .../pkg/validation/spec/path_item.go | 74 + .../kube-openapi/pkg/validation/spec/paths.go | 85 + .../kube-openapi/pkg/validation/spec/ref.go | 167 + .../pkg/validation/spec/response.go | 78 + .../pkg/validation/spec/responses.go | 110 + .../pkg/validation/spec/schema.go | 513 + .../pkg/validation/spec/security_scheme.go | 64 + .../pkg/validation/spec/swagger.go | 286 + .../kube-openapi/pkg/validation/spec/tag.go | 59 + vendor/k8s.io/utils/pointer/pointer.go | 174 +- vendor/k8s.io/utils/trace/trace.go | 2 +- vendor/knative.dev/hack/README.md | 7 +- vendor/knative.dev/hack/e2e-tests.sh | 13 +- vendor/knative.dev/hack/infra-library.sh | 26 +- vendor/knative.dev/hack/library.sh | 28 +- vendor/knative.dev/hack/presubmit-tests.sh | 42 +- vendor/knative.dev/hack/release.sh | 15 +- .../knative.dev/kn-plugin-func/.codecov.yaml | 5 + .../knative.dev/kn-plugin-func/.gitattributes | 3 + vendor/knative.dev/kn-plugin-func/.gitignore | 17 + .../knative.dev/kn-plugin-func/.golangci.yaml | 31 + vendor/knative.dev/kn-plugin-func/.ko.yaml | 3 + vendor/knative.dev/kn-plugin-func/AUTHORS | 7 + .../knative.dev/kn-plugin-func/CHANGELOG.md | 572 + vendor/knative.dev/kn-plugin-func/LICENSE | 202 + vendor/knative.dev/kn-plugin-func/Makefile | 170 + vendor/knative.dev/kn-plugin-func/README.md | 17 + .../kn-plugin-func/buildpacks/builder.go | 177 + vendor/knative.dev/kn-plugin-func/ci | 1 + vendor/knative.dev/kn-plugin-func/client.go | 969 ++ .../knative.dev/kn-plugin-func/cmd/build.go | 230 + .../knative.dev/kn-plugin-func/cmd/client.go | 127 + .../kn-plugin-func/cmd/completion.go | 48 + .../kn-plugin-func/cmd/completion_util.go | 130 + .../knative.dev/kn-plugin-func/cmd/config.go | 169 + .../kn-plugin-func/cmd/config_envs.go | 440 + .../kn-plugin-func/cmd/config_labels.go | 291 + .../kn-plugin-func/cmd/config_volumes.go | 257 + .../knative.dev/kn-plugin-func/cmd/create.go | 560 + .../knative.dev/kn-plugin-func/cmd/delete.go | 160 + .../knative.dev/kn-plugin-func/cmd/deploy.go | 381 + .../knative.dev/kn-plugin-func/cmd/format.go | 54 + vendor/knative.dev/kn-plugin-func/cmd/info.go | 169 + .../knative.dev/kn-plugin-func/cmd/invoke.go | 382 + vendor/knative.dev/kn-plugin-func/cmd/list.go | 155 + .../kn-plugin-func/cmd/repository.go | 684 + vendor/knative.dev/kn-plugin-func/cmd/root.go | 429 + vendor/knative.dev/kn-plugin-func/cmd/run.go | 160 + 
.../knative.dev/kn-plugin-func/cmd/version.go | 28 + vendor/knative.dev/kn-plugin-func/config.go | 5 + .../docker/creds/credentials.go | 455 + .../kn-plugin-func/docker/docker_client.go | 154 + .../docker/docker_client_generated.go | 488 + .../kn-plugin-func/docker/pusher.go | 299 + .../kn-plugin-func/docker/runner.go | 290 + .../knative.dev/kn-plugin-func/filesystem.go | 158 + vendor/knative.dev/kn-plugin-func/function.go | 456 + .../kn-plugin-func/function_buildtype.go | 55 + .../kn-plugin-func/function_envs.go | 126 + .../kn-plugin-func/function_git.go | 40 + .../kn-plugin-func/function_labels.go | 75 + .../kn-plugin-func/function_migrations.go | 114 + .../kn-plugin-func/function_options.go | 139 + .../kn-plugin-func/function_volumes.go | 49 + vendor/knative.dev/kn-plugin-func/go.mod | 49 + vendor/knative.dev/kn-plugin-func/go.sum | 2912 ++++ .../kn-plugin-func/http/transport.go | 194 + .../knative.dev/kn-plugin-func/instances.go | 118 + vendor/knative.dev/kn-plugin-func/invoke.go | 177 + vendor/knative.dev/kn-plugin-func/job.go | 92 + .../knative.dev/kn-plugin-func/k8s/client.go | 46 + .../kn-plugin-func/k8s/configmaps.go | 35 + .../knative.dev/kn-plugin-func/k8s/dialer.go | 397 + .../kn-plugin-func/k8s/labels/labels.go | 13 + .../kn-plugin-func/k8s/persistent_volumes.go | 52 + .../kn-plugin-func/k8s/role_bidings.go | 46 + .../knative.dev/kn-plugin-func/k8s/secrets.go | 117 + .../kn-plugin-func/k8s/service_accounts.go | 42 + .../kn-plugin-func/knative/client.go | 51 + .../kn-plugin-func/knative/deployer.go | 761 + .../kn-plugin-func/knative/describer.go | 104 + .../kn-plugin-func/knative/lister.go | 96 + .../kn-plugin-func/knative/remover.go | 43 + .../kn-plugin-func/openshift/openshift.go | 166 + .../kn-plugin-func/pipelines/tekton/client.go | 43 + .../pipelines/tekton/defaults.go | 7 + .../pipelines/tekton/pipeplines_provider.go | 278 + .../pipelines/tekton/resources.go | 171 + .../kn-plugin-func/pipelines/tekton/tasks.go | 116 + vendor/knative.dev/kn-plugin-func/pkged.go | 13 + .../kn-plugin-func/plugin/plugin.go | 71 + .../kn-plugin-func/progress/progress.go | 251 + .../kn-plugin-func/repositories.go | 215 + .../knative.dev/kn-plugin-func/repository.go | 478 + .../knative.dev/kn-plugin-func/sortedset.go | 45 + .../kn-plugin-func/ssh/ssh_agent_conf.go | 12 + .../ssh/ssh_agent_conf_windows.go | 17 + .../kn-plugin-func/ssh/ssh_dialer.go | 455 + .../kn-plugin-func/ssh/terminal.go | 118 + vendor/knative.dev/kn-plugin-func/template.go | 40 + .../knative.dev/kn-plugin-func/templates.go | 190 + .../knative.dev/kn-plugin-func/utils/names.go | 104 + vendor/knative.dev/kn-plugin-func/version.txt | 1 + .../pkg/apis/duck/v1/source_types.go | 10 +- .../pkg/apis/duck/v1/status_types.go | 5 +- .../pkg/apis/duck/v1beta1/status_types.go | 5 +- .../cmd/injection-gen/generators/client.go | 14 +- .../generators/comment_parser.go | 57 +- .../cmd/injection-gen/generators/duck.go | 2 +- .../cmd/injection-gen/generators/factory.go | 2 +- .../injection-gen/generators/fake_client.go | 2 +- .../cmd/injection-gen/generators/fake_duck.go | 2 +- .../injection-gen/generators/fake_factory.go | 2 +- .../generators/fake_filtered_factory.go | 2 +- .../generators/fake_filtered_informer.go | 2 +- .../injection-gen/generators/fake_informer.go | 2 +- .../generators/filtered_factory.go | 3 +- .../generators/filtered_informer.go | 2 +- .../cmd/injection-gen/generators/informer.go | 2 +- .../cmd/injection-gen/generators/packages.go | 35 +- .../generators/reconciler_controller.go | 22 +- 
.../generators/reconciler_controller_stub.go | 24 +-
.../generators/reconciler_reconciler.go | 43 +-
.../generators/reconciler_reconciler_stub.go | 2 +-
.../generators/reconciler_state.go | 2 +-
.../pkg/codegen/cmd/injection-gen/main.go | 2 +-
vendor/knative.dev/pkg/configmap/parse.go | 28 +
vendor/knative.dev/pkg/injection/config.go | 2 +-
vendor/knative.dev/pkg/injection/interface.go | 8 +
vendor/knative.dev/pkg/kmap/lookup.go | 77 +
vendor/knative.dev/pkg/kmap/map.go | 80 +
vendor/knative.dev/pkg/kmeta/map.go | 38 +-
.../knative.dev/pkg/leaderelection/config.go | 6 +-
vendor/knative.dev/pkg/leaderelection/doc.go | 27 +
vendor/knative.dev/pkg/metrics/config.go | 19 +-
vendor/knative.dev/pkg/reconciler/filter.go | 12 +
.../pkg/resolver/addressable_resolver.go | 8 +-
.../knative.dev/pkg/test/logging/logging.go | 2 +-
.../pkg/test/monitoring/monitoring.go | 23 +-
vendor/knative.dev/pkg/test/request.go | 4 +-
.../pkg/test/spoof/error_checks.go | 6 +
vendor/knative.dev/pkg/test/spoof/spoof.go | 10 +-
vendor/modules.txt | 597 +-
2852 files changed, 412263 insertions(+), 2519 deletions(-)
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/.travis.yml
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/CONTRIBUTING.md
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/Gopkg.lock
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/Gopkg.toml
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/LICENSE
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/README.md
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/_tasks.hcl
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/confirm.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/core/template.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/core/write.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/editor.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/filter.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/go.mod
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/go.sum
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/input.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/multiline.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/multiselect.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/password.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/renderer.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/select.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/survey.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/README.md
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/buffered_reader.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/cursor.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/cursor_windows.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/display.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/display_posix.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/display_windows.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/error.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/output.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/output_windows.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/runereader.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_bsd.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_linux.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_posix.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_ppc64le.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_windows.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/sequences.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/stdio.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/syscall_windows.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/terminal/terminal.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/transform.go
create mode 100644 vendor/github.com/AlecAivazis/survey/v2/validate.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/LICENSE
create mode 100644 vendor/github.com/Azure/go-ansiterm/README.md
create mode 100644 vendor/github.com/Azure/go-ansiterm/constants.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/context.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/csi_param_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/escape_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/event_handler.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/ground_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/osc_string_state.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/parser.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/parser_actions.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/states.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/utilities.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/api.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
create mode 100644 vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
create mode 100644 vendor/github.com/BurntSushi/toml/.gitignore
create mode 100644 vendor/github.com/BurntSushi/toml/COMPATIBLE
create mode 100644 vendor/github.com/BurntSushi/toml/COPYING
create mode 100644 vendor/github.com/BurntSushi/toml/README.md
create mode 100644 vendor/github.com/BurntSushi/toml/decode.go
create mode 100644 vendor/github.com/BurntSushi/toml/decode_go116.go
create mode 100644 vendor/github.com/BurntSushi/toml/decode_meta.go
create mode 100644 vendor/github.com/BurntSushi/toml/deprecated.go
create mode 100644 vendor/github.com/BurntSushi/toml/doc.go
create mode 100644 vendor/github.com/BurntSushi/toml/encode.go
create mode 100644 vendor/github.com/BurntSushi/toml/go.mod
create mode 100644 vendor/github.com/BurntSushi/toml/go.sum
create mode 100644 vendor/github.com/BurntSushi/toml/internal/tz.go
create mode 100644 vendor/github.com/BurntSushi/toml/lex.go
create mode 100644 vendor/github.com/BurntSushi/toml/parse.go
create mode 100644 vendor/github.com/BurntSushi/toml/type_check.go
create mode 100644 vendor/github.com/BurntSushi/toml/type_fields.go
create mode 100644 vendor/github.com/Masterminds/semver/.travis.yml
create mode 100644 vendor/github.com/Masterminds/semver/CHANGELOG.md
create mode 100644 vendor/github.com/Masterminds/semver/LICENSE.txt
create mode 100644 vendor/github.com/Masterminds/semver/Makefile
create mode 100644 vendor/github.com/Masterminds/semver/README.md
create mode 100644 vendor/github.com/Masterminds/semver/appveyor.yml
create mode 100644 vendor/github.com/Masterminds/semver/collection.go
create mode 100644 vendor/github.com/Masterminds/semver/constraints.go
create mode 100644 vendor/github.com/Masterminds/semver/doc.go
create mode 100644 vendor/github.com/Masterminds/semver/version.go
create mode 100644 vendor/github.com/Masterminds/semver/version_fuzz.go
create mode 100644 vendor/github.com/Microsoft/go-winio/.gitignore
create mode 100644 vendor/github.com/Microsoft/go-winio/CODEOWNERS
create mode 100644 vendor/github.com/Microsoft/go-winio/LICENSE
create mode 100644 vendor/github.com/Microsoft/go-winio/README.md
create mode 100644 vendor/github.com/Microsoft/go-winio/backup.go
create mode 100644 vendor/github.com/Microsoft/go-winio/ea.go
create mode 100644 vendor/github.com/Microsoft/go-winio/file.go
create mode 100644 vendor/github.com/Microsoft/go-winio/fileinfo.go
create mode 100644 vendor/github.com/Microsoft/go-winio/go.mod
create mode 100644 vendor/github.com/Microsoft/go-winio/go.sum
create mode 100644 vendor/github.com/Microsoft/go-winio/hvsock.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pipe.go
create mode 100644 vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
create mode 100644 vendor/github.com/Microsoft/go-winio/privilege.go
create mode 100644 vendor/github.com/Microsoft/go-winio/reparse.go
create mode 100644 vendor/github.com/Microsoft/go-winio/sd.go
create mode 100644 vendor/github.com/Microsoft/go-winio/syscall.go
create mode 100644 vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/LICENSE
create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
create mode 100644 vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/AUTHORS
create mode 100644 vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS
create mode 100644 vendor/github.com/ProtonMail/go-crypto/LICENSE
create mode 100644 vendor/github.com/ProtonMail/go-crypto/PATENTS
create mode 100644 vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/eax/eax.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/read.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
create mode 100644 vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
create mode 100644 vendor/github.com/acomagu/bufpipe/README.md
create mode 100644 vendor/github.com/acomagu/bufpipe/bufpipe.go
create mode 100644 vendor/github.com/acomagu/bufpipe/doc.go
create mode 100644 vendor/github.com/acomagu/bufpipe/go.mod
create mode 100644 vendor/github.com/acomagu/bufpipe/go.sum
create mode 100644 vendor/github.com/antlr/antlr4/LICENSE.txt
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trace_listener.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
create mode 100644 vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
create mode 100644 vendor/github.com/apex/log/.gitignore
create mode 100644 vendor/github.com/apex/log/History.md
create mode 100644 vendor/github.com/apex/log/LICENSE
create mode 100644 vendor/github.com/apex/log/Makefile
create mode 100644 vendor/github.com/apex/log/Readme.md
create mode 100644 vendor/github.com/apex/log/context.go
create mode 100644 vendor/github.com/apex/log/default.go
create mode 100644 vendor/github.com/apex/log/doc.go
create mode 100644 vendor/github.com/apex/log/entry.go
create mode 100644 vendor/github.com/apex/log/go.mod
create mode 100644 vendor/github.com/apex/log/go.sum
create mode 100644 vendor/github.com/apex/log/interface.go
create mode 100644 vendor/github.com/apex/log/levels.go
create mode 100644 vendor/github.com/apex/log/logger.go
create mode 100644 vendor/github.com/apex/log/pkg.go
create mode 100644 vendor/github.com/apex/log/stack.go
create mode 100644 vendor/github.com/bits-and-blooms/bitset/.gitignore
create mode 100644 vendor/github.com/bits-and-blooms/bitset/.travis.yml
create mode 100644 vendor/github.com/bits-and-blooms/bitset/LICENSE
create mode 100644 vendor/github.com/bits-and-blooms/bitset/README.md
create mode 100644 vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml
create mode 100644 vendor/github.com/bits-and-blooms/bitset/bitset.go
create mode 100644 vendor/github.com/bits-and-blooms/bitset/go.mod
create mode 100644 vendor/github.com/bits-and-blooms/bitset/go.sum
create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt.go
create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_19.go
create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go
create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s
create mode 100644 vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go
create mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go
create mode 100644 vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go
create mode 100644 vendor/github.com/buildpacks/imgutil/.gitignore
create mode 100644 vendor/github.com/buildpacks/imgutil/CODEOWNERS
create mode 100644 vendor/github.com/buildpacks/imgutil/LICENSE
create mode 100644 vendor/github.com/buildpacks/imgutil/Makefile
create mode 100644 vendor/github.com/buildpacks/imgutil/README.md
create mode 100644 vendor/github.com/buildpacks/imgutil/go.mod
create mode 100644 vendor/github.com/buildpacks/imgutil/go.sum
create mode 100644 vendor/github.com/buildpacks/imgutil/golangci.yaml
create mode 100644 vendor/github.com/buildpacks/imgutil/image.go
create mode 100644 vendor/github.com/buildpacks/imgutil/layer/bcdhive_generated.go
create mode 100644 vendor/github.com/buildpacks/imgutil/layer/pax_permissions.go
create mode 100644 vendor/github.com/buildpacks/imgutil/layer/windows_baselayer.go
create mode 100644 vendor/github.com/buildpacks/imgutil/layer/windows_writer.go
create mode 100644 vendor/github.com/buildpacks/imgutil/local/identifier.go
create mode 100644 vendor/github.com/buildpacks/imgutil/local/local.go
create mode 100644 vendor/github.com/buildpacks/imgutil/remote/identifier.go
create mode 100644 vendor/github.com/buildpacks/imgutil/remote/remote.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/.dockerignore
create mode 100644 vendor/github.com/buildpacks/lifecycle/.gitignore
create mode 100644 vendor/github.com/buildpacks/lifecycle/.gitpod.yml
create mode 100644 vendor/github.com/buildpacks/lifecycle/CODEOWNERS
create mode 100644 vendor/github.com/buildpacks/lifecycle/CONTRIBUTING.md
create mode 100644 vendor/github.com/buildpacks/lifecycle/DEVELOPMENT.md
create mode 100644 vendor/github.com/buildpacks/lifecycle/IMAGE.md
create mode 100644 vendor/github.com/buildpacks/lifecycle/LICENSE
create mode 100644 vendor/github.com/buildpacks/lifecycle/Makefile
create mode 100644 vendor/github.com/buildpacks/lifecycle/README.md
create mode 100644 vendor/github.com/buildpacks/lifecycle/RELEASE.md
create mode 100644 vendor/github.com/buildpacks/lifecycle/analyzer.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/api/apis.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/api/version.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/archive/archive.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/archive/extract.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/archive/reader.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/archive/tar_windows.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/archive/writer.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/auth/env_keychain.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/builder.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/build.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/descriptor.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/detect.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/error.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/files.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/layertypes/types.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/store.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/v05/encoderdecoder.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/buildpack/v06/encoderdecoder.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cache.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cmd/command.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cmd/exit.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cmd/flags.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cmd/flags_unix.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cmd/flags_windows.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cmd/logs.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/cmd/version.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/codecov.yml
create mode 100644 vendor/github.com/buildpacks/lifecycle/cosign.pub
create mode 100644 vendor/github.com/buildpacks/lifecycle/detector.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/env/build.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/env/env.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/env/launch.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/env/vars.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/exporter.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/go.mod
create mode 100644 vendor/github.com/buildpacks/lifecycle/go.sum
create mode 100644 vendor/github.com/buildpacks/lifecycle/golangci.yaml
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/bash.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/cmd.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/exec_d.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/exec_d_windows.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/launch.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/launcher.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/launcher_windows.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/process.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/launch/shell.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layer_metadata_restorer.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers/digest.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers/dir.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers/extract.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers/factory.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers/launcher.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers/slices.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/layers/writer.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/lifecycle.toml
create mode 100644 vendor/github.com/buildpacks/lifecycle/logger.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/cache.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/common/platform.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/files.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/labels.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/platform.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/pre06/exit.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/pre06/platform.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/v06/exit.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/v06/platform.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/v07/exit.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/platform/v07/platform.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/rebaser.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/restorer.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/save.go
create mode 100644 vendor/github.com/buildpacks/lifecycle/utils.go
create mode 100644 vendor/github.com/buildpacks/pack/.gitignore
create mode 100644 vendor/github.com/buildpacks/pack/.gitpod.yml
create mode 100644 vendor/github.com/buildpacks/pack/CODEOWNERS
create mode 100644 vendor/github.com/buildpacks/pack/CONTRIBUTING.md
create mode 100644 vendor/github.com/buildpacks/pack/DEVELOPMENT.md
create mode 100644 vendor/github.com/buildpacks/pack/LICENSE
create mode 100644 vendor/github.com/buildpacks/pack/Makefile
create mode 100644 vendor/github.com/buildpacks/pack/README.md
create mode 100644 vendor/github.com/buildpacks/pack/RELEASE.md
create mode 100644 vendor/github.com/buildpacks/pack/builder/config_reader.go
create mode 100644 vendor/github.com/buildpacks/pack/builder/detection_order.go
create mode 100644 vendor/github.com/buildpacks/pack/buildpack.yml
create mode 100644 vendor/github.com/buildpacks/pack/buildpackage/config_reader.go
create mode 100644 vendor/github.com/buildpacks/pack/codecov.yml
create mode 100644 vendor/github.com/buildpacks/pack/go.mod
create mode 100644 vendor/github.com/buildpacks/pack/go.sum
create mode 100644 vendor/github.com/buildpacks/pack/golangci.yaml
create mode 100644 vendor/github.com/buildpacks/pack/internal/build/container_ops.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/build/lifecycle_execution.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/build/lifecycle_executor.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/build/mount_paths.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/build/phase.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/build/phase_config_provider.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/build/phase_factory.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/builder.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/descriptor.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/detection_order_calculator.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/image_fetcher_wrapper.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/inspect.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/label_manager.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/label_manager_provider.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/lifecycle.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/metadata.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/suggested_builder.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/builder/version.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/cache/consts.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/cache/image_cache.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/cache/volume_cache.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/config/config.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/config/config_helpers.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/container/run.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/layer/layer.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/layer/writer_factory.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/name/name.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/paths/paths.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/registry/buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/registry/git.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/registry/github.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/registry/index.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/registry/registry_cache.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/stack/merge.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/stack/mixins.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/stringset/stringset.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/style/style.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/term/term.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/termui/detect.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/termui/logger.go
create mode 100644 vendor/github.com/buildpacks/pack/internal/termui/termui.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/archive/archive.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/archive/tar_builder.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/blob/blob.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/blob/downloader.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/builder.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/buildpackage.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/downloader.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/locator_type.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/oci_layout_package.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/package.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/buildpack/parse_name.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/build.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/client.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/common.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/create_builder.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/errors.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/inspect_builder.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/inspect_buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/inspect_image.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/new_buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/package_buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/pull_buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/rebase.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/register_buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/version.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/client/yank_buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/dist/buildpack.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/dist/buildpack_descriptor.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/dist/dist.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/dist/distribution.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/dist/image.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/dist/layers.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/image/fetcher.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/image/pull_policy.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/logging/logger_simple.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/logging/logger_writers.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/logging/logging.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/logging/prefix_writer.go
create mode 100644 vendor/github.com/buildpacks/pack/pkg/project/types/types.go
create mode 100644 vendor/github.com/buildpacks/pack/project.toml
create mode 100644 vendor/github.com/buildpacks/pack/version.go
create mode 100644 vendor/github.com/cespare/xxhash/LICENSE.txt
create mode 100644 vendor/github.com/cespare/xxhash/README.md
create mode 100644 vendor/github.com/cespare/xxhash/go.mod
create mode 100644 vendor/github.com/cespare/xxhash/go.sum
create mode 100644 vendor/github.com/cespare/xxhash/rotate.go
create mode 100644 vendor/github.com/cespare/xxhash/rotate19.go
create mode 100644 vendor/github.com/cespare/xxhash/xxhash.go
create mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.go
create mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.s
create mode 100644 vendor/github.com/cespare/xxhash/xxhash_other.go
create mode 100644 vendor/github.com/cespare/xxhash/xxhash_safe.go
create mode 100644 vendor/github.com/cespare/xxhash/xxhash_unsafe.go
create mode 100644 vendor/github.com/containerd/containerd/LICENSE
create mode 100644 vendor/github.com/containerd/containerd/NOTICE
create mode 100644 vendor/github.com/containerd/containerd/errdefs/errors.go
create mode 100644 vendor/github.com/containerd/containerd/errdefs/grpc.go
create mode 100644 vendor/github.com/containerd/containerd/log/context.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/compare.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/cpuinfo.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/database.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_unix.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/defaults_windows.go
create mode 100644 vendor/github.com/containerd/containerd/platforms/platforms.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
create mode 100644 vendor/github.com/containerd/stargz-snapshotter/estargz/types.go
create mode 100644 vendor/github.com/containers/image/v5/LICENSE
create mode 100644 vendor/github.com/containers/image/v5/docker/reference/README.md
create mode 100644 vendor/github.com/containers/image/v5/docker/reference/helpers.go
create mode 100644 vendor/github.com/containers/image/v5/docker/reference/normalize.go
create mode 100644 vendor/github.com/containers/image/v5/docker/reference/reference.go
create mode 100644 vendor/github.com/containers/image/v5/docker/reference/regexp.go
create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go
create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go
create mode 100644 vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go
create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
create mode 100644 vendor/github.com/containers/image/v5/pkg/compression/types/types.go
create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config.go
create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go
create mode 100644 vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
create mode 100644 vendor/github.com/containers/image/v5/types/types.go
create mode 100644 vendor/github.com/containers/storage/AUTHORS
create mode 100644 vendor/github.com/containers/storage/LICENSE
create mode 100644 vendor/github.com/containers/storage/NOTICE
create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_others.go
create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/parser.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/idtools/utils_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mount.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mountinfo.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/unmount_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/README.md
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/command_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/reexec/reexec.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chmod.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/chtimes_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/errors.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/exitcode.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/init.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/init_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lchown.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lcow_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lcow_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lstat_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/lstat_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/mknod_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/path.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/path_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/path_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/process_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/rm.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_darwin.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_openbsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_solaris.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/stat_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/syscall_unix.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/syscall_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/umask.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/umask_windows.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare.c
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
create mode 100644 vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
create mode 100644 vendor/github.com/coreos/go-semver/LICENSE
create mode 100644 vendor/github.com/coreos/go-semver/NOTICE
create mode 100644 vendor/github.com/coreos/go-semver/semver/semver.go
create mode 100644 vendor/github.com/coreos/go-semver/semver/sort.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/.deepsource.toml
create mode 100644 vendor/github.com/dgraph-io/ristretto/LICENSE
create mode 100644 vendor/github.com/dgraph-io/ristretto/README.md
create mode 100644 vendor/github.com/dgraph-io/ristretto/cache.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/go.mod
create mode 100644 vendor/github.com/dgraph-io/ristretto/go.sum
create mode 100644 vendor/github.com/dgraph-io/ristretto/policy.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/ring.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/sketch.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/store.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/z/LICENSE
create mode 100644 vendor/github.com/dgraph-io/ristretto/z/README.md
create mode 100644 vendor/github.com/dgraph-io/ristretto/z/bbloom.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/z/rtutil.go
create mode 100644 vendor/github.com/dgraph-io/ristretto/z/rtutil.s
create mode 100644 vendor/github.com/dgraph-io/ristretto/z/z.go
create mode 100644 vendor/github.com/docker/cli/AUTHORS
create mode 100644 vendor/github.com/docker/cli/LICENSE
create mode 100644 vendor/github.com/docker/cli/NOTICE
create mode 100644 vendor/github.com/docker/cli/cli/config/config.go
create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file.go
create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_unix.go
create mode 100644 vendor/github.com/docker/cli/cli/config/configfile/file_windows.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/credentials.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/file_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/credentials/native_store.go
create mode 100644 vendor/github.com/docker/cli/cli/config/types/authconfig.go
create mode 100644 vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go
create mode 100644 vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_linux.go
create mode 100644 vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go
create mode 100644 vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go
create mode 100644
vendor/github.com/docker/cli/cli/connhelper/commandconn/session_windows.go create mode 100644 vendor/github.com/docker/cli/cli/connhelper/connhelper.go create mode 100644 vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go create mode 100644 vendor/github.com/docker/distribution/LICENSE create mode 100644 vendor/github.com/docker/distribution/digestset/set.go create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go create mode 100644 vendor/github.com/docker/distribution/reference/reference.go create mode 100644 vendor/github.com/docker/distribution/reference/regexp.go create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/errors.go create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/handler.go create mode 100644 vendor/github.com/docker/distribution/registry/api/errcode/register.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go create mode 100644 vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go create mode 100644 vendor/github.com/docker/docker-credential-helpers/LICENSE create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/client.go create mode 100644 vendor/github.com/docker/docker-credential-helpers/client/command.go create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/error.go create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/helper.go create mode 100644 vendor/github.com/docker/docker-credential-helpers/credentials/version.go create mode 100644 vendor/github.com/docker/docker/AUTHORS create mode 100644 vendor/github.com/docker/docker/LICENSE create mode 100644 vendor/github.com/docker/docker/NOTICE create mode 100644 vendor/github.com/docker/docker/api/README.md create mode 100644 vendor/github.com/docker/docker/api/common.go create mode 100644 vendor/github.com/docker/docker/api/common_unix.go create mode 100644 vendor/github.com/docker/docker/api/common_windows.go create mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml create mode 100644 vendor/github.com/docker/docker/api/swagger.yaml create mode 100644 vendor/github.com/docker/docker/api/types/auth.go create mode 100644 vendor/github.com/docker/docker/api/types/blkiodev/blkio.go create mode 100644 vendor/github.com/docker/docker/api/types/client.go create mode 100644 vendor/github.com/docker/docker/api/types/configs.go create mode 100644 vendor/github.com/docker/docker/api/types/container/config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_changes.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_top.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_wait.go create mode 100644 vendor/github.com/docker/docker/api/types/container/host_config.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go create mode 100644 vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go create mode 100644 vendor/github.com/docker/docker/api/types/container/waitcondition.go create mode 
100644 vendor/github.com/docker/docker/api/types/error_response.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response_ext.go create mode 100644 vendor/github.com/docker/docker/api/types/events/events.go create mode 100644 vendor/github.com/docker/docker/api/types/filters/parse.go create mode 100644 vendor/github.com/docker/docker/api/types/graph_driver_data.go create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go create mode 100644 vendor/github.com/docker/docker/api/types/image/image_history.go create mode 100644 vendor/github.com/docker/docker/api/types/image_delete_response_item.go create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go create mode 100644 vendor/github.com/docker/docker/api/types/mount/mount.go create mode 100644 vendor/github.com/docker/docker/api/types/network/network.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go create mode 100644 vendor/github.com/docker/docker/api/types/port.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/registry.go create mode 100644 vendor/github.com/docker/docker/api/types/service_update_response.go create mode 100644 vendor/github.com/docker/docker/api/types/stats.go create mode 100644 vendor/github.com/docker/docker/api/types/strslice/strslice.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/common.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/config.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/container.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/network.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/node.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/service.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/swarm.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/task.go create mode 100644 vendor/github.com/docker/docker/api/types/time/duration_convert.go create mode 100644 vendor/github.com/docker/docker/api/types/time/timestamp.go create mode 100644 vendor/github.com/docker/docker/api/types/types.go create mode 100644 vendor/github.com/docker/docker/api/types/versions/README.md create mode 100644 vendor/github.com/docker/docker/api/types/versions/compare.go create mode 100644 vendor/github.com/docker/docker/api/types/volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_create.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volume_list.go create mode 100644 vendor/github.com/docker/docker/client/README.md 
create mode 100644 vendor/github.com/docker/docker/client/build_cancel.go create mode 100644 vendor/github.com/docker/docker/client/build_prune.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_create.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_delete.go create mode 100644 vendor/github.com/docker/docker/client/checkpoint_list.go create mode 100644 vendor/github.com/docker/docker/client/client.go create mode 100644 vendor/github.com/docker/docker/client/client_deprecated.go create mode 100644 vendor/github.com/docker/docker/client/client_unix.go create mode 100644 vendor/github.com/docker/docker/client/client_windows.go create mode 100644 vendor/github.com/docker/docker/client/config_create.go create mode 100644 vendor/github.com/docker/docker/client/config_inspect.go create mode 100644 vendor/github.com/docker/docker/client/config_list.go create mode 100644 vendor/github.com/docker/docker/client/config_remove.go create mode 100644 vendor/github.com/docker/docker/client/config_update.go create mode 100644 vendor/github.com/docker/docker/client/container_attach.go create mode 100644 vendor/github.com/docker/docker/client/container_commit.go create mode 100644 vendor/github.com/docker/docker/client/container_copy.go create mode 100644 vendor/github.com/docker/docker/client/container_create.go create mode 100644 vendor/github.com/docker/docker/client/container_diff.go create mode 100644 vendor/github.com/docker/docker/client/container_exec.go create mode 100644 vendor/github.com/docker/docker/client/container_export.go create mode 100644 vendor/github.com/docker/docker/client/container_inspect.go create mode 100644 vendor/github.com/docker/docker/client/container_kill.go create mode 100644 vendor/github.com/docker/docker/client/container_list.go create mode 100644 vendor/github.com/docker/docker/client/container_logs.go create mode 100644 vendor/github.com/docker/docker/client/container_pause.go create mode 100644 vendor/github.com/docker/docker/client/container_prune.go create mode 100644 vendor/github.com/docker/docker/client/container_remove.go create mode 100644 vendor/github.com/docker/docker/client/container_rename.go create mode 100644 vendor/github.com/docker/docker/client/container_resize.go create mode 100644 vendor/github.com/docker/docker/client/container_restart.go create mode 100644 vendor/github.com/docker/docker/client/container_start.go create mode 100644 vendor/github.com/docker/docker/client/container_stats.go create mode 100644 vendor/github.com/docker/docker/client/container_stop.go create mode 100644 vendor/github.com/docker/docker/client/container_top.go create mode 100644 vendor/github.com/docker/docker/client/container_unpause.go create mode 100644 vendor/github.com/docker/docker/client/container_update.go create mode 100644 vendor/github.com/docker/docker/client/container_wait.go create mode 100644 vendor/github.com/docker/docker/client/disk_usage.go create mode 100644 vendor/github.com/docker/docker/client/distribution_inspect.go create mode 100644 vendor/github.com/docker/docker/client/errors.go create mode 100644 vendor/github.com/docker/docker/client/events.go create mode 100644 vendor/github.com/docker/docker/client/hijack.go create mode 100644 vendor/github.com/docker/docker/client/image_build.go create mode 100644 vendor/github.com/docker/docker/client/image_create.go create mode 100644 vendor/github.com/docker/docker/client/image_history.go create mode 100644 
vendor/github.com/docker/docker/client/image_import.go create mode 100644 vendor/github.com/docker/docker/client/image_inspect.go create mode 100644 vendor/github.com/docker/docker/client/image_list.go create mode 100644 vendor/github.com/docker/docker/client/image_load.go create mode 100644 vendor/github.com/docker/docker/client/image_prune.go create mode 100644 vendor/github.com/docker/docker/client/image_pull.go create mode 100644 vendor/github.com/docker/docker/client/image_push.go create mode 100644 vendor/github.com/docker/docker/client/image_remove.go create mode 100644 vendor/github.com/docker/docker/client/image_save.go create mode 100644 vendor/github.com/docker/docker/client/image_search.go create mode 100644 vendor/github.com/docker/docker/client/image_tag.go create mode 100644 vendor/github.com/docker/docker/client/info.go create mode 100644 vendor/github.com/docker/docker/client/interface.go create mode 100644 vendor/github.com/docker/docker/client/interface_experimental.go create mode 100644 vendor/github.com/docker/docker/client/interface_stable.go create mode 100644 vendor/github.com/docker/docker/client/login.go create mode 100644 vendor/github.com/docker/docker/client/network_connect.go create mode 100644 vendor/github.com/docker/docker/client/network_create.go create mode 100644 vendor/github.com/docker/docker/client/network_disconnect.go create mode 100644 vendor/github.com/docker/docker/client/network_inspect.go create mode 100644 vendor/github.com/docker/docker/client/network_list.go create mode 100644 vendor/github.com/docker/docker/client/network_prune.go create mode 100644 vendor/github.com/docker/docker/client/network_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_inspect.go create mode 100644 vendor/github.com/docker/docker/client/node_list.go create mode 100644 vendor/github.com/docker/docker/client/node_remove.go create mode 100644 vendor/github.com/docker/docker/client/node_update.go create mode 100644 vendor/github.com/docker/docker/client/options.go create mode 100644 vendor/github.com/docker/docker/client/ping.go create mode 100644 vendor/github.com/docker/docker/client/plugin_create.go create mode 100644 vendor/github.com/docker/docker/client/plugin_disable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_enable.go create mode 100644 vendor/github.com/docker/docker/client/plugin_inspect.go create mode 100644 vendor/github.com/docker/docker/client/plugin_install.go create mode 100644 vendor/github.com/docker/docker/client/plugin_list.go create mode 100644 vendor/github.com/docker/docker/client/plugin_push.go create mode 100644 vendor/github.com/docker/docker/client/plugin_remove.go create mode 100644 vendor/github.com/docker/docker/client/plugin_set.go create mode 100644 vendor/github.com/docker/docker/client/plugin_upgrade.go create mode 100644 vendor/github.com/docker/docker/client/request.go create mode 100644 vendor/github.com/docker/docker/client/secret_create.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go create mode 100644 vendor/github.com/docker/docker/client/secret_list.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove.go create mode 100644 vendor/github.com/docker/docker/client/secret_update.go create mode 100644 vendor/github.com/docker/docker/client/service_create.go create mode 100644 vendor/github.com/docker/docker/client/service_inspect.go create mode 100644 vendor/github.com/docker/docker/client/service_list.go create mode 100644 
vendor/github.com/docker/docker/client/service_logs.go create mode 100644 vendor/github.com/docker/docker/client/service_remove.go create mode 100644 vendor/github.com/docker/docker/client/service_update.go create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go create mode 100644 vendor/github.com/docker/docker/client/swarm_init.go create mode 100644 vendor/github.com/docker/docker/client/swarm_inspect.go create mode 100644 vendor/github.com/docker/docker/client/swarm_join.go create mode 100644 vendor/github.com/docker/docker/client/swarm_leave.go create mode 100644 vendor/github.com/docker/docker/client/swarm_unlock.go create mode 100644 vendor/github.com/docker/docker/client/swarm_update.go create mode 100644 vendor/github.com/docker/docker/client/task_inspect.go create mode 100644 vendor/github.com/docker/docker/client/task_list.go create mode 100644 vendor/github.com/docker/docker/client/task_logs.go create mode 100644 vendor/github.com/docker/docker/client/transport.go create mode 100644 vendor/github.com/docker/docker/client/utils.go create mode 100644 vendor/github.com/docker/docker/client/version.go create mode 100644 vendor/github.com/docker/docker/client/volume_create.go create mode 100644 vendor/github.com/docker/docker/client/volume_inspect.go create mode 100644 vendor/github.com/docker/docker/client/volume_list.go create mode 100644 vendor/github.com/docker/docker/client/volume_prune.go create mode 100644 vendor/github.com/docker/docker/client/volume_remove.go create mode 100644 vendor/github.com/docker/docker/errdefs/defs.go create mode 100644 vendor/github.com/docker/docker/errdefs/doc.go create mode 100644 vendor/github.com/docker/docker/errdefs/helpers.go create mode 100644 vendor/github.com/docker/docker/errdefs/http_helpers.go create mode 100644 vendor/github.com/docker/docker/errdefs/is.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_others.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/idtools/utils_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/buffer.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/fswriters.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/readers.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go create mode 100644 vendor/github.com/docker/docker/pkg/ioutils/writers.go create mode 100644 vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go create mode 100644 vendor/github.com/docker/docker/pkg/longpath/longpath.go create mode 100644 vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go create mode 100644 
vendor/github.com/docker/docker/pkg/stringid/README.md create mode 100644 vendor/github.com/docker/docker/pkg/stringid/stringid.go create mode 100644 vendor/github.com/docker/docker/pkg/system/args_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/chtimes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/errors.go create mode 100644 vendor/github.com/docker/docker/pkg/system/exitcode.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/filesys_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init.go create mode 100644 vendor/github.com/docker/docker/pkg/system/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/lstat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/meminfo_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod.go create mode 100644 vendor/github.com/docker/docker/pkg/system/mknod_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/path_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/process_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/process_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/rm.go create mode 100644 vendor/github.com/docker/docker/pkg/system/rm_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_bsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_darwin.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_openbsd.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_solaris.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/stat_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/syscall_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/umask.go create mode 100644 vendor/github.com/docker/docker/pkg/system/umask_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/lcow_parser.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/linux_parser.go create mode 100644 
vendor/github.com/docker/docker/volume/mounts/mounts.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/parser.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/validate.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_copy.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_unix.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/volume_windows.go create mode 100644 vendor/github.com/docker/docker/volume/mounts/windows_parser.go create mode 100644 vendor/github.com/docker/docker/volume/volume.go create mode 100644 vendor/github.com/docker/go-connections/LICENSE create mode 100644 vendor/github.com/docker/go-connections/nat/nat.go create mode 100644 vendor/github.com/docker/go-connections/nat/parse.go create mode 100644 vendor/github.com/docker/go-connections/nat/sort.go create mode 100644 vendor/github.com/docker/go-connections/sockets/README.md create mode 100644 vendor/github.com/docker/go-connections/sockets/inmem_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/proxy.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_unix.go create mode 100644 vendor/github.com/docker/go-connections/sockets/sockets_windows.go create mode 100644 vendor/github.com/docker/go-connections/sockets/tcp_socket.go create mode 100644 vendor/github.com/docker/go-connections/sockets/unix_socket.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go create mode 100644 vendor/github.com/docker/go-units/CONTRIBUTING.md create mode 100644 vendor/github.com/docker/go-units/LICENSE create mode 100644 vendor/github.com/docker/go-units/MAINTAINERS create mode 100644 vendor/github.com/docker/go-units/README.md create mode 100644 vendor/github.com/docker/go-units/circle.yml create mode 100644 vendor/github.com/docker/go-units/duration.go create mode 100644 vendor/github.com/docker/go-units/size.go create mode 100644 vendor/github.com/docker/go-units/ulimit.go create mode 100644 vendor/github.com/emirpasic/gods/LICENSE create mode 100644 vendor/github.com/emirpasic/gods/containers/containers.go create mode 100644 vendor/github.com/emirpasic/gods/containers/enumerable.go create mode 100644 vendor/github.com/emirpasic/gods/containers/iterator.go create mode 100644 vendor/github.com/emirpasic/gods/containers/serialization.go create mode 100644 vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go create mode 100644 vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go create mode 100644 vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go create mode 100644 vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go create mode 100644 vendor/github.com/emirpasic/gods/lists/lists.go create mode 100644 vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go create mode 100644 vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go create mode 100644 vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go create mode 100644 
vendor/github.com/emirpasic/gods/trees/trees.go create mode 100644 vendor/github.com/emirpasic/gods/utils/comparator.go create mode 100644 vendor/github.com/emirpasic/gods/utils/sort.go create mode 100644 vendor/github.com/emirpasic/gods/utils/utils.go delete mode 100644 vendor/github.com/evanphx/json-patch/.travis.yml create mode 100644 vendor/github.com/fatih/color/LICENSE.md create mode 100644 vendor/github.com/fatih/color/README.md create mode 100644 vendor/github.com/fatih/color/color.go create mode 100644 vendor/github.com/fatih/color/doc.go create mode 100644 vendor/github.com/fatih/color/go.mod create mode 100644 vendor/github.com/fatih/color/go.sum create mode 100644 vendor/github.com/gdamore/encoding/.appveyor.yml create mode 100644 vendor/github.com/gdamore/encoding/.travis.yml create mode 100644 vendor/github.com/gdamore/encoding/LICENSE create mode 100644 vendor/github.com/gdamore/encoding/README.md create mode 100644 vendor/github.com/gdamore/encoding/ascii.go create mode 100644 vendor/github.com/gdamore/encoding/charmap.go create mode 100644 vendor/github.com/gdamore/encoding/doc.go create mode 100644 vendor/github.com/gdamore/encoding/ebcdic.go create mode 100644 vendor/github.com/gdamore/encoding/go.mod create mode 100644 vendor/github.com/gdamore/encoding/go.sum create mode 100644 vendor/github.com/gdamore/encoding/latin1.go create mode 100644 vendor/github.com/gdamore/encoding/latin5.go create mode 100644 vendor/github.com/gdamore/encoding/utf8.go create mode 100644 vendor/github.com/gdamore/tcell/v2/.appveyor.yml create mode 100644 vendor/github.com/gdamore/tcell/v2/.gitignore create mode 100644 vendor/github.com/gdamore/tcell/v2/.travis.yml create mode 100644 vendor/github.com/gdamore/tcell/v2/AUTHORS create mode 100644 vendor/github.com/gdamore/tcell/v2/CHANGESv2.md create mode 100644 vendor/github.com/gdamore/tcell/v2/LICENSE create mode 100644 vendor/github.com/gdamore/tcell/v2/README.md create mode 100644 vendor/github.com/gdamore/tcell/v2/TUTORIAL.md create mode 100644 vendor/github.com/gdamore/tcell/v2/attr.go create mode 100644 vendor/github.com/gdamore/tcell/v2/cell.go create mode 100644 vendor/github.com/gdamore/tcell/v2/charset_stub.go create mode 100644 vendor/github.com/gdamore/tcell/v2/charset_unix.go create mode 100644 vendor/github.com/gdamore/tcell/v2/charset_windows.go create mode 100644 vendor/github.com/gdamore/tcell/v2/color.go create mode 100644 vendor/github.com/gdamore/tcell/v2/colorfit.go create mode 100644 vendor/github.com/gdamore/tcell/v2/console_stub.go create mode 100644 vendor/github.com/gdamore/tcell/v2/console_win.go create mode 100644 vendor/github.com/gdamore/tcell/v2/doc.go create mode 100644 vendor/github.com/gdamore/tcell/v2/encoding.go create mode 100644 vendor/github.com/gdamore/tcell/v2/errors.go create mode 100644 vendor/github.com/gdamore/tcell/v2/event.go create mode 100644 vendor/github.com/gdamore/tcell/v2/go.mod create mode 100644 vendor/github.com/gdamore/tcell/v2/go.sum create mode 100644 vendor/github.com/gdamore/tcell/v2/interrupt.go create mode 100644 vendor/github.com/gdamore/tcell/v2/key.go create mode 100644 vendor/github.com/gdamore/tcell/v2/mouse.go create mode 100644 vendor/github.com/gdamore/tcell/v2/nonblock_bsd.go create mode 100644 vendor/github.com/gdamore/tcell/v2/nonblock_unix.go create mode 100644 vendor/github.com/gdamore/tcell/v2/paste.go create mode 100644 vendor/github.com/gdamore/tcell/v2/resize.go create mode 100644 vendor/github.com/gdamore/tcell/v2/runes.go create mode 100644 
vendor/github.com/gdamore/tcell/v2/screen.go create mode 100644 vendor/github.com/gdamore/tcell/v2/simulation.go create mode 100644 vendor/github.com/gdamore/tcell/v2/stdin_unix.go create mode 100644 vendor/github.com/gdamore/tcell/v2/style.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/.gitignore create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/README.md create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/TERMINALS.md create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/a/aixterm/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/a/alacritty/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/a/ansi/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/b/beterm/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/base/base.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/c/cygwin/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/d/dtterm/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/dynamic/dynamic.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/e/emacs/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/extended/extended.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/f/foot/foot.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/g/gnome/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/gen.sh create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/h/hpterm/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/k/konsole/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/k/kterm/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/l/linux/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/models.txt create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/p/pcansi/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/r/rxvt/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/s/screen/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/s/simpleterm/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/s/sun/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/t/termite/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/t/tmux/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/terminfo.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/v/vt100/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/v/vt102/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/v/vt220/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/v/vt320/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/v/vt400/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/v/vt420/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/v/vt52/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/w/wy50/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/w/wy60/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/w/wy99_ansi/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/x/xfce/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/direct.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/term.go 
create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_termite/term.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terms_default.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terms_dynamic.go create mode 100644 vendor/github.com/gdamore/tcell/v2/terms_static.go create mode 100644 vendor/github.com/gdamore/tcell/v2/tscreen.go create mode 100644 vendor/github.com/gdamore/tcell/v2/tscreen_stub.go create mode 100644 vendor/github.com/gdamore/tcell/v2/tscreen_unix.go create mode 100644 vendor/github.com/gdamore/tcell/v2/tty.go create mode 100644 vendor/github.com/gdamore/tcell/v2/tty_unix.go create mode 100644 vendor/github.com/go-git/gcfg/LICENSE create mode 100644 vendor/github.com/go-git/gcfg/README create mode 100644 vendor/github.com/go-git/gcfg/doc.go create mode 100644 vendor/github.com/go-git/gcfg/errors.go create mode 100644 vendor/github.com/go-git/gcfg/go1_0.go create mode 100644 vendor/github.com/go-git/gcfg/go1_2.go create mode 100644 vendor/github.com/go-git/gcfg/read.go create mode 100644 vendor/github.com/go-git/gcfg/scanner/errors.go create mode 100644 vendor/github.com/go-git/gcfg/scanner/scanner.go create mode 100644 vendor/github.com/go-git/gcfg/set.go create mode 100644 vendor/github.com/go-git/gcfg/token/position.go create mode 100644 vendor/github.com/go-git/gcfg/token/serialize.go create mode 100644 vendor/github.com/go-git/gcfg/token/token.go create mode 100644 vendor/github.com/go-git/gcfg/types/bool.go create mode 100644 vendor/github.com/go-git/gcfg/types/doc.go create mode 100644 vendor/github.com/go-git/gcfg/types/enum.go create mode 100644 vendor/github.com/go-git/gcfg/types/int.go create mode 100644 vendor/github.com/go-git/gcfg/types/scan.go create mode 100644 vendor/github.com/go-git/go-billy/v5/.gitignore create mode 100644 vendor/github.com/go-git/go-billy/v5/LICENSE create mode 100644 vendor/github.com/go-git/go-billy/v5/README.md create mode 100644 vendor/github.com/go-git/go-billy/v5/fs.go create mode 100644 vendor/github.com/go-git/go-billy/v5/go.mod create mode 100644 vendor/github.com/go-git/go-billy/v5/go.sum create mode 100644 vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go create mode 100644 vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go create mode 100644 vendor/github.com/go-git/go-billy/v5/memfs/memory.go create mode 100644 vendor/github.com/go-git/go-billy/v5/memfs/storage.go create mode 100644 vendor/github.com/go-git/go-billy/v5/osfs/os.go create mode 100644 vendor/github.com/go-git/go-billy/v5/osfs/os_js.go create mode 100644 vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go create mode 100644 vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go create mode 100644 vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go create mode 100644 vendor/github.com/go-git/go-billy/v5/util/glob.go create mode 100644 vendor/github.com/go-git/go-billy/v5/util/util.go create mode 100644 vendor/github.com/go-git/go-git/v5/.gitignore create mode 100644 vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md create mode 100644 vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md create mode 100644 vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md create mode 100644 vendor/github.com/go-git/go-git/v5/LICENSE create mode 100644 vendor/github.com/go-git/go-git/v5/Makefile create mode 100644 vendor/github.com/go-git/go-git/v5/README.md create mode 100644 vendor/github.com/go-git/go-git/v5/blame.go create mode 
100644 vendor/github.com/go-git/go-git/v5/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/config/branch.go create mode 100644 vendor/github.com/go-git/go-git/v5/config/config.go create mode 100644 vendor/github.com/go-git/go-git/v5/config/modules.go create mode 100644 vendor/github.com/go-git/go-git/v5/config/refspec.go create mode 100644 vendor/github.com/go-git/go-git/v5/config/url.go create mode 100644 vendor/github.com/go-git/go-git/v5/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/go.mod create mode 100644 vendor/github.com/go-git/go-git/v5/go.sum create mode 100644 vendor/github.com/go-git/go-git/v5/internal/revision/parser.go create mode 100644 vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go create mode 100644 vendor/github.com/go-git/go-git/v5/internal/revision/token.go create mode 100644 vendor/github.com/go-git/go-git/v5/internal/url/url.go create mode 100644 vendor/github.com/go-git/go-git/v5/object_walker.go create mode 100644 vendor/github.com/go-git/go-git/v5/options.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/color/color.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/error.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go create mode 
100644 vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/hash.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/memory.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/change.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/file.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/object.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go create mode 100644 
vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_decode.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/updreq_encode.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackreq.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/uppackresp.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/reference.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/revision.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/revlist/revlist.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/storer/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/storer/index.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/storer/object.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/storer/reference.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/storer/shallow.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/storer/storer.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/client/client.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/file/server.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/git/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/http/receive_pack.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/http/upload_pack.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/common.go 
create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/internal/common/server.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/server/server.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/auth_method.go create mode 100644 vendor/github.com/go-git/go-git/v5/plumbing/transport/ssh/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/prune.go create mode 100644 vendor/github.com/go-git/go-git/v5/references.go create mode 100644 vendor/github.com/go-git/go-git/v5/remote.go create mode 100644 vendor/github.com/go-git/go-git/v5/repository.go create mode 100644 vendor/github.com/go-git/go-git/v5/status.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/config.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/deltaobject.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit_setref.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/repository_filesystem.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/writers.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/module.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/reference.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/shallow.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/filesystem/storage.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/memory/storage.go create mode 100644 vendor/github.com/go-git/go-git/v5/storage/storer.go create mode 100644 vendor/github.com/go-git/go-git/v5/submodule.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/binary/read.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/binary/write.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/diff/diff.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/ioutil/common.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/ioutil/pipe_js.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/doc.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/doubleiter.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/filesystem/node.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/index/node.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/internal/frame/frame.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/iter.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/noder.go create mode 100644 vendor/github.com/go-git/go-git/v5/utils/merkletrie/noder/path.go create mode 100644 vendor/github.com/go-git/go-git/v5/worktree.go create mode 
100644 vendor/github.com/go-git/go-git/v5/worktree_bsd.go
create mode 100644 vendor/github.com/go-git/go-git/v5/worktree_commit.go
create mode 100644 vendor/github.com/go-git/go-git/v5/worktree_js.go
create mode 100644 vendor/github.com/go-git/go-git/v5/worktree_linux.go
create mode 100644 vendor/github.com/go-git/go-git/v5/worktree_plan9.go
create mode 100644 vendor/github.com/go-git/go-git/v5/worktree_status.go
create mode 100644 vendor/github.com/go-git/go-git/v5/worktree_unix_other.go
create mode 100644 vendor/github.com/go-git/go-git/v5/worktree_windows.go
create mode 100644 vendor/github.com/gobuffalo/here/.gitignore
create mode 100644 vendor/github.com/gobuffalo/here/.goreleaser.yml
create mode 100644 vendor/github.com/gobuffalo/here/LICENSE
create mode 100644 vendor/github.com/gobuffalo/here/Makefile
create mode 100644 vendor/github.com/gobuffalo/here/README.md
create mode 100644 vendor/github.com/gobuffalo/here/SHOULDERS.md
create mode 100644 vendor/github.com/gobuffalo/here/current.go
create mode 100644 vendor/github.com/gobuffalo/here/dir.go
create mode 100644 vendor/github.com/gobuffalo/here/go.mod
create mode 100644 vendor/github.com/gobuffalo/here/go.sum
create mode 100644 vendor/github.com/gobuffalo/here/here.go
create mode 100644 vendor/github.com/gobuffalo/here/info.go
create mode 100644 vendor/github.com/gobuffalo/here/info_map.go
create mode 100644 vendor/github.com/gobuffalo/here/module.go
create mode 100644 vendor/github.com/gobuffalo/here/parse.go
create mode 100644 vendor/github.com/gobuffalo/here/path.go
create mode 100644 vendor/github.com/gobuffalo/here/pkg.go
create mode 100644 vendor/github.com/gobuffalo/here/version.go
create mode 100644 vendor/github.com/google/cel-go/LICENSE
create mode 100644 vendor/github.com/google/cel-go/cel/cel.go
create mode 100644 vendor/github.com/google/cel-go/cel/env.go
create mode 100644 vendor/github.com/google/cel-go/cel/io.go
create mode 100644 vendor/github.com/google/cel-go/cel/library.go
create mode 100644 vendor/github.com/google/cel-go/cel/options.go
create mode 100644 vendor/github.com/google/cel-go/cel/program.go
create mode 100644 vendor/github.com/google/cel-go/checker/checker.go
create mode 100644 vendor/github.com/google/cel-go/checker/decls/decls.go
create mode 100644 vendor/github.com/google/cel-go/checker/decls/scopes.go
create mode 100644 vendor/github.com/google/cel-go/checker/env.go
create mode 100644 vendor/github.com/google/cel-go/checker/errors.go
create mode 100644 vendor/github.com/google/cel-go/checker/mapping.go
create mode 100644 vendor/github.com/google/cel-go/checker/printer.go
create mode 100644 vendor/github.com/google/cel-go/checker/standard.go
create mode 100644 vendor/github.com/google/cel-go/checker/types.go
create mode 100644 vendor/github.com/google/cel-go/common/containers/container.go
create mode 100644 vendor/github.com/google/cel-go/common/debug/debug.go
create mode 100644 vendor/github.com/google/cel-go/common/doc.go
create mode 100644 vendor/github.com/google/cel-go/common/error.go
create mode 100644 vendor/github.com/google/cel-go/common/errors.go
create mode 100644 vendor/github.com/google/cel-go/common/location.go
create mode 100644 vendor/github.com/google/cel-go/common/operators/operators.go
create mode 100644 vendor/github.com/google/cel-go/common/overloads/overloads.go
create mode 100644 vendor/github.com/google/cel-go/common/source.go
create mode 100644 vendor/github.com/google/cel-go/common/types/any_value.go
create mode 100644 vendor/github.com/google/cel-go/common/types/bool.go
create mode 100644 vendor/github.com/google/cel-go/common/types/bytes.go
create mode 100644 vendor/github.com/google/cel-go/common/types/doc.go
create mode 100644 vendor/github.com/google/cel-go/common/types/double.go
create mode 100644 vendor/github.com/google/cel-go/common/types/duration.go
create mode 100644 vendor/github.com/google/cel-go/common/types/err.go
create mode 100644 vendor/github.com/google/cel-go/common/types/int.go
create mode 100644 vendor/github.com/google/cel-go/common/types/iterator.go
create mode 100644 vendor/github.com/google/cel-go/common/types/json_value.go
create mode 100644 vendor/github.com/google/cel-go/common/types/list.go
create mode 100644 vendor/github.com/google/cel-go/common/types/map.go
create mode 100644 vendor/github.com/google/cel-go/common/types/null.go
create mode 100644 vendor/github.com/google/cel-go/common/types/object.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/checked.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/enum.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/file.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/pb.go
create mode 100644 vendor/github.com/google/cel-go/common/types/pb/type.go
create mode 100644 vendor/github.com/google/cel-go/common/types/provider.go
create mode 100644 vendor/github.com/google/cel-go/common/types/ref/provider.go
create mode 100644 vendor/github.com/google/cel-go/common/types/ref/reference.go
create mode 100644 vendor/github.com/google/cel-go/common/types/string.go
create mode 100644 vendor/github.com/google/cel-go/common/types/timestamp.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/comparer.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/container.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/field_tester.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/indexer.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/iterator.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/lister.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/mapper.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/matcher.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/math.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/receiver.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/sizer.go
create mode 100644 vendor/github.com/google/cel-go/common/types/traits/traits.go
create mode 100644 vendor/github.com/google/cel-go/common/types/type.go
create mode 100644 vendor/github.com/google/cel-go/common/types/uint.go
create mode 100644 vendor/github.com/google/cel-go/common/types/unknown.go
create mode 100644 vendor/github.com/google/cel-go/common/types/util.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/activation.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/attributes.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/coster.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/decorators.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/dispatcher.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/evalstate.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/functions/functions.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/functions/standard.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/interpretable.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/interpreter.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/planner.go
create mode 100644 vendor/github.com/google/cel-go/interpreter/prune.go
create mode 100644 vendor/github.com/google/cel-go/parser/errors.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/CEL.g4
create mode 100644 vendor/github.com/google/cel-go/parser/gen/CEL.tokens
create mode 100644 vendor/github.com/google/cel-go/parser/gen/CELLexer.tokens
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_base_listener.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_base_visitor.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_lexer.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_listener.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_parser.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/cel_visitor.go
create mode 100644 vendor/github.com/google/cel-go/parser/gen/doc.go
create mode 100644 vendor/github.com/google/cel-go/parser/helper.go
create mode 100644 vendor/github.com/google/cel-go/parser/macro.go
create mode 100644 vendor/github.com/google/cel-go/parser/parser.go
create mode 100644 vendor/github.com/google/cel-go/parser/unescape.go
create mode 100644 vendor/github.com/google/cel-go/parser/unparser.go
create mode 100644 vendor/github.com/google/go-containerregistry/internal/and/and_closer.go
create mode 100644 vendor/github.com/google/go-containerregistry/internal/estargz/estargz.go
create mode 100644 vendor/github.com/google/go-containerregistry/internal/gzip/zip.go
create mode 100644 vendor/github.com/google/go-containerregistry/internal/redact/redact.go
create mode 100644 vendor/github.com/google/go-containerregistry/internal/retry/retry.go
create mode 100644 vendor/github.com/google/go-containerregistry/internal/retry/wait/kubernetes_apimachinery_wait.go
create mode 100644 vendor/github.com/google/go-containerregistry/internal/verify/verify.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/anon.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/auth.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/authn.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/basic.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/bearer.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/logs/logs.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/config.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/daemon/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/daemon/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/daemon/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/daemon/options.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/daemon/write.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/empty/index.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/hash.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/index.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layer.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/blob.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/index.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/layoutpath.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/options.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/read.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/layout/write.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/match/match.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/index.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/mutate/rebase.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/partial/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/partial/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/partial/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/partial/index.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/platform.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/progress.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/catalog.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/check.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/descriptor.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/layer.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/multi_write.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/stream/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/README.md
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/doc.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/types/types.go
create mode 100644 vendor/github.com/google/go-containerregistry/pkg/v1/zz_deepcopy_generated.go
create mode 100644 vendor/github.com/hako/durafmt/.gitignore
create mode 100644 vendor/github.com/hako/durafmt/.travis.yml
create mode 100644 vendor/github.com/hako/durafmt/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/hako/durafmt/CONTRIBUTING.md
create mode 100644 vendor/github.com/hako/durafmt/LICENSE
create mode 100644 vendor/github.com/hako/durafmt/README.md
create mode 100644 vendor/github.com/hako/durafmt/durafmt.go
create mode 100644 vendor/github.com/hashicorp/errwrap/LICENSE
create mode 100644 vendor/github.com/hashicorp/errwrap/README.md
create mode 100644 vendor/github.com/hashicorp/errwrap/errwrap.go
create mode 100644 vendor/github.com/hashicorp/errwrap/go.mod
create mode 100644 vendor/github.com/hashicorp/go-multierror/LICENSE
create mode 100644 vendor/github.com/hashicorp/go-multierror/Makefile
create mode 100644 vendor/github.com/hashicorp/go-multierror/README.md
create mode 100644 vendor/github.com/hashicorp/go-multierror/append.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/flatten.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/format.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/go.mod
create mode 100644 vendor/github.com/hashicorp/go-multierror/go.sum
create mode 100644 vendor/github.com/hashicorp/go-multierror/group.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/multierror.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/prefix.go
create mode 100644 vendor/github.com/hashicorp/go-multierror/sort.go
create mode 100644 vendor/github.com/heroku/color/.gitignore
create mode 100644 vendor/github.com/heroku/color/.golangcli.yml
create mode 100644 vendor/github.com/heroku/color/LICENSE
create mode 100644 vendor/github.com/heroku/color/Makefile
create mode 100644 vendor/github.com/heroku/color/README.md
create mode 100644 vendor/github.com/heroku/color/attributes.go
create mode 100644 vendor/github.com/heroku/color/cache.go
create mode 100644 vendor/github.com/heroku/color/color.go
create mode 100644 vendor/github.com/heroku/color/console.go
create mode 100644 vendor/github.com/heroku/color/go.mod
create mode 100644 vendor/github.com/heroku/color/go.sum
create mode 100644 vendor/github.com/jbenet/go-context/LICENSE
create mode 100644 vendor/github.com/jbenet/go-context/io/ctxio.go
create mode 100644 vendor/github.com/jonboulle/clockwork/.editorconfig
create mode 100644 vendor/github.com/jonboulle/clockwork/.gitignore
create mode 100644 vendor/github.com/jonboulle/clockwork/LICENSE
create mode 100644 vendor/github.com/jonboulle/clockwork/README.md
create mode 100644 vendor/github.com/jonboulle/clockwork/clockwork.go
create mode 100644 vendor/github.com/jonboulle/clockwork/go.mod
create mode 100644 vendor/github.com/jonboulle/clockwork/ticker.go
create mode 100644 vendor/github.com/kballard/go-shellquote/LICENSE
create mode 100644 vendor/github.com/kballard/go-shellquote/README
create mode 100644 vendor/github.com/kballard/go-shellquote/doc.go
create mode 100644 vendor/github.com/kballard/go-shellquote/quote.go
create mode 100644 vendor/github.com/kballard/go-shellquote/unquote.go
create mode 100644 vendor/github.com/kevinburke/ssh_config/.gitattributes
create mode 100644 vendor/github.com/kevinburke/ssh_config/.gitignore
create mode 100644 vendor/github.com/kevinburke/ssh_config/.mailmap
create mode 100644 vendor/github.com/kevinburke/ssh_config/.travis.yml
create mode 100644 vendor/github.com/kevinburke/ssh_config/AUTHORS.txt
create mode 100644 vendor/github.com/kevinburke/ssh_config/LICENSE
create mode 100644 vendor/github.com/kevinburke/ssh_config/Makefile
create mode 100644 vendor/github.com/kevinburke/ssh_config/README.md
create mode 100644 vendor/github.com/kevinburke/ssh_config/config.go
create mode 100644 vendor/github.com/kevinburke/ssh_config/lexer.go
create mode 100644 vendor/github.com/kevinburke/ssh_config/parser.go
create mode 100644 vendor/github.com/kevinburke/ssh_config/position.go
create mode 100644 vendor/github.com/kevinburke/ssh_config/token.go
create mode 100644 vendor/github.com/kevinburke/ssh_config/validators.go
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/.gitignore
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/CHANGELOG.md
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/LICENSE
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/README.md
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/colorgens.go
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/colors.go
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/go.mod
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/happy_palettegen.go
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/hexcolor.go
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/hsluv-snapshot-rev4.json
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/hsluv.go
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/soft_palettegen.go
create mode 100644 vendor/github.com/lucasb-eyer/go-colorful/warm_palettegen.go
create mode 100644 vendor/github.com/markbates/pkger/.gitignore
create mode 100644 vendor/github.com/markbates/pkger/.goreleaser.yml
create mode 100644 vendor/github.com/markbates/pkger/LICENSE
create mode 100644 vendor/github.com/markbates/pkger/Makefile
create mode 100644 vendor/github.com/markbates/pkger/README.md
create mode 100644 vendor/github.com/markbates/pkger/SHOULDERS.md
create mode 100644 vendor/github.com/markbates/pkger/apply.go
create mode 100644 vendor/github.com/markbates/pkger/go.mod
create mode 100644 vendor/github.com/markbates/pkger/go.sum
create mode 100644 vendor/github.com/markbates/pkger/here/here.go
create mode 100644 vendor/github.com/markbates/pkger/internal/maps/files.go
create mode 100644 vendor/github.com/markbates/pkger/internal/maps/infos.go
create mode 100644 vendor/github.com/markbates/pkger/pkger.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/embed/embed.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/embed/file.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/faces.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/file.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/file_info.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/mem/add.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/mem/embed.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/mem/file.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/mem/mem.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/mod_time.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/pkger.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/stdos/file.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/stdos/json.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/stdos/stdos.go
create mode 100644 vendor/github.com/markbates/pkger/pkging/wrap.go
create mode 100644 vendor/github.com/markbates/pkger/version.go
create mode 100644 vendor/github.com/mattn/go-colorable/LICENSE
create mode 100644 vendor/github.com/mattn/go-colorable/README.md
create mode 100644 vendor/github.com/mattn/go-colorable/colorable_appengine.go
create mode 100644 vendor/github.com/mattn/go-colorable/colorable_others.go
create mode 100644 vendor/github.com/mattn/go-colorable/colorable_windows.go
create mode 100644 vendor/github.com/mattn/go-colorable/go.mod
create mode 100644 vendor/github.com/mattn/go-colorable/go.sum
create mode 100644 vendor/github.com/mattn/go-colorable/go.test.sh
create mode 100644 vendor/github.com/mattn/go-colorable/noncolorable.go
create mode 100644 vendor/github.com/mattn/go-isatty/LICENSE
create mode 100644 vendor/github.com/mattn/go-isatty/README.md
create mode 100644 vendor/github.com/mattn/go-isatty/doc.go
create mode 100644 vendor/github.com/mattn/go-isatty/go.mod
create mode 100644 vendor/github.com/mattn/go-isatty/go.sum
create mode 100644 vendor/github.com/mattn/go-isatty/go.test.sh
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_bsd.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_others.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_plan9.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_solaris.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_tcgets.go
create mode 100644 vendor/github.com/mattn/go-isatty/isatty_windows.go
create mode 100644 vendor/github.com/mattn/go-runewidth/.travis.yml
create mode 100644 vendor/github.com/mattn/go-runewidth/LICENSE
create mode 100644 vendor/github.com/mattn/go-runewidth/README.md
create mode 100644 vendor/github.com/mattn/go-runewidth/go.mod
create mode 100644 vendor/github.com/mattn/go-runewidth/go.sum
create mode 100644 vendor/github.com/mattn/go-runewidth/go.test.sh
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_js.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_posix.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_table.go
create mode 100644 vendor/github.com/mattn/go-runewidth/runewidth_windows.go
create mode 100644 vendor/github.com/mgutz/ansi/.gitignore
create mode 100644 vendor/github.com/mgutz/ansi/LICENSE
create mode 100644 vendor/github.com/mgutz/ansi/README.md
create mode 100644 vendor/github.com/mgutz/ansi/ansi.go
create mode 100644 vendor/github.com/mgutz/ansi/doc.go
create mode 100644 vendor/github.com/mgutz/ansi/print.go
create mode 100644 vendor/github.com/mitchellh/ioprogress/LICENSE
create mode 100644 vendor/github.com/mitchellh/ioprogress/README.md
create mode 100644 vendor/github.com/mitchellh/ioprogress/draw.go
create mode 100644 vendor/github.com/mitchellh/ioprogress/reader.go
create mode 100644 vendor/github.com/moby/spdystream/CONTRIBUTING.md
create mode 100644 vendor/github.com/moby/spdystream/LICENSE
create mode 100644 vendor/github.com/moby/spdystream/MAINTAINERS
create mode 100644 vendor/github.com/moby/spdystream/NOTICE
create mode 100644 vendor/github.com/moby/spdystream/README.md
create mode 100644 vendor/github.com/moby/spdystream/connection.go
create mode 100644 vendor/github.com/moby/spdystream/go.mod
create mode 100644 vendor/github.com/moby/spdystream/go.sum
create mode 100644 vendor/github.com/moby/spdystream/handlers.go
create mode 100644 vendor/github.com/moby/spdystream/priority.go
create mode 100644 vendor/github.com/moby/spdystream/spdy/dictionary.go
create mode 100644 vendor/github.com/moby/spdystream/spdy/read.go
create mode 100644 vendor/github.com/moby/spdystream/spdy/types.go
create mode 100644 vendor/github.com/moby/spdystream/spdy/write.go
create mode 100644 vendor/github.com/moby/spdystream/stream.go
create mode 100644 vendor/github.com/moby/spdystream/utils.go
create mode 100644 vendor/github.com/moby/sys/mount/LICENSE
create mode 100644 vendor/github.com/moby/sys/mount/doc.go
create mode 100644 vendor/github.com/moby/sys/mount/flags_bsd.go
create mode 100644 vendor/github.com/moby/sys/mount/flags_linux.go
create mode 100644 vendor/github.com/moby/sys/mount/flags_unix.go
create mode 100644 vendor/github.com/moby/sys/mount/go.mod
create mode 100644 vendor/github.com/moby/sys/mount/go.sum
create mode 100644 vendor/github.com/moby/sys/mount/mount_errors.go
create mode 100644 vendor/github.com/moby/sys/mount/mount_unix.go
create mode 100644 vendor/github.com/moby/sys/mount/mounter_bsd.go
create mode 100644 vendor/github.com/moby/sys/mount/mounter_linux.go
create mode 100644 vendor/github.com/moby/sys/mount/mounter_unsupported.go
create mode 100644 vendor/github.com/moby/sys/mount/sharedsubtree_linux.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/LICENSE
create mode 100644 vendor/github.com/moby/sys/mountinfo/doc.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/go.mod
create mode 100644 vendor/github.com/moby/sys/mountinfo/go.sum
create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_linux.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/mounted_unix.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_bsd.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_filters.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_linux.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_unsupported.go
create mode 100644 vendor/github.com/moby/sys/mountinfo/mountinfo_windows.go
create mode 100644 vendor/github.com/moby/term/.gitignore
create mode 100644 vendor/github.com/moby/term/LICENSE
create mode 100644 vendor/github.com/moby/term/README.md
create mode 100644 vendor/github.com/moby/term/ascii.go
create mode 100644 vendor/github.com/moby/term/go.mod
create mode 100644 vendor/github.com/moby/term/go.sum
create mode 100644 vendor/github.com/moby/term/proxy.go
create mode 100644 vendor/github.com/moby/term/tc.go
create mode 100644 vendor/github.com/moby/term/term.go
create mode 100644 vendor/github.com/moby/term/term_windows.go
create mode 100644 vendor/github.com/moby/term/termios.go
create mode 100644 vendor/github.com/moby/term/termios_bsd.go
create mode 100644 vendor/github.com/moby/term/termios_nonbsd.go
create mode 100644 vendor/github.com/moby/term/windows/ansi_reader.go
create mode 100644 vendor/github.com/moby/term/windows/ansi_writer.go
create mode 100644 vendor/github.com/moby/term/windows/console.go
create mode 100644 vendor/github.com/moby/term/windows/doc.go
create mode 100644 vendor/github.com/moby/term/winsize.go
create mode 100644 vendor/github.com/morikuni/aec/LICENSE
create mode 100644 vendor/github.com/morikuni/aec/README.md
create mode 100644 vendor/github.com/morikuni/aec/aec.go
create mode 100644 vendor/github.com/morikuni/aec/ansi.go
create mode 100644 vendor/github.com/morikuni/aec/builder.go
create mode 100644 vendor/github.com/morikuni/aec/sample.gif
create mode 100644 vendor/github.com/morikuni/aec/sgr.go
create mode 100644 vendor/github.com/opencontainers/go-digest/.mailmap
create mode 100644 vendor/github.com/opencontainers/go-digest/.pullapprove.yml
create mode 100644 vendor/github.com/opencontainers/go-digest/.travis.yml
create mode 100644 vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md
create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE
create mode 100644 vendor/github.com/opencontainers/go-digest/LICENSE.docs
create mode 100644 vendor/github.com/opencontainers/go-digest/MAINTAINERS
create mode 100644 vendor/github.com/opencontainers/go-digest/README.md
create mode 100644 vendor/github.com/opencontainers/go-digest/algorithm.go
create mode 100644 vendor/github.com/opencontainers/go-digest/digest.go
create mode 100644 vendor/github.com/opencontainers/go-digest/digester.go
create mode 100644 vendor/github.com/opencontainers/go-digest/doc.go
create mode 100644 vendor/github.com/opencontainers/go-digest/go.mod
create mode 100644 vendor/github.com/opencontainers/go-digest/verifiers.go
create mode 100644 vendor/github.com/opencontainers/image-spec/LICENSE
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/version.go
create mode 100644 vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
create mode 100644 vendor/github.com/opencontainers/runc/LICENSE
create mode 100644 vendor/github.com/opencontainers/runc/NOTICE
create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user.go
create mode 100644 vendor/github.com/opencontainers/runc/libcontainer/user/user_fuzzer.go
create mode 100644 vendor/github.com/opencontainers/runtime-spec/LICENSE
create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/config.go
create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/state.go
create mode 100644 vendor/github.com/opencontainers/runtime-spec/specs-go/version.go
create mode 100644 vendor/github.com/opencontainers/selinux/LICENSE
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/doc.go
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label.go
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_linux.go
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/label/label_stub.go
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_linux.go
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/selinux_stub.go
create mode 100644 vendor/github.com/opencontainers/selinux/go-selinux/xattrs_linux.go
create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/README.md
create mode 100644 vendor/github.com/opencontainers/selinux/pkg/pwalk/pwalk.go
create mode 100644 vendor/github.com/ory/viper/.editorconfig
create mode 100644 vendor/github.com/ory/viper/.gitignore
create mode 100644 vendor/github.com/ory/viper/.golangci.yml
create mode 100644 vendor/github.com/ory/viper/.travis.yml
create mode 100644 vendor/github.com/ory/viper/LICENSE
create mode 100644 vendor/github.com/ory/viper/Makefile
create mode 100644 vendor/github.com/ory/viper/README.md
create mode 100644 vendor/github.com/ory/viper/flags.go
create mode 100644 vendor/github.com/ory/viper/go.mod
create mode 100644 vendor/github.com/ory/viper/go.sum
create mode 100644 vendor/github.com/ory/viper/util.go
create mode 100644 vendor/github.com/ory/viper/viper.go
create mode 100644 vendor/github.com/rivo/tview/CODE_OF_CONDUCT.md
create mode 100644 vendor/github.com/rivo/tview/CONTRIBUTING.md
create mode 100644 vendor/github.com/rivo/tview/LICENSE.txt
create mode 100644 vendor/github.com/rivo/tview/README.md
create mode 100644 vendor/github.com/rivo/tview/ansi.go
create mode 100644 vendor/github.com/rivo/tview/application.go
create mode 100644 vendor/github.com/rivo/tview/borders.go
create mode 100644 vendor/github.com/rivo/tview/box.go
create mode 100644 vendor/github.com/rivo/tview/button.go
create mode 100644 vendor/github.com/rivo/tview/checkbox.go
create mode 100644 vendor/github.com/rivo/tview/doc.go
create mode 100644 vendor/github.com/rivo/tview/dropdown.go
create mode 100644 vendor/github.com/rivo/tview/flex.go
create mode 100644 vendor/github.com/rivo/tview/form.go
create mode 100644 vendor/github.com/rivo/tview/frame.go
create mode 100644 vendor/github.com/rivo/tview/go.mod
create mode 100644 vendor/github.com/rivo/tview/go.sum
create mode 100644 vendor/github.com/rivo/tview/grid.go
create mode 100644 vendor/github.com/rivo/tview/inputfield.go
create mode 100644 vendor/github.com/rivo/tview/list.go
create mode 100644 vendor/github.com/rivo/tview/modal.go
create mode 100644 vendor/github.com/rivo/tview/pages.go
create mode 100644 vendor/github.com/rivo/tview/primitive.go
create mode 100644 vendor/github.com/rivo/tview/semigraphics.go
create mode 100644 vendor/github.com/rivo/tview/styles.go
create mode 100644 vendor/github.com/rivo/tview/table.go
create mode 100644 vendor/github.com/rivo/tview/textview.go
create mode 100644 vendor/github.com/rivo/tview/treeview.go
create mode 100644 vendor/github.com/rivo/tview/tview.gif
create mode 100644 vendor/github.com/rivo/tview/util.go
create mode 100644 vendor/github.com/rivo/uniseg/LICENSE.txt
create mode 100644 vendor/github.com/rivo/uniseg/README.md
create mode 100644 vendor/github.com/rivo/uniseg/doc.go
create mode 100644 vendor/github.com/rivo/uniseg/go.mod
create mode 100644 vendor/github.com/rivo/uniseg/grapheme.go
create mode 100644 vendor/github.com/rivo/uniseg/properties.go
create mode 100644 vendor/github.com/sabhiram/go-gitignore/.gitignore
create mode 100644 vendor/github.com/sabhiram/go-gitignore/.travis.yml
create mode 100644 vendor/github.com/sabhiram/go-gitignore/LICENSE
create mode 100644 vendor/github.com/sabhiram/go-gitignore/README.md
create mode 100644 vendor/github.com/sabhiram/go-gitignore/ignore.go
create mode 100644 vendor/github.com/sabhiram/go-gitignore/version_gen.go
create mode 100644 vendor/github.com/sergi/go-diff/AUTHORS
create mode 100644 vendor/github.com/sergi/go-diff/CONTRIBUTORS
create mode 100644 vendor/github.com/sergi/go-diff/LICENSE
create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diff.go
create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/diffmatchpatch.go
create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/match.go
create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/mathutil.go
create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/operation_string.go
create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/patch.go
create mode 100644 vendor/github.com/sergi/go-diff/diffmatchpatch/stringutil.go
create mode 100644 vendor/github.com/sirupsen/logrus/.gitignore
create mode 100644 vendor/github.com/sirupsen/logrus/.golangci.yml
create mode 100644 vendor/github.com/sirupsen/logrus/.travis.yml
create mode 100644 vendor/github.com/sirupsen/logrus/CHANGELOG.md
create mode 100644 vendor/github.com/sirupsen/logrus/LICENSE
create mode 100644 vendor/github.com/sirupsen/logrus/README.md
create mode 100644 vendor/github.com/sirupsen/logrus/alt_exit.go
create mode 100644 vendor/github.com/sirupsen/logrus/appveyor.yml
create mode 100644 vendor/github.com/sirupsen/logrus/buffer_pool.go
create mode 100644 vendor/github.com/sirupsen/logrus/doc.go
create mode 100644 vendor/github.com/sirupsen/logrus/entry.go
create mode 100644 vendor/github.com/sirupsen/logrus/exported.go
create mode 100644 vendor/github.com/sirupsen/logrus/formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/go.mod
create mode 100644 vendor/github.com/sirupsen/logrus/go.sum
create mode 100644 vendor/github.com/sirupsen/logrus/hooks.go
create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/logger.go
create mode 100644 vendor/github.com/sirupsen/logrus/logrus.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_appengine.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_bsd.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_js.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_solaris.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_unix.go
create mode 100644 vendor/github.com/sirupsen/logrus/terminal_check_windows.go
create mode 100644 vendor/github.com/sirupsen/logrus/text_formatter.go
create mode 100644 vendor/github.com/sirupsen/logrus/writer.go
create mode 100644 vendor/github.com/src-d/gcfg/LICENSE
create mode 100644 vendor/github.com/src-d/gcfg/README
create mode 100644 vendor/github.com/src-d/gcfg/doc.go
create mode 100644 vendor/github.com/src-d/gcfg/errors.go
create mode 100644 vendor/github.com/src-d/gcfg/go1_0.go
create mode 100644 vendor/github.com/src-d/gcfg/go1_2.go
create mode 100644 vendor/github.com/src-d/gcfg/read.go
create mode 100644 vendor/github.com/src-d/gcfg/scanner/errors.go
create mode 100644 vendor/github.com/src-d/gcfg/scanner/scanner.go
create mode 100644 vendor/github.com/src-d/gcfg/set.go
create mode 100644 vendor/github.com/src-d/gcfg/token/position.go
create mode 100644 vendor/github.com/src-d/gcfg/token/serialize.go
create mode 100644 vendor/github.com/src-d/gcfg/token/token.go
create mode 100644 vendor/github.com/src-d/gcfg/types/bool.go
create mode 100644 vendor/github.com/src-d/gcfg/types/doc.go
create mode 100644 vendor/github.com/src-d/gcfg/types/enum.go
create mode 100644 vendor/github.com/src-d/gcfg/types/int.go
create mode 100644 vendor/github.com/src-d/gcfg/types/scan.go
create mode 100644 vendor/github.com/stoewer/go-strcase/.gitignore
create mode 100644 vendor/github.com/stoewer/go-strcase/.golangci.yml
create mode 100644 vendor/github.com/stoewer/go-strcase/LICENSE
create mode 100644 vendor/github.com/stoewer/go-strcase/README.md
create mode 100644 vendor/github.com/stoewer/go-strcase/camel.go
create mode 100644 vendor/github.com/stoewer/go-strcase/doc.go
create mode 100644 vendor/github.com/stoewer/go-strcase/go.mod
create mode 100644 vendor/github.com/stoewer/go-strcase/go.sum
create mode 100644 vendor/github.com/stoewer/go-strcase/helper.go
create mode 100644 vendor/github.com/stoewer/go-strcase/kebab.go
create mode 100644 vendor/github.com/stoewer/go-strcase/snake.go
create mode 100644 vendor/github.com/syndtr/gocapability/LICENSE
create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability.go
create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_linux.go
create mode 100644 vendor/github.com/syndtr/gocapability/capability/capability_noop.go
create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum.go
create mode 100644 vendor/github.com/syndtr/gocapability/capability/enum_gen.go
create mode 100644 vendor/github.com/syndtr/gocapability/capability/syscall_linux.go
create mode 100644 vendor/github.com/tektoncd/cli/LICENSE
create mode 100644 vendor/github.com/tektoncd/cli/pkg/actions/create.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/actions/delete.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/actions/get.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/actions/gvr.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/actions/list.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/actions/patch.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/actions/watch.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/cli/interface.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/cli/params.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/address.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/color.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/completion.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/conditions.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/description.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/k8s.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/param.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/results.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/task.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/time.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/version.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/formatted/workspace.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/pipelinerun/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/pipelinerun/sort/by_namespace.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/pipelinerun/sort/by_start_time.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/pipelinerun/tracker.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/printer/printer.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/task/task.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/task/tasklastrun.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/create.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/get.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/list/list.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/patch.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/sort/by_namespace.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/sort/by_start_time.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/taskrun.go
create mode 100644 vendor/github.com/tektoncd/cli/pkg/taskrun/watch.go
create mode 100644 vendor/github.com/tektoncd/pipeline/LICENSE
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_bucket.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/artifact_pvc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/default.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/feature_flags.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/metrics.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/store.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/config/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/controller.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/images.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/options.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/paths.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/pod/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/pod/template.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/pod/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/register.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/storagetypes.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/cluster_task_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/condition_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/container_replacements.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/conversion_error.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/param_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_interface.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_resource_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipeline_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pipelinerun_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/pod.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/register.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_paths.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/resource_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/run_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/step_replacements.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/storage_resource.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_interface.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/task_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/taskrun_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/workspace_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/cluster_task_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/condition_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/container_replacements.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/conversion_error.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/merge.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/openapi_generated.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_context.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/param_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_interface.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipeline_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pipelinerun_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/pod.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/register.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_paths.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resource_types_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/resultref.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/sidecar_replacements.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/step_replacements.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/swagger.json
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_interface.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/task_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_conversion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/taskrun_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/version_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/when_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/workspace_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_defaults.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipeline_resource_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/pipelineresource_validation.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/register.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1/runstatus_types.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/run/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/apis/validate/metadata.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/clientset.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/scheme/register.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/clustertask.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/condition.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/generated_expansion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipeline_client.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/run.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/task.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1/taskrun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/clustertask.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/generated_expansion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipeline_client.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/task.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1/taskrun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/factory.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/generic.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/interface.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/clustertask.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/condition.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/interface.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipeline.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/run.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/task.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1/taskrun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/clustertask.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/interface.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/pipeline.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/task.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1/taskrun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/clustertask.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/condition.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/expansion_generated.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipeline.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/run.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/task.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1alpha1/taskrun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/clustertask.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/expansion_generated.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/pipeline.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/pipelinerun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/task.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/listers/pipeline/v1beta1/taskrun.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/clientset.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/scheme/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/scheme/register.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1/doc.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1/generated_expansion.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1/pipelineresource.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1/resource_client.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/list/diff.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/reconciler/pipeline/dag/dag.go
create mode 100644 vendor/github.com/tektoncd/pipeline/pkg/substitution/substitution.go
create mode 100644 vendor/github.com/tektoncd/triggers/LICENSE
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/config/allowed_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/config/default.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/config/doc.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/config/feature_flags.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/config/store.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/config/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/contexts/contexts.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/register.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_interceptor_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/cluster_trigger_binding_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/doc.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/event_listener_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/interceptor_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/param.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/register.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_interface.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_binding_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_template_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_types_convert.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/trigger_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1alpha1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/cluster_trigger_binding_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/doc.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/event_listener_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/interceptor_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/param.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/register.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_interface.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_binding_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_defaults.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_template_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_types_convert.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/trigger_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/version_validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/v1beta1/zz_generated.deepcopy.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/apis/triggers/validation.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/clientset.go
create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/doc.go
create mode
100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/clusterinterceptor.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/clustertriggerbinding.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/doc.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/eventlistener.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/generated_expansion.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/trigger.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggerbinding.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggers_client.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1alpha1/triggertemplate.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/clustertriggerbinding.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/doc.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/eventlistener.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/generated_expansion.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/trigger.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/triggerbinding.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/triggers_client.go create mode 100644 vendor/github.com/tektoncd/triggers/pkg/client/clientset/versioned/typed/triggers/v1beta1/triggertemplate.go create mode 100644 vendor/github.com/vbatts/tar-split/LICENSE create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/common.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/format.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/reader.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime1.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_actime2.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/stat_unix.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/strconv.go create mode 100644 vendor/github.com/vbatts/tar-split/archive/tar/writer.go create mode 100644 vendor/github.com/whilp/git-urls/.travis.yml create mode 100644 vendor/github.com/whilp/git-urls/LICENSE create mode 100644 vendor/github.com/whilp/git-urls/Makefile create mode 100644 vendor/github.com/whilp/git-urls/README.md create mode 100644 vendor/github.com/whilp/git-urls/go.mod create mode 100644 vendor/github.com/whilp/git-urls/urls.go create mode 100644 vendor/github.com/xanzy/ssh-agent/.gitignore create mode 100644 vendor/github.com/xanzy/ssh-agent/LICENSE create mode 100644 
vendor/github.com/xanzy/ssh-agent/README.md create mode 100644 vendor/github.com/xanzy/ssh-agent/go.mod create mode 100644 vendor/github.com/xanzy/ssh-agent/go.sum create mode 100644 vendor/github.com/xanzy/ssh-agent/pageant_windows.go create mode 100644 vendor/github.com/xanzy/ssh-agent/sshagent.go create mode 100644 vendor/github.com/xanzy/ssh-agent/sshagent_windows.go delete mode 100644 vendor/go.uber.org/multierr/.travis.yml delete mode 100644 vendor/go.uber.org/multierr/go113.go create mode 100644 vendor/golang.org/x/crypto/blowfish/block.go create mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go create mode 100644 vendor/golang.org/x/crypto/blowfish/const.go create mode 100644 vendor/golang.org/x/crypto/cast5/cast5.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.s create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_generic.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_noasm.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go create mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.s create mode 100644 vendor/golang.org/x/crypto/chacha20/xor.go create mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/README create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_amd64_noasm.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64.s create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_arm64_noasm.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/fe_generic.go create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.checkpoint create mode 100644 vendor/golang.org/x/crypto/curve25519/internal/field/sync.sh create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go create mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519_go113.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go create mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64le.s create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.go create mode 100644 vendor/golang.org/x/crypto/internal/poly1305/sum_s390x.s create mode 
100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go create mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/armor.go create mode 100644 vendor/golang.org/x/crypto/openpgp/armor/encode.go create mode 100644 vendor/golang.org/x/crypto/openpgp/canonical_text.go create mode 100644 vendor/golang.org/x/crypto/openpgp/elgamal/elgamal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/errors/errors.go create mode 100644 vendor/golang.org/x/crypto/openpgp/keys.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/compressed.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/config.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/literal.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/ocfb.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/one_pass_signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/opaque.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/packet.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/private_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/public_key_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/reader.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/signature_v3.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetric_key_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/symmetrically_encrypted.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userattribute.go create mode 100644 vendor/golang.org/x/crypto/openpgp/packet/userid.go create mode 100644 vendor/golang.org/x/crypto/openpgp/read.go create mode 100644 vendor/golang.org/x/crypto/openpgp/s2k/s2k.go create mode 100644 vendor/golang.org/x/crypto/openpgp/write.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/client.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/forward.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring.go create mode 100644 vendor/golang.org/x/crypto/ssh/agent/server.go create mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go create mode 100644 vendor/golang.org/x/crypto/ssh/certs.go create mode 100644 vendor/golang.org/x/crypto/ssh/channel.go create mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go create mode 100644 vendor/golang.org/x/crypto/ssh/client.go create mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go create mode 100644 vendor/golang.org/x/crypto/ssh/common.go create mode 100644 vendor/golang.org/x/crypto/ssh/connection.go create mode 100644 vendor/golang.org/x/crypto/ssh/doc.go create mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go create mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go create mode 100644 vendor/golang.org/x/crypto/ssh/kex.go create mode 100644 vendor/golang.org/x/crypto/ssh/keys.go create mode 100644 vendor/golang.org/x/crypto/ssh/knownhosts/knownhosts.go create mode 100644 vendor/golang.org/x/crypto/ssh/mac.go create mode 100644 vendor/golang.org/x/crypto/ssh/messages.go create mode 100644 vendor/golang.org/x/crypto/ssh/mux.go create mode 100644 vendor/golang.org/x/crypto/ssh/server.go create mode 100644 
vendor/golang.org/x/crypto/ssh/session.go create mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go create mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go create mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go create mode 100644 vendor/golang.org/x/crypto/ssh/transport.go create mode 100644 vendor/golang.org/x/net/idna/go118.go create mode 100644 vendor/golang.org/x/net/idna/pre_go118.go create mode 100644 vendor/golang.org/x/sys/cpu/asm_aix_ppc64.s create mode 100644 vendor/golang.org/x/sys/cpu/byteorder.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_aix.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_arm64.s rename vendor/golang.org/x/{term/term_unix_aix.go => sys/cpu/cpu_gc_arm64.go} (57%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.c create mode 100644 vendor/golang.org/x/sys/cpu/cpu_gccgo_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_linux_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_mipsx.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_netbsd_arm64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_arm.go rename vendor/golang.org/x/{term/term_unix_linux.go => sys/cpu/cpu_other_arm64.go} (57%) create mode 100644 vendor/golang.org/x/sys/cpu/cpu_other_mips64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_ppc64x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_riscv64.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_s390x.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_wasm.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_x86.s create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos.go create mode 100644 vendor/golang.org/x/sys/cpu/cpu_zos_s390x.go create mode 100644 vendor/golang.org/x/sys/cpu/hwcap_linux.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_gccgo.go create mode 100644 vendor/golang.org/x/sys/cpu/syscall_aix_ppc64_gc.go delete mode 100644 vendor/golang.org/x/term/term_solaris.go rename vendor/golang.org/x/term/{term_unix_zos.go => term_unix_other.go} (63%) create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/checked.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/eval.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/explain.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/syntax.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/api/expr/v1alpha1/value.pb.go create mode 100644 
vendor/google.golang.org/protobuf/types/dynamicpb/dynamic.go create mode 100644 vendor/google.golang.org/protobuf/types/known/emptypb/empty.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/structpb/struct.pb.go create mode 100644 vendor/gopkg.in/natefinch/npipe.v2/.gitignore create mode 100644 vendor/gopkg.in/natefinch/npipe.v2/LICENSE.txt create mode 100644 vendor/gopkg.in/natefinch/npipe.v2/README.md create mode 100644 vendor/gopkg.in/natefinch/npipe.v2/doc.go create mode 100644 vendor/gopkg.in/natefinch/npipe.v2/npipe_windows.go create mode 100644 vendor/gopkg.in/natefinch/npipe.v2/znpipe_windows_386.go create mode 100644 vendor/gopkg.in/natefinch/npipe.v2/znpipe_windows_amd64.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/.gitignore create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/.travis.yml create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/DCO create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/LICENSE create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/MAINTAINERS create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/Makefile create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/README.md create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/appveyor.yml create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/fs.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/go.mod create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/go.sum create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/helper/chroot/chroot.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/helper/polyfill/polyfill.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/osfs/os.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/osfs/os_posix.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/osfs/os_windows.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/util/glob.go create mode 100644 vendor/gopkg.in/src-d/go-billy.v4/util/util.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/.gitignore create mode 100644 vendor/gopkg.in/src-d/go-git.v4/.travis.yml create mode 100644 vendor/gopkg.in/src-d/go-git.v4/CODE_OF_CONDUCT.md create mode 100644 vendor/gopkg.in/src-d/go-git.v4/COMPATIBILITY.md create mode 100644 vendor/gopkg.in/src-d/go-git.v4/CONTRIBUTING.md create mode 100644 vendor/gopkg.in/src-d/go-git.v4/DCO create mode 100644 vendor/gopkg.in/src-d/go-git.v4/LICENSE create mode 100644 vendor/gopkg.in/src-d/go-git.v4/MAINTAINERS create mode 100644 vendor/gopkg.in/src-d/go-git.v4/Makefile create mode 100644 vendor/gopkg.in/src-d/go-git.v4/README.md create mode 100644 vendor/gopkg.in/src-d/go-git.v4/appveyor.yml create mode 100644 vendor/gopkg.in/src-d/go-git.v4/blame.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/config/branch.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/config/config.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/config/modules.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/config/refspec.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/go.mod create mode 100644 vendor/gopkg.in/src-d/go-git.v4/go.sum create mode 100644 vendor/gopkg.in/src-d/go-git.v4/internal/revision/parser.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/internal/revision/scanner.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/internal/revision/token.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/internal/url/url.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/object_walker.go create mode 100644 
vendor/gopkg.in/src-d/go-git.v4/options.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/buffer_lru.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/cache/object_lru.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/error.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/filemode/filemode.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/decoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/encoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/option.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/config/section.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/patch.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/diff/unified_encoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/dir.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/matcher.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/gitignore/pattern.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/decoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/encoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/idxfile.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/idxfile/writer.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/decoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/encoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/index.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/index/match.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/reader.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/objfile/writer.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_index.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/delta_selector.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/diff_delta.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/encoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/error.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/fsobject.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/object_pack.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/packfile.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/parser.go create mode 100644 
vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/patch_delta.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/packfile/scanner.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/encoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/format/pktline/scanner.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/hash.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/memory.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/blob.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/change_adaptor.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_bfs_filtered.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_ctime.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/commit_walker_file.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/difftree.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/file.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/merge_base.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/object.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/patch.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tag.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/tree.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/object/treenoder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_decode.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/advrefs_encode.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/capability.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/capability/list.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/report_status.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/shallowupd.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/demux.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/sideband/muxer.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/srvresp.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_decode.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/ulreq_encode.go create mode 100644 
vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_decode.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/updreq_encode.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackreq.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/protocol/packp/uppackresp.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/reference.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/revision.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/revlist/revlist.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/index.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/object.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/reference.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/shallow.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/storer/storer.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/client/client.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/client.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/file/server.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/git/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/receive_pack.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/http/upload_pack.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/internal/common/server.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/loader.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/server/server.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/auth_method.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/plumbing/transport/ssh/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/prune.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/references.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/remote.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/repository.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/status.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/config.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/deltaobject.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_rewrite_packed_refs.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/dotgit_setref.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/dotgit/writers.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/index.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/module.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/object.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/reference.go create mode 100644 
vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/shallow.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/filesystem/storage.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/memory/storage.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/storage/storer.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/submodule.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/binary/read.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/binary/write.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/diff/diff.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/ioutil/common.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/change.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/difftree.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doc.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/doubleiter.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/filesystem/node.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/index/node.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/internal/frame/frame.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/iter.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/noder.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/utils/merkletrie/noder/path.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/worktree.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/worktree_bsd.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/worktree_commit.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/worktree_linux.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/worktree_status.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/worktree_unix_other.go create mode 100644 vendor/gopkg.in/src-d/go-git.v4/worktree_windows.go create mode 100644 vendor/gopkg.in/warnings.v0/LICENSE create mode 100644 vendor/gopkg.in/warnings.v0/README create mode 100644 vendor/gopkg.in/warnings.v0/warnings.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/rand/rand.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/remotecommand/constants.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/doc.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/errorstream.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/reader.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/resize.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v1.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v2.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v3.go create mode 100644 vendor/k8s.io/client-go/tools/remotecommand/v4.go create mode 100644 vendor/k8s.io/client-go/transport/spdy/spdy.go create mode 100644 vendor/k8s.io/client-go/util/exec/exec.go delete mode 100644 
vendor/k8s.io/klog/.travis.yml delete mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md delete mode 100644 vendor/k8s.io/klog/LICENSE delete mode 100644 vendor/k8s.io/klog/README.md delete mode 100644 vendor/k8s.io/klog/RELEASE.md delete mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS delete mode 100644 vendor/k8s.io/klog/code-of-conduct.md delete mode 100644 vendor/k8s.io/klog/go.mod delete mode 100644 vendor/k8s.io/klog/go.sum delete mode 100644 vendor/k8s.io/klog/klog.go delete mode 100644 vendor/k8s.io/klog/klog_file.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/.gitignore create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/LICENSE create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/contact_info.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/external_docs.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/license.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go create mode 100644 vendor/knative.dev/kn-plugin-func/.codecov.yaml create mode 100644 vendor/knative.dev/kn-plugin-func/.gitattributes create mode 100644 vendor/knative.dev/kn-plugin-func/.gitignore create mode 100644 vendor/knative.dev/kn-plugin-func/.golangci.yaml create mode 100644 vendor/knative.dev/kn-plugin-func/.ko.yaml create mode 100644 vendor/knative.dev/kn-plugin-func/AUTHORS create mode 100644 vendor/knative.dev/kn-plugin-func/CHANGELOG.md create mode 100644 vendor/knative.dev/kn-plugin-func/LICENSE create mode 100644 vendor/knative.dev/kn-plugin-func/Makefile create mode 100644 vendor/knative.dev/kn-plugin-func/README.md create mode 100644 vendor/knative.dev/kn-plugin-func/buildpacks/builder.go create mode 100644 vendor/knative.dev/kn-plugin-func/ci create mode 100644 vendor/knative.dev/kn-plugin-func/client.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/build.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/client.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/completion.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/completion_util.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/config.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/config_envs.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/config_labels.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/config_volumes.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/create.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/delete.go create mode 
100644 vendor/knative.dev/kn-plugin-func/cmd/deploy.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/format.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/info.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/invoke.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/list.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/repository.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/root.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/run.go create mode 100644 vendor/knative.dev/kn-plugin-func/cmd/version.go create mode 100644 vendor/knative.dev/kn-plugin-func/config.go create mode 100644 vendor/knative.dev/kn-plugin-func/docker/creds/credentials.go create mode 100644 vendor/knative.dev/kn-plugin-func/docker/docker_client.go create mode 100644 vendor/knative.dev/kn-plugin-func/docker/docker_client_generated.go create mode 100644 vendor/knative.dev/kn-plugin-func/docker/pusher.go create mode 100644 vendor/knative.dev/kn-plugin-func/docker/runner.go create mode 100644 vendor/knative.dev/kn-plugin-func/filesystem.go create mode 100644 vendor/knative.dev/kn-plugin-func/function.go create mode 100644 vendor/knative.dev/kn-plugin-func/function_buildtype.go create mode 100644 vendor/knative.dev/kn-plugin-func/function_envs.go create mode 100644 vendor/knative.dev/kn-plugin-func/function_git.go create mode 100644 vendor/knative.dev/kn-plugin-func/function_labels.go create mode 100644 vendor/knative.dev/kn-plugin-func/function_migrations.go create mode 100644 vendor/knative.dev/kn-plugin-func/function_options.go create mode 100644 vendor/knative.dev/kn-plugin-func/function_volumes.go create mode 100644 vendor/knative.dev/kn-plugin-func/go.mod create mode 100644 vendor/knative.dev/kn-plugin-func/go.sum create mode 100644 vendor/knative.dev/kn-plugin-func/http/transport.go create mode 100644 vendor/knative.dev/kn-plugin-func/instances.go create mode 100644 vendor/knative.dev/kn-plugin-func/invoke.go create mode 100644 vendor/knative.dev/kn-plugin-func/job.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/client.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/configmaps.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/dialer.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/labels/labels.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/persistent_volumes.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/role_bidings.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/secrets.go create mode 100644 vendor/knative.dev/kn-plugin-func/k8s/service_accounts.go create mode 100644 vendor/knative.dev/kn-plugin-func/knative/client.go create mode 100644 vendor/knative.dev/kn-plugin-func/knative/deployer.go create mode 100644 vendor/knative.dev/kn-plugin-func/knative/describer.go create mode 100644 vendor/knative.dev/kn-plugin-func/knative/lister.go create mode 100644 vendor/knative.dev/kn-plugin-func/knative/remover.go create mode 100644 vendor/knative.dev/kn-plugin-func/openshift/openshift.go create mode 100644 vendor/knative.dev/kn-plugin-func/pipelines/tekton/client.go create mode 100644 vendor/knative.dev/kn-plugin-func/pipelines/tekton/defaults.go create mode 100644 vendor/knative.dev/kn-plugin-func/pipelines/tekton/pipeplines_provider.go create mode 100644 vendor/knative.dev/kn-plugin-func/pipelines/tekton/resources.go create mode 100644 vendor/knative.dev/kn-plugin-func/pipelines/tekton/tasks.go create mode 100644 vendor/knative.dev/kn-plugin-func/pkged.go create 
mode 100644 vendor/knative.dev/kn-plugin-func/plugin/plugin.go create mode 100644 vendor/knative.dev/kn-plugin-func/progress/progress.go create mode 100644 vendor/knative.dev/kn-plugin-func/repositories.go create mode 100644 vendor/knative.dev/kn-plugin-func/repository.go create mode 100644 vendor/knative.dev/kn-plugin-func/sortedset.go create mode 100644 vendor/knative.dev/kn-plugin-func/ssh/ssh_agent_conf.go create mode 100644 vendor/knative.dev/kn-plugin-func/ssh/ssh_agent_conf_windows.go create mode 100644 vendor/knative.dev/kn-plugin-func/ssh/ssh_dialer.go create mode 100644 vendor/knative.dev/kn-plugin-func/ssh/terminal.go create mode 100644 vendor/knative.dev/kn-plugin-func/template.go create mode 100644 vendor/knative.dev/kn-plugin-func/templates.go create mode 100644 vendor/knative.dev/kn-plugin-func/utils/names.go create mode 100644 vendor/knative.dev/kn-plugin-func/version.txt create mode 100644 vendor/knative.dev/pkg/kmap/lookup.go create mode 100644 vendor/knative.dev/pkg/kmap/map.go create mode 100644 vendor/knative.dev/pkg/leaderelection/doc.go

diff --git a/go.mod b/go.mod
index c317933f58..baa44d0e4a 100644
--- a/go.mod
+++ b/go.mod
@@ -9,25 +9,31 @@ require (
 	github.com/spf13/cobra v1.2.1
 	github.com/spf13/pflag v1.0.5
 	github.com/spf13/viper v1.9.0
-	golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d
+	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
 	gotest.tools/v3 v3.0.3
-	k8s.io/api v0.21.4
-	k8s.io/apiextensions-apiserver v0.21.4
-	k8s.io/apimachinery v0.22.3
+	k8s.io/api v0.22.5
+	k8s.io/apiextensions-apiserver v0.22.5
+	k8s.io/apimachinery v0.22.5
 	k8s.io/cli-runtime v0.21.4
-	k8s.io/client-go v0.21.4
-	k8s.io/code-generator v0.21.4
+	k8s.io/client-go v0.22.5
+	k8s.io/code-generator v0.22.5
 	knative.dev/eventing v0.27.2
-	knative.dev/hack v0.0.0-20211122163517-fe1340f21191
+	knative.dev/hack v0.0.0-20220216040439-0456e8bf6547
 	knative.dev/kn-plugin-event v0.27.1
+	knative.dev/kn-plugin-func v0.21.0
 	knative.dev/kn-plugin-source-kafka v0.27.0
 	knative.dev/networking v0.0.0-20211101215640-8c71a2708e7d
-	knative.dev/pkg v0.0.0-20211101212339-96c0204a70dc
+	knative.dev/pkg v0.0.0-20220215153400-3c00bb0157b9
 	knative.dev/serving v0.27.1
 	sigs.k8s.io/yaml v1.3.0
 )
 
 replace (
+	k8s.io/api => k8s.io/api v0.21.4
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.21.4
 	k8s.io/apimachinery => k8s.io/apimachinery v0.21.4
+	k8s.io/client-go => k8s.io/client-go v0.21.4
+	k8s.io/code-generator => k8s.io/code-generator v0.21.4
 	knative.dev/kn-plugin-event => github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa
+	knative.dev/kn-plugin-func => github.com/openshift-knative/kn-plugin-func v0.22.1-0.20220223003546-b1c8c38b7608
 )
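The go.mod hunk above is the substantive change of this patch: the require block now records the upstream versions (k8s.io modules at v0.22.5, knative.dev/kn-plugin-func at v0.21.0), while the new replace directives pin the k8s.io modules back to v0.21.4 and redirect kn-plugin-func to the openshift-knative fork. A replace directive only changes where a module's source is fetched from; import paths in the Go sources stay unchanged. A minimal sketch of the mechanism follows (the example.com module paths and versions are hypothetical, not part of this patch):

// go.mod sketch, illustrative only; example.com paths are hypothetical.
module example.com/app

go 1.16

// The declared dependency version, as recorded by the require block.
require example.com/lib v1.2.0

// Every import of example.com/lib in this build is satisfied by the
// fork pinned below; no .go source file needs to change.
replace example.com/lib => example.com/fork/lib v1.2.1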
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= @@ -13,8 +16,10 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.67.0/go.mod h1:YNan/mUhNZFrYUor0vqrsQ0Ffl7Xtm/ACOy/vsTS858= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= @@ -43,20 +48,33 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= +cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.18.2/go.mod h1:AiIj7BWXyhO5gGVmYJ+S8tbkCx3yb0IMjua8Aw4naVM= +code.gitea.io/sdk/gitea v0.14.0/go.mod h1:89WiyOX1KEcvjP66sRHdu0RafojGo60bT9UqW17VbWs= contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d h1:LblfooH1lKOpp1hIhukktmSAxFkqMPFk9KR6iZ0MJNI= contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d/go.mod h1:IshRmMJBhDfFj5Y67nVhMYTTIze91RUeT73ipWKs/GY= contrib.go.opencensus.io/exporter/prometheus v0.2.1-0.20200609204449-6bcf6f8577f0/go.mod h1:MjHoxkI7Ny27toPeFkRbXbzVjzIGkwOAptrAy8Mxtm8= +contrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE= contrib.go.opencensus.io/exporter/prometheus v0.4.0 h1:0QfIkj9z/iVZgK31D9H9ohjjIDApI2GOPScCKwxedbs= contrib.go.opencensus.io/exporter/prometheus v0.4.0/go.mod h1:o7cosnyfuPVK0tB8q0QmaQNhGnptITnPQB+z1+qeFB0= +contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= contrib.go.opencensus.io/exporter/zipkin v0.1.2/go.mod h1:mP5xM3rrgOjpn79MM8fZbj3gsxcuytSqtH0dxSWW1RE= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.maze.io/go/duration 
v0.0.0-20160924141736-faac084b6075/go.mod h1:y9MVD7vG8A7scVb2MvTVxN0z+DHTzxnWgOisOnFe5kc=
+github.com/14rcole/gopopulate v0.0.0-20180821133914-b175b219e774/go.mod h1:6/0dYRLLXyJjbkIPeeGyoJ/eKOSI0eU6eTlCBYibgd0=
+github.com/AlecAivazis/survey/v2 v2.2.12/go.mod h1:6d4saEvBsfSHXeN1a5OA5m2+HJ2LuVokllnC77pAIKI=
+github.com/AlecAivazis/survey/v2 v2.2.14 h1:aTYTaCh1KLd+YWilkeJ65Ph78g48NVQ3ay9xmaNIyhk=
+github.com/AlecAivazis/survey/v2 v2.2.14/go.mod h1:TH2kPCDU3Kqq7pLbnCWwZXDBjnhZtmsCle5EiYDJ2fg=
+github.com/Antonboom/errname v0.1.5/go.mod h1:DugbBstvPFQbv/5uLcRRzfrNqKE9tVdVCqWCLp6Cifo=
+github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H20D3IPZBo=
 github.com/Azure/azure-amqp-common-go/v3 v3.0.1/go.mod h1:PBIGdzcO1teYoufTKMcGibdKaYZv4avS+O6LNIp8bq0=
 github.com/Azure/azure-event-hubs-go/v3 v3.3.2/go.mod h1:sszMsQpFy8Au2s2NColbnJY8lRVm1koW0XxBJ3rN5TY=
 github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
@@ -70,13 +88,14 @@ github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHN
 github.com/Azure/go-amqp v0.13.0/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs=
 github.com/Azure/go-amqp v0.13.1/go.mod h1:qj+o8xPCz9tMSbQ83Vp8boHahuRDl5mkNHyt1xlxUTs=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795 h1:q4kDoSrHgRoD6okimjwWJOVKyxEUNS2JIuwt+EqcIqQ=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
 github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
-github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.3/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest v0.11.17 h1:2zCdHwNgRH+St1J+ZMf66xI8aLr/5KMy+wWLH97zwYM=
@@ -115,11 +134,22 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
 github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
+github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher v0.0.0-20191203181535-308b93ad1f39/go.mod h1:yfGmCjKuUzk9WzubMlW2zwjhCraIc/J+M40cufdemRM=
 github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -129,6 +159,7 @@ github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JP
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
 github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
@@ -137,12 +168,22 @@ github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg3
 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim v0.8.21 h1:btRfUDThBE5IKcvI8O8jOiIkujUsAMBSRsYDYmEi6oM=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
+github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
+github.com/Netflix/go-expect v0.0.0-20200312175327-da48e75238e2/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc=
+github.com/Netflix/go-expect v0.0.0-20210722184520-ef0bf57d82b3 h1:DM0Olh3jQEm4gVPLF0mI49+fm1+M1fXDtumX/fN/G4A=
+github.com/Netflix/go-expect v0.0.0-20210722184520-ef0bf57d82b3/go.mod h1:68ORG0HSEWDuH5Eh73AFbYWZ1zT4Y+b0vhOa+vZRUdI=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM=
+github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ=
+github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo=
+github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
@@ -161,12 +202,20 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
 github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae h1:ePgznFqEG1v3AjMklnK8H7BSc++FDSo7xfK9K7Af+0Y=
 github.com/Shopify/toxiproxy/v2 v2.1.6-0.20210914104332-15ea381dcdae/go.mod h1:/cvHQkZ1fst0EmZnA5dFtiQdWCNCFYzb+uE2vqVgvx0=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
+github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
 github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
+github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo=
+github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk=
+github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4=
 github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
 github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
 github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210420163308-c1402a70e2f1/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8=
 github.com/ahmetb/gen-crd-api-reference-docs v0.3.1-0.20210609063737-0067dc6dcea2/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
 github.com/alecthomas/jsonschema v0.0.0-20180308105923-f2c93856175a/go.mod h1:qpebaTNSsyUn5rPSJMsfqEtDw71TTggXM6stUDI16HA=
+github.com/alecthomas/jsonschema v0.0.0-20210526225647-edb03dcab7bc/go.mod h1:/n6+1/DWPltRLWL/VKyUxg6tzsl5kHUCcraimt4vr60=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -174,27 +223,54 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30=
 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
+github.com/andybalholm/brotli v1.0.2/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y=
+github.com/andybalholm/brotli v1.0.3/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
+github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f h1:0cEys61Sr2hUBEXfNV8eyQP01oZuBgoMeHunebPirK8=
+github.com/antlr/antlr4 v0.0.0-20200503195918-621b933c7a7f/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y=
+github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
 github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
 github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
+github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0=
+github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
+github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
+github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
+github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
 github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
 github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
 github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
 github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
+github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
+github.com/ashanbrown/forbidigo v1.2.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI=
+github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9Dnez7/ESBqc4EdrdNlryeo7d0KcW1ftXHm7nU/UU=
 github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
 github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.28.2/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.31.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
+github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.35.24/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
+github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.37.1/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
 github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
 github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
 github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -204,22 +280,35 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0 h1:Kn4yilvwNtMACtf1eYDlG8H77R07mZSPbMjLyS07ChA=
 github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
 github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/blendle/zapdriver v1.3.1 h1:C3dydBOWYRiOk+B8X9IVZ5IOe+7cl+tGOexN4QqHfpE=
 github.com/blendle/zapdriver v1.3.1/go.mod h1:mdXfREi6u5MArG4j9fewC+FGnXaBR+T4Ox4J2u4eHCc=
+github.com/blizzy78/varnamelen v0.3.0/go.mod h1:hbwRdBvoBqxk34XyQ6HA0UH3G0/1TKuv5AC4eaBT0Ec=
+github.com/bluekeyes/go-gitdiff v0.4.0/go.mod h1:QpfYYO1E0fTVHVZAZKiRjtSGY9823iCdvGXBcEzHGbM=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bmizerany/perks v0.0.0-20141205001514-d9a9656a3a4b/go.mod h1:ac9efd0D1fsDb3EJvhqgXRbFx7bs2wqZ10HQPeU8U/Q=
+github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc=
+github.com/breml/bidichk v0.1.1/go.mod h1:zbfeitpevDUGI7V91Uzzuwrn4Vls8MoBMrwtt78jmso=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771 h1:QhUkvEeYhnyj80genY7ktXVLttVc52zstwPTUDrImvE=
+github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771/go.mod h1:ZfCbsFruoxG0vbEVMsX/afizEo4vn2e88Km7rJ1M2D8=
+github.com/buildpacks/lifecycle v0.12.0 h1:H2K4uYfe/y1mfGekOPcR5flmFgmEmaVK9+I5V9Iz+M8=
+github.com/buildpacks/lifecycle v0.12.0/go.mod h1:ub/iI72mgJT4d7JgcL06KmtvU4X4HTnFgUyqJKVYH2o=
+github.com/buildpacks/pack v0.22.0 h1:JWQQBt6qdr668T9WDRMofJjfRUZhJxqbd7P8nshnEVk=
+github.com/buildpacks/pack v0.22.0/go.mod h1:kH9L19KUXHiJ8TEAUVLhykNgmTJSFtC1fIpNCcfMVEw=
+github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc=
 github.com/c2h5oh/datasize v0.0.0-20171227191756-4eba002a5eae/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
 github.com/c2h5oh/datasize v0.0.0-20200112174442-28bbd4740fee/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
 github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
@@ -232,6 +321,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
 github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg=
+github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU=
+github.com/checkpoint-restore/go-criu/v4 v4.0.2/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
@@ -239,6 +331,7 @@ github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWR
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
 github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200507155900-a9f01edf17e3/go.mod h1:XT+cAw5wfvsodedcijoh1l9cf7v1x9FlFB/3VmF/O8s=
 github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
@@ -248,9 +341,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cloudevents/conformance v0.2.0/go.mod h1:rHKDwylBH89Rns6U3wL9ww8bg9/4GbwRCDNuyoC6bcc=
 github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.4.1/go.mod h1:lhEpxMrIUkeu9rVRgoAbyqZ8GR8Hd3DUy+thHUxAHoI=
 github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.4.1/go.mod h1:8aqqE69pYzEgN2sCa6+8fTQ+FpE7QMwYnLkDKyKVtYw=
+github.com/cloudevents/sdk-go/v2 v2.1.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU=
 github.com/cloudevents/sdk-go/v2 v2.2.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU=
-github.com/cloudevents/sdk-go/v2 v2.4.1 h1:rZJoz9QVLbWQmnvLPDFEmv17Czu+CfSPwMO6lhJ72xQ=
 github.com/cloudevents/sdk-go/v2 v2.4.1/go.mod h1:MZiMwmAh5tGj+fPFvtHv9hKurKqXtdB9haJYMJ/7GJY=
+github.com/cloudevents/sdk-go/v2 v2.5.0 h1:Ts6aLHbBUJfcNcZ4ouAfJ4+Np7SE1Yf2w4ADKRCd7Fo=
+github.com/cloudevents/sdk-go/v2 v2.5.0/go.mod h1:nlXhgFkf0uTopxmRXalyMwS2LG70cRGPrxzmjJgSG0U=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@@ -259,6 +354,7 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
@@ -278,6 +374,7 @@ github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f2
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
 github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -294,6 +391,7 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7 h1:rQyoYtj4KddB3bxG6SAqd4+08gePNyJjRqvOIfV3rkM=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -301,6 +399,7 @@ github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL
 github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
 github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
 github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
@@ -326,6 +425,7 @@ github.com/containerd/stargz-snapshotter/estargz v0.0.0-20201223015020-a9a0c2d64
 github.com/containerd/stargz-snapshotter/estargz v0.6.4/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
 github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
 github.com/containerd/stargz-snapshotter/estargz v0.8.0/go.mod h1:mwIwuwb+D8FX2t45Trwi0hmWmZm5VW7zPP/rekwhWQU=
+github.com/containerd/stargz-snapshotter/estargz v0.9.0 h1:PkB6BSTfOKX23erT2GkoUKkJEcXfNcyKskIViK770v8=
 github.com/containerd/stargz-snapshotter/estargz v0.9.0/go.mod h1:aE5PCyhFMwR8sbrErO5eM2GcvkyXTTJremG883D4qF0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
@@ -346,9 +446,15 @@ github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ
 github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/image/v5 v5.10.6 h1:FIx2FFfE/RzaIAIRT1OH41RSPDXqrvlL/TP7x/TTPzc=
+github.com/containers/image/v5 v5.10.6/go.mod h1:SgIbWEedCNBbn2FI5cH0/jed1Ecy2s8XK5zTxvJTzII=
+github.com/containers/libtrust v0.0.0-20190913040956-14b96171aa3b/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.0.3/go.mod h1:CUBa+8MRNL/VkpxYIpaMtgn1WgXGyvPQj8jcy0EVG6g=
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/storage v1.24.8 h1:v3pliVY5Jx1ZNJDoyCvRBrXMxlGLakBsxbBVWoaCI8Q=
+github.com/containers/storage v1.24.8/go.mod h1:YC+2pY8SkfEAcZkwycxYbpK8EiRbx5soPPwz9dxe4IQ=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -357,10 +463,13 @@ github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka
 github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@@ -374,48 +483,71 @@ github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKY
 github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc=
 github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denis-tingajkin/go-header v0.4.2/go.mod h1:eLRHAVXzE5atsKAnNRDB90WHCFFnBUn4RN0nRcs1LJA=
+github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
+github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/dgraph-io/ristretto v0.0.1 h1:cJwdnj42uV8Jg4+KLrYovLiCgIfz9wtWm6E6KA+1tLs=
+github.com/dgraph-io/ristretto v0.0.1/go.mod h1:T40EBc7CJke8TkpiYfGGKAeFjSaxuFXhuXRyumBd6RE=
 github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
 github.com/dgryski/go-gk v0.0.0-20140819190930-201884a44051/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E=
 github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlDt0aOla0RYJJVeqHZlWfOm2UIxHaqPB46E=
 github.com/dgryski/go-lttb v0.0.0-20180810165845-318fcdf10a77/go.mod h1:Va5MyIzkU0rAM92tn3hb3Anb7oz7KcnixF49+2wOMe4=
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimfeld/httppath v0.0.0-20170720192232-ee938bf73598/go.mod h1:0FpDmbrt36utu8jEmeU05dPC9AB5tsLYVVi+ZHfyuwI=
+github.com/dimfeld/httptreemux/v5 v5.0.2/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw=
+github.com/dimfeld/httptreemux/v5 v5.2.2/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw=
+github.com/dimfeld/httptreemux/v5 v5.3.0/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
 github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.10+incompatible h1:kcbwdgWbrBOH8QwQzaJmyriHwF7XIl4HT1qh0HTRys4=
 github.com/docker/cli v20.10.10+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20191219165747-a9416c67da9f/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.10+incompatible h1:GKkP0T7U4ks6X3lmmHKC2QDprnpRJor2Z5a8m62R9ZM=
 github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
 github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
 github.com/dprotaso/go-yit v0.0.0-20191028211022-135eb7262960/go.mod h1:9HQzr9D/0PGwMEbC3d5AB7oi67+h4TsQqItC1GVYG58=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
@@ -428,11 +560,14 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
 github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4=
 github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
 github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
@@ -442,18 +577,27 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
 github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/esimonov/ifshort v1.0.3/go.mod h1:yZqNJUrNn20K8Q9n2CrjTKYyVEmX209Hgu+M1LBpeZE=
+github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY=
 github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ=
 github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
-github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
+github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch/v5 v5.2.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww=
 github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
+github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
@@ -469,17 +613,40 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4
 github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
 github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM=
+github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell v1.2.0 h1:ikixzsxc8K8o3V2/CEmyoEW8mJZaNYQQ3NP3VIQdUe4=
+github.com/gdamore/tcell v1.2.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM=
+github.com/gdamore/tcell/v2 v2.3.3/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU=
+github.com/gdamore/tcell/v2 v2.4.0 h1:W6dxJEmaxYvhICFoTY3WrLLEXsQ11SaFnKGVEXW57KM=
+github.com/gdamore/tcell/v2 v2.4.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU=
+github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
+github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
 github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
 github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
+github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM=
 github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6w=
 github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
+github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
+github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
+github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34=
+github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
+github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8=
+github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0=
+github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4=
+github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc=
 github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gormigrate/gormigrate/v2 v2.0.0/go.mod h1:YuVJ+D/dNt4HWrThTBnjgZuRbt7AuwINeg4q52ZE3Jw=
 github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -496,14 +663,24 @@ github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTg
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
 github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
 github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
 github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
 github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
+github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
 github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
+github.com/go-openapi/analysis v0.19.10/go.mod h1:qmhS3VNFxBlquFJ0RGoDtylO9y4pgTAUNE9AEEMdlJQ=
+github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7workfHwuVjNVk=
 github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
 github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
 github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
+github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
@@ -522,10 +699,19 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf
 github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
 github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
+github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI=
 github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
+github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY=
+github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc=
+github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4=
+github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o=
 github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
 github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
 github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
+github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo=
+github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98=
+github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
 github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
 github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
 github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
@@ -533,40 +719,98 @@ github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcs
 github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
 github.com/go-openapi/spec v0.19.5/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
 github.com/go-openapi/spec v0.19.6/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.8/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk=
+github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU=
+github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ=
 github.com/go-openapi/spec v0.20.2 h1:pFPUZsiIbZ20kLUcuCGeuQWG735fPMxW7wHF9BWlnQU=
 github.com/go-openapi/spec v0.20.2/go.mod h1:RW6Xcbs6LOyWLU/mXGdzn2Qc+3aj+ASfI7rvSZh1Vls=
 github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
 github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
 github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
+github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
 github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
+github.com/go-openapi/strfmt v0.19.4/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
 github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
+github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
+github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
 github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY=
+github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5HTt47gr72M=
 github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
 github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
 github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
 github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
-github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo=
 github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
+github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8=
+github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4=
+github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI=
+github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0=
+github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
+github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/go-testfixtures/testfixtures/v3 v3.2.0/go.mod h1:RZctY24ixituGC73XlAV1gkCwYMVwiSwPm26MNlQIhE=
+github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
+github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ=
+github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY=
+github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw=
+github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw=
+github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU=
+github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI=
+github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc=
+github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8=
+github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
+github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
 github.com/go-training/helloworld v0.0.0-20200225145412-ba5f4379d78b/go.mod h1:hGGmX3bRUkYkc9aKA6mkUxi6d+f1GmZF1je0FlVTgwU=
+github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
+github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
+github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY=
+github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg=
+github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
+github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs=
+github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
+github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI=
 github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
 github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
+github.com/gobuffalo/flect v0.2.4/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8=
+github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk=
+github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28=
+github.com/gobuffalo/genny v0.1.0/go.mod h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo=
+github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk=
+github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw=
+github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360=
+github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
+github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
+github.com/gobuffalo/here v0.6.0 h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI=
 github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM=
+github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
+github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
+github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
 github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
@@ -574,10 +818,14 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
 github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -595,7 +843,9 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -619,6 +869,16 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
+github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk=
+github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
+github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
+github.com/golangci/golangci-lint v1.43.0/go.mod h1:VIFlUqidx5ggxDfQagdvd9E67UjMXtTHBkBQ7sHoC5Q=
+github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
+github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
+github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA=
+github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2/go.mod h1:LK+zW4MpyytAWQRz0M4xnzEk50lSvqDQKfx304apFkY=
+github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ=
 github.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=
 github.com/gonum/diff v0.0.0-20181124234638-500114f11e71/go.mod h1:22dM4PLscQl+Nzf64qNBurVJvfyvZELT0iRW2l/NN70=
 github.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=
@@ -631,6 +891,11 @@ github.com/gonum/stat v0.0.0-20181125101827-41a0da705a5b/go.mod h1:Z4GIJBJO3Wa4g
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/cel-go v0.7.3 h1:8v9BSN0avuGwrHFKNCjfiQ/CE6+D6sW+BDyOVoEeP6o=
+github.com/google/cel-go v0.7.3/go.mod h1:4EtyFAHT5xNr0Msu0MJjyGxPUgdr9DlcaPyzLt/kkt8=
+github.com/google/cel-spec v0.5.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
+github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
+github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -645,18 +910,27 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-containerregistry v0.4.1-0.20210128200529-19c2b639fab1/go.mod h1:GU9FUA/X9rd2cV3ZoUNaWihp27tki6/38EsVzL2Dyzc=
+github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9/go.mod h1:R5WRYyTdQqTchlBhX4q+WICGh8HQIL5wDFoFZv7Jq6Q=
 github.com/google/go-containerregistry v0.5.2-0.20210609162550-f0ce2270b3b4/go.mod h1:R5WRYyTdQqTchlBhX4q+WICGh8HQIL5wDFoFZv7Jq6Q=
+github.com/google/go-containerregistry v0.5.2-0.20210709161016-b448abac9a70/go.mod h1:nS0LmwaZT5kotebyNVC7ik7lC4OHOzGcctvvSosKlAc=
 github.com/google/go-containerregistry v0.6.0 h1:niQ+8XD//kKgArIFwDVBXsWVWbde16LPdHMyNwSC8h4=
 github.com/google/go-containerregistry v0.6.0/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw=
 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20210129212729-5c4818de4025/go.mod h1:n9wRxRfKkHy6ZFyj0jJQHw11P+mGLnED4sqegwrXxDk=
+github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20210610160139-c086c7f16d4e/go.mod h1:u9BUkrFoN0hojbyaW5occdRyQvT74KjJKx2VClbrDC8=
 github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20210624211700-ce35c99b3faf/go.mod h1:j3IqhBG3Ox1NXmmhbWU4UmiHVAf2dUgB7le1Ch7JZQ0=
+github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20210918223331-0e8b581974dd/go.mod h1:j3IqhBG3Ox1NXmmhbWU4UmiHVAf2dUgB7le1Ch7JZQ0=
 github.com/google/go-github/v27 v27.0.6/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0=
+github.com/google/go-github/v30 v30.1.0/go.mod h1:n8jBpHl45a/rlBUtRJMOG4GhNADUQFEufcolZ95JfU8=
+github.com/google/go-github/v31 v31.0.0/go.mod h1:NQPZol8/1sMoWYGN2yaALIBytu17gAWfhbweiEed3pM=
+github.com/google/go-licenses v0.0.0-20200602185517-f29a4c695c3d/go.mod h1:g1VOUGKZYIqe8lDq2mL7plhAWXqrEaGUs7eIjthN1sk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
 github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gxui v0.0.0-20151028112939-f85e0a97b3a4/go.mod h1:Pw1H1OjSNHiqeuxAduB1BKYXIwFtsyrY47nEqSgEiCM=
 github.com/google/ko v0.9.3/go.mod h1:Q6jYoY4SzKhRTaniQgnNoLaj4e4fVIWTBLK4FTJQPtM=
+github.com/google/licenseclassifier v0.0.0-20190926221455-842c0d70d702/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
 github.com/google/licenseclassifier v0.0.0-20200708223521-3d09a0ea2f39/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
 github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3/go.mod h1:YzLcVlL+NqWnmUEPuhS1LxDDwGO9WNbVlEXaF4IH35g=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -665,11 +939,14 @@ github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIG
 github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
@@ -680,45 +957,78 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLe
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
+github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
+github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
 github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
 github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
 github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
-github.com/googleapis/gnostic v0.5.3 h1:2qsuRm+bzgwSIKikigPASa2GhW8H2Dn4Qq7UxD8K/48=
+github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
 github.com/googleapis/gnostic v0.5.3/go.mod h1:TRWw1s4gxBGjSe301Dai3c7wXJAZy57+/6tawkOvqHQ=
+github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
+github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
+github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
+github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254/go.mod h1:M9mZEtGIsR1oDaZagNPNG9iq9n2HrhZ17dsXk73V3Lw=
+github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
+github.com/gorilla/pat v0.0.0-20180118222023-199c85a7f6d1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY=
 github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
+github.com/gorilla/sessions v1.1.1/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w=
 github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/gorilla/websocket v1.4.2/go.mod
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= +github.com/gostaticanalysis/analysisutil v0.4.1/go.mod h1:18U/DLpRgIUd459wGxVHE0fRgmo1UgHDcbw7F5idXu0= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= +github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.0.0-20200621232751-01d4955beaa5/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q= github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.14.8/go.mod h1:NZE8t6vs6TnwLL/ITkaK8W3ecMLGAbh2jXTclvpiwYo= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/h2non/gock v1.0.9/go.mod h1:CZMcB0Lg5IWnr9bF79pPMg9WeV6WumxQiUJ1UvdO1iE= +github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI= +github.com/hako/durafmt v0.0.0-20191009132224-3f39dc1ed9f4 h1:60gBOooTSmNtrqNaRvrDbi8VAne0REaek2agjnITKSw= +github.com/hako/durafmt v0.0.0-20191009132224-3f39dc1ed9f4/go.mod h1:5Scbynm8dF1XAPwIwkGPqzkM/shndPm79Jd1003hTjE= 
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= @@ -727,6 +1037,7 @@ github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyN github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= @@ -736,6 +1047,7 @@ github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.6.7/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= @@ -747,6 +1059,8 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -762,10 +1076,22 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/heroku/color v0.0.6 h1:UTFFMrmMLFcL3OweqP1lAdp8i1y/9oHqkeHjQ/b/Ny0= +github.com/heroku/color v0.0.6/go.mod h1:ZBvOcx7cTF2QKOv4LbmoBtNl5uB17qWxGuzZrsi1wLU= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c 
h1:kp3AxgXgDOmIJFR7bIwqFhwJ2qWar8tEQSE5XXhCfVk= +github.com/hinshun/vt10x v0.0.0-20180809195222-d55458df857c/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= +github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ikawaha/goahttpcheck v1.3.1/go.mod h1:RFH8+oq3GOW2yiRwI5tcx3FrVmONlT4DMmu80dsoBRE= +github.com/ikawaha/httpcheck v1.2.0/go.mod h1:eYUWSdmssaYCdUzcOUe8NMF0vgr4MKq7Iqbb3eXtpxI= +github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -781,6 +1107,54 @@ github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mq github.com/influxdata/tdigest v0.0.0-20191024211133-5d87a7585faa/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= github.com/influxdata/tdigest v0.0.1/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78= +github.com/jackc/pgconn v1.7.0/go.mod h1:sF/lPpNEMEOp+IYhyQGdAvrG20gWf6A1tKlr0v7JMeA= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod 
h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.5/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= +github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= +github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= +github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgtype v1.5.0/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= +github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= +github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= +github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0= +github.com/jackc/pgx/v4 v4.9.0/go.mod h1:MNGWmViCgqbZck9ujOOBN63gK9XVGILXWCvKLGKmnms= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.2/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 
h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -794,48 +1168,79 @@ github.com/jcmturner/gokrb5/v8 v8.4.2 h1:6ZIM6b/JJN0X8UM43ZOM6Z4SJzla+a/u7scXFJz github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jenkins-x/go-scm v1.5.117/go.mod h1:PCT338UhP/pQ0IeEeMEf/hoLTYKcH7qjGEKd7jPkeYg= +github.com/jenkins-x/go-scm v1.10.10/go.mod h1:z7xTO9/VzqW3xEbEMH2z5cpOGrZ8+nOHOWfU1ngFGxs= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/josharian/txtarfs v0.0.0-20210218200122-0702f000015a/go.mod h1:izVPOvVRsHiKkeGCT6tYBNWyDVuzj9wAaBb5R9qamfw= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= 
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= +github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= +github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/errcheck v1.6.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.5/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.0/go.mod 
h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -846,15 +1251,41 @@ github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/pty v1.1.8 h1:AkaSdXYQOWeaO3neb8EM634ahkXXe3jYbVh/F9lq+GI= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/ktr0731/go-fuzzyfinder v0.2.0/go.mod h1:Ol2Z6Rc1tu/uUSlD6b67wnhB4nGQt0Mr2NtP0SNW6uM= +github.com/kulti/thelper v0.4.0/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= +github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= +github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= +github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/lestrrat-go/jwx v0.9.0/go.mod h1:iEoxlYfZjvoGpuWwxUz+eR5e6KTJGsaRcy/YNA/UnBk= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= 
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/tracecontext.go v0.0.0-20181129014701-1757c391b1ac/go.mod h1:Frd2bnT3w5FB5q49ENTfVlztJES+1k/7lyWX2+9gq/M= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY= +github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= @@ -868,27 +1299,58 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.1-0.20191009090205-6c0755d89d1e/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/manifoldco/promptui v0.8.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= +github.com/manveru/faker v0.0.0-20171103152722-9fbc68a78c4d/go.mod h1:WZy8Q5coAB1zhY9AOBJP0O6J4BuDfbupUDavKY+I3+s= +github.com/manveru/gobdd v0.0.0-20131210092515-f1a17fdd710b/go.mod h1:Bj8LjjP0ReT1eKt5QlKjwgi5AFm5mI6O1A2G4ChI0Ag= +github.com/maratori/testpackage v1.0.1/go.mod h1:ddKdw+XG0Phzhx8BFDTKgpWP4i7MpApTE5fXSKAqwDU= +github.com/markbates/going v1.0.0/go.mod h1:I6mnB4BPnEeqo85ynXIx1ZFLLbtiLHNXVgWeFO9OGOA= +github.com/markbates/goth v1.68.0/go.mod h1:V2VcDMzDiMHW+YmqYl7i0cMiAUeCkAe4QE6jRKBhXZw= +github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= +github.com/markbates/pkger v0.17.1 h1:/MKEtWqtc0mZvu9OinB9UzVN9iYCwLWuyUv4Bw+PCno= github.com/markbates/pkger v0.17.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= +github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= github.com/mattmoor/dep-notify v0.0.0-20190205035814-a45dec370a17/go.mod 
h1:qWnF4u+oS4UWOZMwZcBQXrt5IQIdWc6XVJLDdxGIfdQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.11 h1:nQ+aFkoE2TMGc0b68U2OKSexC+eq46+XwZzWXHRmPYs= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/mattn/go-sqlite3 v2.0.2+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= @@ -897,50 +1359,85 @@ github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88J github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0/go.mod h1:fcEyUyXZXoV4Abw8DX0t7wyL8mCDxXyU4iAFZfT3IHw= github.com/maximilien/kn-source-pkg 
v0.6.3 h1:5S9jgof8KSvde/hrM0VaOMZ6LmBPTIMO05fdDpbVbvI= github.com/maximilien/kn-source-pkg v0.6.3/go.mod h1:3Nym+ahhV18lkHutauJdw1r3PmlDfu/DoTUuF8ySMt4= +github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= +github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= +github.com/mgechev/revive v1.1.2/go.mod h1:bnXsMr+ZTH09V5rssEI+jHAZ4z+ZdyhgO/zsy3EhK+0= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.17/go.mod h1:WgzbA6oji13JREwiNsRDNfl7jYdPnmz+VEuLrA+/48M= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e h1:Qa6dnn8DlasdXRnacluu8HzPts0S1I9zvvUPDbBnXFI= +github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e/go.mod h1:waEya8ee1Ro/lgxpVhkJI4BVASzkm3UZqkx/cFJiYHM= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo= github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= 
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM= +github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM= +github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= +github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= +github.com/mrjones/oauth v0.0.0-20180629183705-f4e24b6d100c/go.mod h1:skjdDftzkFALcuGzYSklqYd8gvat6F1gZJ4YPVbkZpM= +github.com/mrunalp/fileutils v0.0.0-20171103030105-7d4729fb3618/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0= github.com/mrunalp/fileutils v0.5.0/go.mod 
h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/mtrmac/gpgme v0.1.2/go.mod h1:GYYHnGSuS7HK3zVS2n3y73y0okK/BeKzwnn5jgiVFNI= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -948,8 +1445,13 @@ github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzE github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms= +github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc= +github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -957,6 +1459,9 @@ github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtb github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= @@ -990,43 +1495,67 @@ github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20210730191737-8e42a01fb1b7/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.3-0.20211202222133-eacdcc10569b h1:0kCLoY3q1n4zDPYBdGhE/kdcyLWl/aAQmJFQrCPNJ6k= +github.com/opencontainers/image-spec v1.0.3-0.20211202222133-eacdcc10569b/go.mod h1:j4h1pJW6ZcJTgMZWP3+7RlG3zTaP02aDZ/Qw0sppK7Q= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc91/go.mod h1:3Sm6Dt7OT8z88EbdQqqcRN2oCT54jbi72tT/HqgflT8= github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg= github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200520003142-237cc4f519e2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417 h1:3snG66yBm59tKhhSPQrQ/0bCrv1LQbKt40LnUPiUxdc= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.5.1/go.mod h1:yTcKuYAh6R95iDpefGLQaPaRwJFwyzAJufJyiTt7s0g= 
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2 h1:c4ca10UMgRcvZ6h0K4HtS15UaVSBEaE+iln2LVpAuGc= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa h1:Zmaj3Ia2pRoN4/HqbiDu9TBkxoPk7/9h6R53OGtOAlA= github.com/openshift-knative/kn-plugin-event v0.27.1-0.20220223114256-af13ecf492aa/go.mod h1:z/RbZ2zAWlirxAvKNNikas7S19P8V1nr8XXG0cXr3M0= +github.com/openshift-knative/kn-plugin-func v0.22.1-0.20220223003546-b1c8c38b7608 h1:vI7+9FRrQFElvqX90z3GG4HfHiBIvJ13WTMCwoWQorI= +github.com/openshift-knative/kn-plugin-func v0.22.1-0.20220223003546-b1c8c38b7608/go.mod h1:iKNFs9OpstK5dVrmmQR162m+Kaj+8/caz91onypfNZA= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.0/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= github.com/openzipkin/zipkin-go v0.3.0 h1:XtuXmOLIXLjiU2XduuWREDT0LOKtSgos/g7i7RYyoZQ= github.com/openzipkin/zipkin-go v0.3.0/go.mod h1:4c3sLeE8xjNqehmF5RpAFLPLJxXscc0R4l6Zg0P1tTQ= +github.com/ory/viper v1.7.5 h1:+xVdq7SU3e1vNaCsk/ixsfxE4zylk1TJUiJrY647jUE= +github.com/ory/viper v1.7.5/go.mod h1:ypOuyJmEUb3oENywQZRgeAMwqgOyDqwboO1tj3DjTaM= +github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= +github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= +github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= +github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= 
github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -1036,6 +1565,7 @@ github.com/pelletier/go-toml/v2 v2.0.0-beta.2/go.mod h1:+X+aW6gUj6Hda43TeYHVCIvY github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= @@ -1053,11 +1583,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -1066,6 +1600,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_golang v1.11.0 
h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= @@ -1085,10 +1620,13 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.19.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -1102,16 +1640,29 @@ github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDa github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/statsd_exporter v0.15.0/go.mod h1:Dv8HnkoLQkeEjkIE4/2ndAA7WL1zHKK7WMqFQqu72rw= +github.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ= github.com/prometheus/statsd_exporter v0.21.0 h1:hA05Q5RFeIjgwKIYEdFd59xu5Wwaznf33yKI+pyX6T8= github.com/prometheus/statsd_exporter v0.21.0/go.mod h1:rbT83sZq2V+p73lHhPZfMc3MLCHmSHelCh9hSGYNLTQ= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= +github.com/quasilyte/go-ruleguard v0.3.13/go.mod h1:Ul8wwdqR6kBVOCt2dipDBkE+T6vAV/iixkrKuRTN1oQ= +github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/dsl v0.3.10/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= 
+github.com/quasilyte/go-ruleguard/rules v0.0.0-20210428214800-545e0d2e0bf7/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= +github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20190706150252-9beb055b7962/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1120,46 +1671,85 @@ github.com/rickb777/date v1.13.0 h1:+8AmwLuY1d/rldzdqvqTEg7107bZ8clW37x4nsdG3Hs= github.com/rickb777/date v1.13.0/go.mod h1:GZf3LoGnxPWjX+/1TXOuzHefZFDovTyNLHDMd3qH70k= github.com/rickb777/plural v1.2.1 h1:UitRAgR70+yHFt26Tmj/F9dU9aV6UfjGXSbO1DcC9/U= github.com/rickb777/plural v1.2.1/go.mod h1:j058+3M5QQFgcZZ2oKIOekcygoZUL8gKW5yRO14BuAw= +github.com/rivo/tview v0.0.0-20210624165335-29d673af0ce2 h1:I5N0WNMgPSq5NKUFspB4jMJ6n2P0ipz5FlOlB4BXviQ= +github.com/rivo/tview v0.0.0-20210624165335-29d673af0ce2/go.mod h1:IxQujbYMAh4trWr0Dwa8jfciForjVmxyHpskZX6aydQ= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/dnscache v0.0.0-20210201191234-295bba877686/go.mod h1:qe5TWALJ8/a1Lqznoc5BDHpYX/8HU60Hm2AwRmqzxqA= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/rs/zerolog v1.17.2/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I= github.com/rubiojr/go-vhd v0.0.0-20200706105327-02e210299021/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= 
+github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg= +github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sabhiram/go-gitignore v0.0.0-20201211074657-223ce5d391b0 h1:4Q/TASkyjpqyR5DL5+6c2FGSDpHM5bTMSspcXr7J6R8= +github.com/sabhiram/go-gitignore v0.0.0-20201211074657-223ce5d391b0/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/securego/gosec/v2 v2.9.1/go.mod h1:oDcDLcatOJxkCGaCaq8lua1jTnYf6Sou4wdiJ1n4iHc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod 
h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= github.com/slinkydeveloper/loadastic v0.0.0-20201218203601-5c69eea3b7d8/go.mod h1:vNSJ7ak+2OeAb5bhSHRmMKKcbadbxlokx/Wh1lC50e4= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1195,7 +1785,11 @@ github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk= github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4= github.com/sqs/goreturns v0.0.0-20181028201513-538ac6014518/go.mod h1:CKI4AZ4XmGV240rTHfO0hfE83S6/a3/Q1siZJ/vXf7A= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= 
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1206,9 +1800,13 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -1216,33 +1814,83 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tchap/go-patricia v2.3.0+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/tektoncd/cli v0.22.0 h1:X0EL67tPQvaMWyeocNJflJyIPeexD5AgTkp/I2igT6g= +github.com/tektoncd/cli v0.22.0/go.mod h1:F1EIO3esxKoACMCMNLI4+Ci10KssCmk3Vr+65LLyZjs= +github.com/tektoncd/hub/api v0.0.0-20220128080824-5941f280d06a/go.mod h1:ckgU0iLL6PdwTnma9GkNYfmMVCXdzI8X6vh6GeJrDeM= +github.com/tektoncd/pipeline v0.24.1/go.mod h1:ChFD/vfu14VOtCVlLWdtlvOwXfBfVotULoNV6yz+CKY= +github.com/tektoncd/pipeline v0.27.1-0.20210830154614-c8c729131d4a/go.mod h1:U6p87Pzl8b7Lid1HrMabDFDnKstf6ZmkSLKgPiAkQxY= +github.com/tektoncd/pipeline v0.32.1 h1:tAfTVITbYWLDwWmVX5F8q/V+XWZ9qJtRKLeWtRx3kRs= +github.com/tektoncd/pipeline v0.32.1/go.mod h1:dO84qW4sTq7S7Jv5G0PRmbTjvxnUAuDDxD1NBrsQX9w= +github.com/tektoncd/plumbing v0.0.0-20210420200944-17170d5e7bc9/go.mod h1:WTWwsg91xgm+jPOKoyKVK/yRYxnVDlUYeDlypB1lDdQ= 
+github.com/tektoncd/plumbing v0.0.0-20210514044347-f8a9689d5bd5/go.mod h1:WTWwsg91xgm+jPOKoyKVK/yRYxnVDlUYeDlypB1lDdQ= +github.com/tektoncd/plumbing v0.0.0-20211012143332-c7cc43d9bc0c/go.mod h1:b9esRuV1absBvaPzKkjYdKXjC5Tgs8/vh1sz++RiTdc= +github.com/tektoncd/triggers v0.18.0 h1:JDQfaxFrljQ5TZdOE1UaPICTxhBKyIscdM2omD3ItRc= +github.com/tektoncd/triggers v0.18.0/go.mod h1:5+5p8FMT99EhR5mYEaxAVaYpPhxCH0mZF6sf9AYBSVk= +github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= +github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= +github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= github.com/thediveo/enumflag v0.10.0 h1:CQIGWQYgFIKTUvRf5FL3K5ksQzzBag/HSUCu3Oin+Jw= github.com/thediveo/enumflag v0.10.0/go.mod h1:zbsjXNJ3Np5ZXjUDgXeXsF2THwQd6ybEKMLw9XnQZ8o= +github.com/tidwall/gjson v1.10.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.3/go.mod h1:5WdjKx3AQMvCJ4RG6/2UYT7dLrGvJUV1x4jdTAyGvZs= +github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs= +github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomarrell/wrapcheck/v2 v2.4.0/go.mod h1:68bQ/eJg55BROaRTbMjC7vuhL2OgfoG8bLp9ZyoBfyY= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tsenart/go-tsz v0.0.0-20180814232043-cdeb9e1e981e/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo= github.com/tsenart/go-tsz v0.0.0-20180814235614-0bd30b3df1c3/go.mod h1:SWZznP1z5Ki7hDT2ioqiFKEse8K9tU2OUvaRI0NeGQo= github.com/tsenart/vegeta v12.7.1-0.20190725001342-b5f4fca92137+incompatible/go.mod h1:Smz/ZWfhKRcyDDChZkG3CyTHdj87lHzio/HOCkbndXM= github.com/tsenart/vegeta/v12 v12.8.4/go.mod 
h1:ZiJtwLn/9M4fTPdMY7bdbIeyNeFVE8/AHbWFqCsUuho= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ulikunitz/xz v0.5.9/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= +github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.30.0/go.mod h1:2rsYD01CKFrjjsvFxx75KlEUNpWNBY9JWD3K/7o2Cus= +github.com/valyala/quicktemplate v1.7.0/go.mod h1:sqKJnoaOF88V07vkO+9FL8fb9uZg/VPSJnLYn+LmLk8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/vbatts/tar-split v0.11.1/go.mod h1:LEuURwDEiWjRjwu46yU3KVGuUdVv/dcnpcEPSzR8z6g= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +github.com/vbauerster/mpb/v5 v5.4.0/go.mod h1:fi4wVo7BVQ22QcvFObm+VwliQXlV1eBT8JDaKXR4JGI= github.com/vdemeester/k8s-pkg-credentialprovider v1.19.7/go.mod h1:K2nMO14cgZitdwBqdQps9tInJgcaXcU/7q5F59lpbNI= +github.com/vdemeester/k8s-pkg-credentialprovider v1.20.7/go.mod h1:K2nMO14cgZitdwBqdQps9tInJgcaXcU/7q5F59lpbNI= github.com/vdemeester/k8s-pkg-credentialprovider v1.21.0-1/go.mod h1:l4LxiP0cmEcc5q4BTDE8tZSyIiyXe0T28x37yHpMzoM= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= @@ -1253,22 +1901,35 @@ github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59b github.com/wavesoftware/go-ensure v1.0.0 h1:6X3gQL5psBWwtu/H9a+69xQ+JGTUELaLhgOB/iB3AQk= github.com/wavesoftware/go-ensure v1.0.0/go.mod h1:K2UAFSwMTvpiRGay/M3aEYYuurcR8S4A6HkQlJPV8k4= github.com/wavesoftware/go-magetasks v0.6.0/go.mod h1:8/zmKLAbJFwjAZOF8U2BnmVa9aDrLPtd4XO5b67uflY= +github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU= +github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= 
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= +github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca h1:1CFlNzQhALwjS9mBAUkycX616GzgsuYUOCHA5+HSlXI= github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yeya24/promlinter v0.1.0/go.mod h1:rs5vtZzeBHqqMwXqFScncpCF6u06lezhZepno9AB1Oc= +github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= +github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= +github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1279,10 +1940,14 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zach-klippenstein/goregen v0.0.0-20160303162051-795b5e3961ea/go.mod h1:eNr558nEUjP8acGw8FFjTeWvSgU1stO7FAO6eknhHe4= 
+github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= go.etcd.io/etcd v0.5.0-alpha.5.0.20200819165624-17cef6e3e9d5/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= @@ -1291,6 +1956,12 @@ go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsX go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= +go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= +go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1321,35 +1992,56 @@ go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+ go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= +go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.18.1/go.mod 
h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= +goa.design/goa/v3 v3.0.9/go.mod h1:oJR8VOFa4HV7wCQv4XhPtvyknz0B3VFFRUWDdEmI0FI= +goa.design/goa/v3 v3.1.3/go.mod h1:GEog3KvHosQPKrrZSlpXDSnm7PpmzZEiy3mLxI/FtXM= +goa.design/goa/v3 v3.3.1/go.mod h1:Br8id+evuay7oUIK3BXr6iFJVLxAiirb+FPzfYUYOWg= +goa.design/plugins/v3 v3.1.3/go.mod h1:j4gOWwZpEBsNmONKitV/tBlxG6Yu7unFCU1HJmqjiC8= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190829043050-9756ffdc2472/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1357,16 +2049,20 @@ golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210920023735-84f357641f63 h1:kETrAMYZq6WVGPa8IIixL0CaEcIUNi+1WX7grUoi3y8= golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190731235908-ec7cb31e5a56/go.mod h1:JhuoJpWY28nO4Vef9tZUw9qufEGTyX1+7lmHxV5q5G4= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= @@ -1374,6 +2070,7 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1390,6 +2087,7 @@ golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPI golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20190806162312-597adff16ade/go.mod h1:AlhUtkH4DA4asiFC5RgK7ZKmauvtkAVcy9L0epCzlWo= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= @@ -1400,8 +2098,10 @@ golang.org/x/mod v0.3.1-0.20200828183125-ce943fd02449/go.mod h1:s0Qsj1ACt9ePp/hM golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1431,6 +2131,9 @@ golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1438,14 +2141,18 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200927032502-5d4f70055728/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200930145003-4acb6c075d10/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1458,16 +2165,24 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210326220855-61e056675ecf/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/net v0.0.0-20210331060903-cb1fcc7394e5/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210510120150-4163338589ed/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net 
v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1493,6 +2208,7 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1512,25 +2228,33 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1543,8 +2267,10 @@ golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191029155521-f43be2a4598c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1562,7 +2288,9 @@ golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1586,6 +2314,7 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201218084310-7d0127a74742/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1593,26 +2322,34 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210326220804-49726bf1d181/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210608053332-aa57babbf139/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210915083310-ed5796bab164/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc= @@ -1620,8 +2357,10 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1640,6 +2379,7 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1648,17 +2388,27 @@ golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= @@ -1666,17 +2416,25 
@@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190807223507-b346f7fd45de/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191030232956-1e24073be82c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1685,6 +2443,7 @@ golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1694,28 +2453,54 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200313205530-4303120df7d8/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200422022333-3d57cf2e726e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512001501-aaeff5de670a/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= 
+golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20200929161345-d7fc70abf50f/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201114224030-61ea331ec02b/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201118003311-bd56c0adb394/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= +golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= @@ -1725,6 +2510,8 @@ golang.org/x/tools v0.1.6/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.8 h1:P1HhGGuLW4aAclzjtmJdf0mJOjVUZUzOTqkAkWL+l6w= golang.org/x/tools v0.1.8/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1760,6 +2547,7 @@ 
google.golang.org/api v0.25.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= @@ -1779,6 +2567,7 @@ google.golang.org/api v0.61.0 h1:TXXKS1slM3b2bZNJwD5DV/Tp6/M2cLzLOLh9PjDhrw8= google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= @@ -1788,16 +2577,21 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190716160619-c506a9f90610/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= 
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1812,6 +2606,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= @@ -1819,11 +2614,15 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200929141702-51c3e5b607fe/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1833,11 +2632,16 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210325224202-eed09b1b5210/go.mod h1:f2Bd7+2PlaVKmvKQ52aspJZXIDaRQBVdOOBfJ5i8OEs= +google.golang.org/genproto v0.0.0-20210330181207-2295ebbda0c6/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210416161957-9910b6c460de/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210603172842-58e84a565dcf/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210610141715-e7a9b787a5a4/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= @@ -1857,6 +2661,7 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12 h1:DN5b3HU13J4sMd/QjDx34U6afpaexKTDdop+26pdjdk= google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -1872,10 +2677,12 @@ google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8 google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= @@ -1918,12 +2725,17 @@ gopkg.in/check.v1 
v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/evanphx/json-patch.v4 v4.9.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/h2non/gentleman.v1 v1.0.4/go.mod h1:JYuHVdFzS4MKOXe0o+chKJ4hCe6tqKKw9XH9YP6WFrg= +gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE= +gopkg.in/h2non/gock.v1 v1.0.16/go.mod h1:XVuDAssexPLwgxCLMvDTWNU5eqklsydR6I5phZ9oPB8= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -1935,15 +2747,25 @@ gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLn gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= +gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= +gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1951,6 +2773,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -1958,9 +2781,19 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20191026110619-0b21df46bc1d/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/driver/mysql v1.0.1/go.mod h1:KtqSthtg55lFp3S5kUXqlGaelnWpKitn4k1xZTnoiPw= +gorm.io/driver/postgres v1.0.0/go.mod h1:wtMFcOzmuA5QigNsgEIb7O5lhvH1tHAF1RbWmLWV4to= +gorm.io/driver/postgres v1.0.2/go.mod h1:FvRSYfBI9jEp6ZSjlpS9qNcSjxwYxFc03UOTrHdvvYA= +gorm.io/driver/sqlite v1.1.1/go.mod h1:hm2olEcl8Tmsc6eZyxYSeznnsDaMqamBvEXLNtBg4cI= +gorm.io/driver/sqlserver v1.0.2/go.mod h1:gb0Y9QePGgqjzrVyTQUZeh9zkd5v0iz71cM1B4ZycEY= +gorm.io/gorm v1.9.19/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.20.0/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.20.2/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.20.7/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= @@ -1971,19 +2804,14 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= 
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.5/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.19.7/go.mod h1:KTryDUT3l6Mtv7K2J2486PNL9DBns3wOYTkGR+iz63Y= -k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= -k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= -k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= -k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU= -k8s.io/api v0.21.1/go.mod h1:FstGROTmsSHBarKc8bylzXih8BLNYTiS3TZcsoEDg2s= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= k8s.io/api v0.21.4 h1:WtDkzTAuI31WZKDPeIYpEUA+WeUfXAmA7gwj6nzFfbc= k8s.io/api v0.21.4/go.mod h1:fTVGP+M4D8+00FN2cMnJqk/eb/GH53bvmNs2SVTmpFk= -k8s.io/apiextensions-apiserver v0.19.7/go.mod h1:XJNNtjISNNePDEUClHt/igzMpQcmjVVh88QH+PKztPU= k8s.io/apiextensions-apiserver v0.21.4 h1:HkajN/vmT/9HnFmUxvpXfSGkTCvH/ax4e3+j6mqWUDU= k8s.io/apiextensions-apiserver v0.21.4/go.mod h1:OoC8LhI9LnV+wKjZkXIBbLUwtnOGJiTRE33qctH5CIk= k8s.io/apimachinery v0.21.4 h1:KDq0lWZVslHkuE5I7iGAQHwpK0aDTlar1E7IWEc4CNw= @@ -1992,28 +2820,23 @@ k8s.io/apiserver v0.19.7/go.mod h1:DmWVQggNePspa+vSsVytVbS3iBSDTXdJVt0akfHacKk= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.20.7/go.mod h1:7gbB7UjDdP1/epYBGnIUE6jWY4Wpz99cZ7igfDa9rv4= k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg= k8s.io/apiserver v0.21.4/go.mod h1:SErUuFBBPZUcD2nsUU8hItxoYheqyYr2o/pCINEPW8g= k8s.io/cli-runtime v0.19.7/go.mod h1:UTtbWaGV/USZSrnvuW/lRZGM5OsemAT/q/Du/Ac+wKU= k8s.io/cli-runtime v0.21.4 h1:kvOzx6dKg+9wRuHTzSqo8tfTV6ixZCkmi+ag54s7mn8= k8s.io/cli-runtime v0.21.4/go.mod h1:eRbLHYkdVWzvG87yrkgGd8CqX6/+fAG9DTdAqTXmlRY= -k8s.io/client-go v0.19.7/go.mod h1:iytGI7S3kmv6bWnn+bSQUE4VlrEi4YFssvVB7J7Hvqg= -k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= -k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= -k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= -k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA= -k8s.io/client-go v0.21.1/go.mod h1:/kEw4RgW+3xnBGzvp9IWxKSNA+lXn3A7AuH3gdOAzLs= k8s.io/client-go v0.21.4 h1:tcwj167If+v+pIGrCjaPG7hFo6SqFPFCCgMJy+Vm8Jc= k8s.io/client-go v0.21.4/go.mod h1:t0/eMKyUAq/DoQ7vW8NVVA00/nomlwC+eInsS8PxSew= k8s.io/cloud-provider v0.19.7/go.mod h1:aO/VpUwkG+JQN7ZXc5WBLZ5NBXuq/Y5B6vri6U94PZ8= k8s.io/cloud-provider v0.21.0/go.mod h1:z17TQgu3JgUFjcgby8sj5X86YdVK5Pbt+jm/eYMZU9M= -k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/code-generator v0.21.4 h1:vO8jVuEGV4UF+/2s/88Qg05MokE/1QUFi/Q2YDgz++A= k8s.io/code-generator v0.21.4/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/component-base v0.19.7/go.mod h1:YX8spPBgwl3I6UGcSdQiEMAqRMSUsGQOW7SEr4+Qa3U= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.20.7/go.mod 
h1:878UWprXC07P2CWFg+jjvTfxJSlkHp1v2m1MTkNQnJY= k8s.io/component-base v0.21.0/go.mod h1:qvtjz6X0USWXbgmbfXR+Agik4RZ3jv2Bgr5QnZzdPYw= k8s.io/component-base v0.21.4/go.mod h1:ZKG0eHVX+tUDcaoIGpU3Vtk4TIjMddN9uhEWDmW6Nyg= k8s.io/controller-manager v0.21.0/go.mod h1:Ohy0GRNRKPVjB8C8G+dV+4aPn26m8HYUI6ejloUBvUA= @@ -2028,8 +2851,10 @@ k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8 k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201203183100-97869a43a9d9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20210915205010-39e73c8a59cd h1:WEhFhsVrgII9WzLqf1VTUUtwPHm+vxbRq8Ib6y9Jt4g= +k8s.io/gengo v0.0.0-20210203185629-de9496dff47b/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210915205010-39e73c8a59cd/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -2044,47 +2869,66 @@ k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210113233702-8566a335510f/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= -k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80= +k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= k8s.io/legacy-cloud-providers v0.19.7/go.mod h1:dsZk4gH9QIwAtHQ8CK0Ps257xlfgoXE3tMkMNhW2xDU= k8s.io/legacy-cloud-providers v0.21.0/go.mod h1:bNxo7gDg+PGkBmT/MFZswLTWdSWK9kAlS1s8DJca5q4= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ= k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g= +k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= knative.dev/caching v0.0.0-20210215030244-1212288570f0/go.mod h1:rAPalJe9Lx3jHffJpackk5WjZYl3j2QvXUgw0GPllxQ= +knative.dev/caching v0.0.0-20210803185815-4e553d2275a0/go.mod h1:Vs+HND39+KKaIQp9M3m3Jmt4YtznpitDQ3n53gxbDYQ= knative.dev/caching v0.0.0-20211101215439-72577a3c0ce1/go.mod h1:hz6tnVAmRLvPoprw+0wsA9IrZC3MF5l2Hn/2JT28Yk0= knative.dev/client v0.21.0/go.mod 
h1:1En9uxMhk62EReWR1d66/d3tnpkot/D3vBRfmuidFNc= knative.dev/client v0.27.0/go.mod h1:FmXziLwolJtqaH3Jxe8cxqWjBUHXU6LsRsMyzlMSFFM= knative.dev/control-protocol v0.0.0-20211101215039-a32569595694/go.mod h1:HlIoGfErGnSVKN6NI3iG5gXxCo9EcdV8sCDGW2IObJo= knative.dev/eventing v0.21.0/go.mod h1:JjbVEOTJJHqo9CTxbTfrMn018hG8fOr3UfBoCJ7KWaA= +knative.dev/eventing v0.25.0/go.mod h1:8jIsrnSONPgv+m63OTzpwZQJiQASYl77C3llCyYlBMU= knative.dev/eventing v0.27.0/go.mod h1:4ppWQEQ/2B66/YFENDmV1Gjxs4meLpz6UTUgLkkINt4= knative.dev/eventing v0.27.2 h1:Zslo5WhIxAHAztLjoSCcnW52SNO2hIumK9dMp9V/Zc8= knative.dev/eventing v0.27.2/go.mod h1:k/Xt54hNh9YfMSYdMTkJ0TNrkwU9hL+Frn+n9zsAdaE= knative.dev/eventing-kafka v0.27.0 h1:mfbQbK+g9PiBHL9FoLTNckQRvY4EzMfmR2z25zTKJeU= knative.dev/eventing-kafka v0.27.0/go.mod h1:bt9HdIsI9RMgiWfcsrDT2vH8ADGY+ehN4a6Bj4QH5Jo= knative.dev/hack v0.0.0-20210203173706-8368e1f6eacf/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20210325223819-b6ab329907d3/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20210622141627-e28525d8d260/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20210806075220-815cd312d65c/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack v0.0.0-20211101195839-11d193bf617b/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= knative.dev/hack v0.0.0-20211112152736-4de3924914b2/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= -knative.dev/hack v0.0.0-20211122163517-fe1340f21191 h1:yRUruaFxjk6tkubtg44Nya0phUm1idKZr5bFO30AkmE= knative.dev/hack v0.0.0-20211122163517-fe1340f21191/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20211203062838-e11ac125e707/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20220209225905-7331bb16ba00/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack v0.0.0-20220216040439-0456e8bf6547 h1:lEWsaG/yMLLp3onNushrawsHFXD4LXCXTo5FUUa2GiU= +knative.dev/hack v0.0.0-20220216040439-0456e8bf6547/go.mod h1:PHt8x8yX5Z9pPquBEfIj0X66f8iWkWfR0S/sarACJrI= +knative.dev/hack/schema v0.0.0-20210622141627-e28525d8d260/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= knative.dev/hack/schema v0.0.0-20211101195839-11d193bf617b/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= knative.dev/hack/schema v0.0.0-20211122163517-fe1340f21191/go.mod h1:ffjwmdcrH5vN3mPhO8RrF2KfNnbHeCE2C60A+2cv3U0= knative.dev/kn-plugin-source-kafka v0.27.0 h1:zOxYTlm/BuT6R7gSBgEVbSMFVgt0XfBKJlP1PM3mjks= knative.dev/kn-plugin-source-kafka v0.27.0/go.mod h1:lo7J8MK+NZAqdy0y7DsUlbVLizZNf37fEL+vOOBNw2Y= knative.dev/networking v0.0.0-20210215030235-088986a1c2a3/go.mod h1:pmAMQjMqQUxpK0UyjE71KljMs6rwDMVIAlvrZsU3I6Y= knative.dev/networking v0.0.0-20210216014426-94bfc013982b/go.mod h1:Crdn87hxdFd3Jj6PIyrjzGnr8OGHX35k5xo9jlOrjjA= +knative.dev/networking v0.0.0-20210803181815-acdfd41c575c/go.mod h1:UA9m1M3rGssy63gVwjSh7CYoWTKZNO8cnY9QsIu7tyo= knative.dev/networking v0.0.0-20211101215640-8c71a2708e7d h1:nCnuNfcLWuyAdZYgVfwSooa3+p2ebx+V+qhGXXF/FIk= knative.dev/networking v0.0.0-20211101215640-8c71a2708e7d/go.mod h1:7SKKM4MsBANrXNRZhb/zMkNjTdxYbNjwQDWgu+Fyye4= knative.dev/pkg v0.0.0-20210212203835-448ae657fb5f/go.mod h1:TJSdebQOWX5N2bszohOYVi0H1QtXbtlYLuMghAFBMhY= knative.dev/pkg v0.0.0-20210215165523-84c98f3c3e7a/go.mod h1:TJSdebQOWX5N2bszohOYVi0H1QtXbtlYLuMghAFBMhY= knative.dev/pkg v0.0.0-20210216013737-584933f8280b/go.mod h1:TJSdebQOWX5N2bszohOYVi0H1QtXbtlYLuMghAFBMhY= -knative.dev/pkg 
v0.0.0-20211101212339-96c0204a70dc h1:ldjbTpoMXUTgzw0IJsAFLdyA6/6QYRvz8IGPZOknEDg= +knative.dev/pkg v0.0.0-20210331065221-952fdd90dbb0/go.mod h1:PD5g8hUCXq6iR3tILjmZeJBvQfXGnHMPKryq54qHJhg= +knative.dev/pkg v0.0.0-20210803160015-21eb4c167cc5/go.mod h1:RPk5txNA3apR9X40D4MpUOP9/VqOG8CrtABWfOwGVS4= +knative.dev/pkg v0.0.0-20210827184538-2bd91f75571c/go.mod h1:jMSqkNMsrzuy+XR4Yr/BMy7SDVbUOl3KKB6+5MR+ZU8= knative.dev/pkg v0.0.0-20211101212339-96c0204a70dc/go.mod h1:SkfDk9bWIiNZD7XtILGkG7AKVyF/M6M0bGxLgl0SYL8= +knative.dev/pkg v0.0.0-20220104185830-52e42b760b54/go.mod h1:189cvGP0mwpqwZGFrLk5WuERIsNI/J6HuQ1CIX7SXxY= +knative.dev/pkg v0.0.0-20220215153400-3c00bb0157b9 h1:u0vYuNe0BTFvbqN8euaauL/kN3jR7mjmxMc+QRnWsUw= +knative.dev/pkg v0.0.0-20220215153400-3c00bb0157b9/go.mod h1:6ZoCgi60jSUn/WrwTGNAZbsz5/kmwiZZD8EovSLzYZ4= knative.dev/reconciler-test v0.0.0-20210216030508-77f50054d024/go.mod h1:RP/K5xJylB72Go6eAsXYEsQHp4zCCNMNjmsqhvq7wko= +knative.dev/reconciler-test v0.0.0-20210803183715-b61cc77c06f6/go.mod h1:+Kovy+G5zXZNcuO/uB+zfo37vFKZzsLIlWezt/nKMz0= knative.dev/reconciler-test v0.0.0-20211101214439-9839937c9b13/go.mod h1:gTsbLk496j/M9xqMpx/liyCQ0X3bwDpRtcs2Zzws364= knative.dev/serving v0.21.0/go.mod h1:PU9k1Y6YMG27XQldEu5agNkcebvSafUXKXPircQYCsE= +knative.dev/serving v0.25.0/go.mod h1:24E4fVyViFnz8aAaafzdrYKB7CAsQr4FMU7QXoIE6CI= knative.dev/serving v0.27.0/go.mod h1:Lcqv3K/DeDt6ZoeWTgpjxc/0teS7uMebnnU+gXIw99c= knative.dev/serving v0.27.1 h1:d3cazLiXkvFIcqPYGlV+qvGkMdqb5yI6kiw0FovC4dc= knative.dev/serving v0.27.1/go.mod h1:70KmpwlBztk82pHmwRUkPGasQChFmefUyZWMDOzEoKk= @@ -2093,6 +2937,10 @@ modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= +mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= diff --git a/openshift/release/kn.yaml b/openshift/release/kn.yaml index f9d83e4622..4b624f4580 100644 --- a/openshift/release/kn.yaml +++ b/openshift/release/kn.yaml @@ -3,10 +3,10 @@ plugins: module: knative.dev/kn-plugin-source-kafka pluginImportPath: knative.dev/kn-plugin-source-kafka/plugin version: v0.27.0 - #- name: func - #module: knative.dev/kn-plugin-func - #pluginImportPath: knative.dev/kn-plugin-func/plugin - #version: v0.20.0 - #replace: - # - module: knative.dev/kn-plugin-func - # version: main + - name: func + module: knative.dev/kn-plugin-func + pluginImportPath: knative.dev/kn-plugin-func/plugin + version: v0.21.0 + replace: + - module: knative.dev/kn-plugin-func + version: openshift-v0.21.0 diff --git a/pkg/kn/root/plugin_register.go b/pkg/kn/root/plugin_register.go index e7956794b3..6100004f3a 100644 --- a/pkg/kn/root/plugin_register.go +++ b/pkg/kn/root/plugin_register.go @@ -19,6 +19,7 @@ package root import ( // Add #plugins# import here. 
Don't remove this line, it triggers an automatic replacement. _ "knative.dev/kn-plugin-event/pkg/plugin" + _ "knative.dev/kn-plugin-func/plugin" _ "knative.dev/kn-plugin-source-kafka/plugin" ) diff --git a/vendor/github.com/AlecAivazis/survey/v2/.travis.yml b/vendor/github.com/AlecAivazis/survey/v2/.travis.yml new file mode 100644 index 0000000000..f96db1ac0e --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/.travis.yml @@ -0,0 +1,22 @@ +language: go + +go: + - 1.12 + +os: + - linux + - linux-ppc64le + - osx + - windows + +go_import_path: github.com/AlecAivazis/survey/v2 + +before_install: + - go get github.com/alecaivazis/run + +install: + - run install-deps + +script: + - run tests + # - run autoplay-tests diff --git a/vendor/github.com/AlecAivazis/survey/v2/CONTRIBUTING.md b/vendor/github.com/AlecAivazis/survey/v2/CONTRIBUTING.md new file mode 100644 index 0000000000..a84e06061e --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/CONTRIBUTING.md @@ -0,0 +1,77 @@ +# Contributing to Survey + +🎉🎉 First off, thanks for the interest in contributing to `survey`! 🎉🎉 + +The following is a set of guidelines to follow when contributing to this package. These are not hard rules; please use common sense and feel free to propose changes to this document in a pull request. + +## Table of Contents + +1. [Code of Conduct](#code-of-conduct) +1. [Getting Help](#getting-help) +1. [Filing a Bug Report](#how-to-file-a-bug-report) +1. [Suggesting an API change](#suggesting-an-api-change) +1. [Submitting a Contribution](#submitting-a-contribution) +1. [Writing and Running Tests](#writing-and-running-tests) + +## Code of Conduct + +This project and its contributors are expected to uphold the [Go Community Code of Conduct](https://golang.org/conduct). By participating, you are expected to follow these guidelines. + +## Getting help + +Feel free to [open up an issue](https://github.com/AlecAivazis/survey/v2/issues/new) on GitHub when asking a question so others will be able to find it. Please remember to tag the issue with the `Question` label so the maintainers can get to your question as soon as possible. If the question is urgent, feel free to reach out to `@AlecAivazis` directly in the gophers slack channel. + +## How to file a bug report + +Bugs are tracked using the GitHub issue tracker. When filing a bug, please remember to label the issue as a `Bug` and answer/provide the following: + +1. What operating system and terminal are you using? +1. An example that showcases the bug. +1. What did you expect to see? +1. What did you see instead? + +## Suggesting an API change + +If you have an idea, I'm more than happy to discuss it. Please open an issue and we can work through it. In order to maintain some sense of stability, additions to the top-level API are taken just as seriously as changes that break it. Adding stuff is much easier than removing it. + +## Submitting a contribution + +In order to maintain stability, most features get fully integrated in more than one PR. This allows for more opportunity to think through each API change without amassing large amounts of tech debt and API changes at once. If your feature can be broken into separate chunks, it will be reviewed much more quickly. For example, if the PR that implemented the `Validate` field had been submitted separately from the one that included `survey.Required`, it would have been able to get merged without having to decide how many different `Validators` we want to provide as part of `survey`'s API.
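For context, the `Validate` field and `survey.Required` mentioned above are the per-question validation hooks in the public API. A minimal sketch, mirroring the README example vendored later in this patch:

```go
qs := []*survey.Question{{
	Name:     "name",
	Prompt:   &survey.Input{Message: "What is your name?"},
	Validate: survey.Required, // one of the built-in Validators; rejects empty answers
}}
// qs is then passed to survey.Ask, as in the README example
```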
+ +When submitting a contribution, + +- Provide a description of the feature or change +- Reference the ticket addressed by the PR if there is one +- Following community standards, add comments for all exported members so that all necessary information is available on godocs +- Remember to update the project README.md with changes to the high-level API +- Include both positive and negative unit tests (when applicable) +- Contributions with visual ramifications or interaction changes should be accompanied by the appropriate `go-expect` tests. For more information on writing these tests, see [Writing and Running Tests](#writing-and-running-tests) + +## Writing and running tests + +When submitting features, please add as many unit tests as necessary to test both positive and negative cases. + +Integration tests for survey use [go-expect](https://github.com/Netflix/go-expect) to expect a match on stdout and respond on stdin. Since `os.Stdout` in a `go test` process is not a TTY, you need a way to interpret terminal / ANSI escape sequences for things like `CursorLocation`. The stdin/stdout handled by `go-expect` is also multiplexed to a [virtual terminal](https://github.com/hinshun/vt10x). + +For example, you can extend the tests for Input by specifying the following test case: + +```go +{ + "Test Input prompt interaction", // Name of the test. + &Input{ // An implementation of the survey.Prompt interface. + Message: "What is your name?", + }, + func(c *expect.Console) { // An expect procedure. You can expect strings / regexps and + c.ExpectString("What is your name?") // write back strings / bytes to its pseudoterminal for survey. + c.SendLine("Johnny Appleseed") + c.ExpectEOF() // Nothing is read from the tty without an expect, and once an + // expectation is met, no further bytes are read. End your + // procedure with `c.ExpectEOF()` to read until survey finishes. + }, + "Johnny Appleseed", // The expected result. +} +``` + +If you want to write your own `go-expect` test from scratch, you'll need to instantiate a virtual terminal, +multiplex it into an `*expect.Console`, and hook up its tty with survey's optional stdio. Please see the `go-expect` +[documentation](https://godoc.org/github.com/Netflix/go-expect) for more detail. diff --git a/vendor/github.com/AlecAivazis/survey/v2/Gopkg.lock b/vendor/github.com/AlecAivazis/survey/v2/Gopkg.lock new file mode 100644 index 0000000000..b764f30811 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/Gopkg.lock @@ -0,0 +1,78 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+ + +[[projects]] + branch = "master" + name = "github.com/Netflix/go-expect" + packages = ["."] + revision = "c93bf25de8e869da25cf26bcd2932b36141f61ae" + +[[projects]] + name = "github.com/davecgh/go-spew" + packages = ["spew"] + revision = "346938d642f2ec3594ed81d874461961cd0faa76" + version = "v1.1.0" + +[[projects]] + branch = "master" + name = "github.com/hinshun/vt10x" + packages = ["."] + revision = "1954e646417484a2a687ea344edade2c2b6523c8" + +[[projects]] + branch = "master" + name = "github.com/kballard/go-shellquote" + packages = ["."] + revision = "95032a82bc518f77982ea72343cc1ade730072f0" + +[[projects]] + name = "github.com/kr/pty" + packages = ["."] + revision = "282ce0e5322c82529687d609ee670fac7c7d917c" + version = "v1.1.1" + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "github.com/mgutz/ansi" + packages = ["."] + revision = "9520e82c474b0a04dd04f8a40959027271bab992" + +[[projects]] + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require" + ] + revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" + version = "v1.2.1" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "371508ebad4798adc38a118f858b5c17a65b58594203548f9feb74cb781dd907" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/AlecAivazis/survey/v2/Gopkg.toml b/vendor/github.com/AlecAivazis/survey/v2/Gopkg.toml new file mode 100644 index 0000000000..8acb825aac --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/Gopkg.toml @@ -0,0 +1,54 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" +# +# [prune] +# non-go = false +# go-tests = true +# unused-packages = true + + +[[constraint]] + branch = "master" + name = "github.com/Netflix/go-expect" + +[[constraint]] + branch = "master" + name = "github.com/hinshun/vt10x" + +[[constraint]] + name = "github.com/mattn/go-isatty" + version = "0.0.3" + +[[constraint]] + branch = "master" + name = "github.com/mgutz/ansi" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.2.1" + +[prune] + go-tests = true + unused-packages = true + +[[constraint]] + branch = "master" + name = "github.com/kballard/go-shellquote" diff --git a/vendor/github.com/AlecAivazis/survey/v2/LICENSE b/vendor/github.com/AlecAivazis/survey/v2/LICENSE new file mode 100644 index 0000000000..07a709ae28 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2018 Alec Aivazis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/AlecAivazis/survey/v2/README.md b/vendor/github.com/AlecAivazis/survey/v2/README.md new file mode 100644 index 0000000000..75f323e76f --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/README.md @@ -0,0 +1,478 @@ +# Survey + +[![Build Status](https://travis-ci.org/AlecAivazis/survey.svg?branch=feature%2Fpretty)](https://travis-ci.org/AlecAivazis/survey) +[![GoDoc](http://img.shields.io/badge/godoc-reference-5272B4.svg)](https://pkg.go.dev/github.com/AlecAivazis/survey/v2) + +A library for building interactive prompts on terminals supporting ANSI escape sequences. 
+
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/AlecAivazis/survey/v2"
+)
+
+// the questions to ask
+var qs = []*survey.Question{
+    {
+        Name:      "name",
+        Prompt:    &survey.Input{Message: "What is your name?"},
+        Validate:  survey.Required,
+        Transform: survey.Title,
+    },
+    {
+        Name: "color",
+        Prompt: &survey.Select{
+            Message: "Choose a color:",
+            Options: []string{"red", "blue", "green"},
+            Default: "red",
+        },
+    },
+    {
+        Name:   "age",
+        Prompt: &survey.Input{Message: "How old are you?"},
+    },
+}
+
+func main() {
+    // the answers will be written to this struct
+    answers := struct {
+        Name          string                  // survey will match the question and field names
+        FavoriteColor string `survey:"color"` // or you can tag fields to match a specific name
+        Age           int                     // if the types don't match, survey will convert it
+    }{}
+
+    // perform the questions
+    err := survey.Ask(qs, &answers)
+    if err != nil {
+        fmt.Println(err.Error())
+        return
+    }
+
+    fmt.Printf("%s chose %s.", answers.Name, answers.FavoriteColor)
+}
+```
+
+## Table of Contents
+
+1. [Examples](#examples)
+1. [Running the Prompts](#running-the-prompts)
+1. [Prompts](#prompts)
+   1. [Input](#input)
+      1. [Suggestion Options](#suggestion-options)
+   1. [Multiline](#multiline)
+   1. [Password](#password)
+   1. [Confirm](#confirm)
+   1. [Select](#select)
+   1. [MultiSelect](#multiselect)
+   1. [Editor](#editor)
+1. [Filtering Options](#filtering-options)
+1. [Validation](#validation)
+   1. [Built-in Validators](#built-in-validators)
+1. [Help Text](#help-text)
+   1. [Changing the input rune](#changing-the-input-rune)
+1. [Changing the Icons](#changing-the-icons)
+1. [Custom Types](#custom-types)
+1. [Testing](#testing)
+1. [FAQ](#faq)
+
+## Examples
+
+Examples can be found in the `examples/` directory. Run them
+to see basic behavior:
+
+```bash
+go get github.com/AlecAivazis/survey/v2
+
+cd $GOPATH/src/github.com/AlecAivazis/survey
+
+go run examples/simple.go
+go run examples/validation.go
+```
+
+## Running the Prompts
+
+There are two primary ways to execute prompts and start collecting information from your users: `Ask` and
+`AskOne`. The primary difference is whether you are interested in collecting a single piece of information
+or if you have a list of questions to ask whose answers should be collected in a single struct.
+For most basic use cases, `Ask` should be enough. However, for surveys with complicated branching logic,
+we recommend that you break out your questions into multiple calls to both of these functions to fit your needs.
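+
+As a rough sketch of the difference, reusing the `qs` questions defined in the example above:
+
+```golang
+// AskOne collects a single piece of information
+name := ""
+survey.AskOne(&survey.Input{Message: "What is your name?"}, &name)
+
+// Ask runs a list of questions and collects the answers into one struct
+answers := struct {
+    Name  string
+    Color string
+    Age   int
+}{}
+survey.Ask(qs, &answers)
+```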
+
+### Configuring the Prompts
+
+Most prompts take fine-grained configuration through fields on the structs you instantiate. It is also
+possible to change survey's default behaviors by passing `AskOpts` to either `Ask` or `AskOne`. Examples
+in this document will do both interchangeably:
+
+```golang
+prompt := &survey.Select{
+    Message: "Choose a color:",
+    Options: []string{"red", "blue", "green"},
+    // can pass a validator directly
+    Validate: survey.Required,
+}
+
+// or define a default for the single call to `AskOne`
+// the answer will get written to the color variable
+survey.AskOne(prompt, &color, survey.WithValidator(survey.Required))
+
+// or define a default for every entry in a list of questions
+// the answer will get copied into the matching field of the struct as shown above
+survey.Ask(questions, &answers, survey.WithValidator(survey.Required))
+```
+
+## Prompts
+
+### Input
+
+
+```golang
+name := ""
+prompt := &survey.Input{
+    Message: "ping",
+}
+survey.AskOne(prompt, &name)
+```
+
+#### Suggestion Options
+
+
+```golang
+file := ""
+prompt := &survey.Input{
+    Message: "inform a file to save:",
+    Suggest: func (toComplete string) []string {
+        files, _ := filepath.Glob(toComplete + "*")
+        return files
+    },
+}
+survey.AskOne(prompt, &file)
+```
+
+### Multiline
+
+
+```golang
+text := ""
+prompt := &survey.Multiline{
+    Message: "ping",
+}
+survey.AskOne(prompt, &text)
+```
+
+### Password
+
+
+```golang
+password := ""
+prompt := &survey.Password{
+    Message: "Please type your password",
+}
+survey.AskOne(prompt, &password)
+```
+
+### Confirm
+
+
+```golang
+likesPie := false
+prompt := &survey.Confirm{
+    Message: "Do you like pie?",
+}
+survey.AskOne(prompt, &likesPie)
+```
+
+### Select
+
+
+```golang
+color := ""
+prompt := &survey.Select{
+    Message: "Choose a color:",
+    Options: []string{"red", "blue", "green"},
+}
+survey.AskOne(prompt, &color)
+```
+
+The value written by a `Select` prompt depends on the target: if you pass an `int`, the field receives
+the index of the selected option; if you pass a `string`, it receives the selected value itself.
+
+The user can also press `esc` to toggle vim mode, which allows cycling through the options with the
+`j` and `k` keys (down and up respectively).
+
+By default, the select prompt is limited to showing 7 options at a time
+and will paginate lists of options longer than that. This can be changed a number of ways:
+
+```golang
+// as a field on a single select
+prompt := &survey.Select{..., PageSize: 10}
+
+// or as an option to Ask or AskOne
+survey.AskOne(prompt, &color, survey.WithPageSize(10))
+```
+
+### MultiSelect
+
+![Example](img/multi-select-all-none.gif)
+
+```golang
+days := []string{}
+prompt := &survey.MultiSelect{
+    Message: "What days do you prefer:",
+    Options: []string{"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"},
+}
+survey.AskOne(prompt, &days)
+```
+
+The value written by a `MultiSelect` prompt also depends on the target: if you pass an `int`, the field
+receives a slice of the selected indices; if you pass a `string`, it receives a slice of the selected values.
+
+The user can also press `esc` to toggle vim mode, which allows cycling through the options with the
+`j` and `k` keys (down and up respectively).
+
+By default, the MultiSelect prompt is limited to showing 7 options at a time
+and will paginate lists of options longer than that. This can be changed a number of ways:
+
+```golang
+// as a field on a single multiselect
+prompt := &survey.MultiSelect{..., PageSize: 10}
+
+// or as an option to Ask or AskOne
+survey.AskOne(prompt, &days, survey.WithPageSize(10))
+```
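+
+To make the value/index distinction concrete, here is a small sketch reusing the `days` prompt above
+(the variable names are just for illustration):
+
+```golang
+// writing into a []string captures the selected values
+selectedNames := []string{}
+survey.AskOne(prompt, &selectedNames)
+
+// writing into an []int captures the selected indices instead
+selectedIndices := []int{}
+survey.AskOne(prompt, &selectedIndices)
+```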
+
+### Editor
+
+Launches the user's preferred editor (defined by the \$VISUAL or \$EDITOR environment variables) on a
+temporary file. If neither variable is set, notepad (on Windows) or vim (Linux or Mac) is used. Once the
+user exits their editor, the contents of the temporary file are read in as the result.
+
+You can also specify a [pattern](https://golang.org/pkg/io/ioutil/#TempFile) for the name of the temporary file. This
+can be useful for ensuring syntax highlighting matches your use case.
+
+```golang
+prompt := &survey.Editor{
+    Message:  "Shell code snippet",
+    FileName: "*.sh",
+}
+
+survey.AskOne(prompt, &content)
+```
+
+## Filtering Options
+
+By default, the user can filter for options in Select and MultiSelect prompts by typing while the prompt
+is active. This will filter out all options that don't contain the typed string anywhere in their name, ignoring case.
+
+A custom filter function can also be provided to change this behavior:
+
+```golang
+func myFilter(filterValue string, optValue string, optIndex int) bool {
+    // only include the option if it includes the filter and has a length of at least 5
+    return strings.Contains(optValue, filterValue) && len(optValue) >= 5
+}
+
+// configure it for a specific prompt
+&survey.Select{
+    Message: "Choose a color:",
+    Options: []string{"red", "blue", "green"},
+    Filter:  myFilter,
+}
+
+// or define a default for all of the questions
+survey.AskOne(prompt, &color, survey.WithFilter(myFilter))
+```
+
+## Keeping the filter active
+
+By default, the filter disappears as soon as the user selects one of the filtered elements. To keep the
+filter active across multiple selections, e.g. in a MultiSelect, set `KeepFilter`:
+
+```golang
+// configure it for a specific prompt
+&survey.Select{
+    Message:    "Choose a color:",
+    Options:    []string{"light-green", "green", "dark-green", "red"},
+    KeepFilter: true,
+}
+
+// or define a default for all of the questions
+survey.AskOne(prompt, &color, survey.WithKeepFilter(true))
+```
+
+## Validation
+
+Validating individual responses for a particular question can be done by defining a
+`Validate` field on the `survey.Question` to be validated. This function takes an
+`interface{}` type and returns an error to show to the user, prompting them for another
+response. As usual, validators can be provided directly to the prompt or with `survey.WithValidator`:
+
+```golang
+q := &survey.Question{
+    Prompt: &survey.Input{Message: "Hello world validation"},
+    Validate: func (val interface{}) error {
+        // since we are validating an Input, the assertion will always succeed
+        if str, ok := val.(string); !ok || len(str) > 10 {
+            return errors.New("This response cannot be longer than 10 characters.")
+        }
+        return nil
+    },
+}
+
+name := ""
+prompt := &survey.Input{ Message: "What's your name?" }
+
+// you can pass multiple validators here and survey will make sure each one passes
+survey.AskOne(prompt, &name, survey.WithValidator(survey.Required))
+```
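+
+When a single `Validate` field needs to run more than one check, the validators can be combined; a
+minimal sketch, assuming the `ComposeValidators` helper shipped in this version's `validate.go`:
+
+```golang
+q := &survey.Question{
+    Name:   "username",
+    Prompt: &survey.Input{Message: "Pick a username:"},
+    // combine several checks into one validator
+    Validate: survey.ComposeValidators(
+        survey.Required,
+        survey.MaxLength(10),
+    ),
+}
+```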
+
+### Built-in Validators
+
+`survey` comes prepackaged with a few validators to fit common situations. Currently these
+validators include:
+
+| name         | valid types | description                                                  | notes                                                                                  |
+| ------------ | ----------- | ------------------------------------------------------------ | -------------------------------------------------------------------------------------- |
+| Required     | any         | Rejects zero values of the response type                      | Boolean values pass straight through since the zero value (false) is a valid response  |
+| MinLength(n) | string      | Enforces that a response is at least the given length         |                                                                                        |
+| MaxLength(n) | string      | Enforces that a response is no longer than the given length   |                                                                                        |
+
+## Help Text
+
+All of the prompts have a `Help` field which can be defined to provide more information to your users:
+
+
+```golang
+&survey.Input{
+    Message: "What is your phone number:",
+    Help:    "Phone number should include the area code",
+}
+```
+
+### Changing the input rune
+
+In some situations, `?` is a perfectly valid response. To handle this, you can change the rune that survey
+looks for with `WithHelpInput`:
+
+```golang
+import (
+    "github.com/AlecAivazis/survey/v2"
+)
+
+number := ""
+prompt := &survey.Input{
+    Message: "If you have this need, please give me a reasonable message.",
+    Help:    "I couldn't come up with one.",
+}
+
+survey.AskOne(prompt, &number, survey.WithHelpInput('^'))
+```
+
+## Changing the Icons
+
+Changing the icons and their color/format can be done by passing the `WithIcons` option. The format
+follows the patterns outlined [here](https://github.com/mgutz/ansi#style-format). For example:
+
+```golang
+import (
+    "github.com/AlecAivazis/survey/v2"
+)
+
+number := ""
+prompt := &survey.Input{
+    Message: "If you have this need, please give me a reasonable message.",
+    Help:    "I couldn't come up with one.",
+}
+
+survey.AskOne(prompt, &number, survey.WithIcons(func(icons *survey.IconSet) {
+    // you can set any icons
+    icons.Question.Text = "⁇"
+    // for more information on formatting the icons, see here: https://github.com/mgutz/ansi#style-format
+    icons.Question.Format = "yellow+hb"
+}))
+```
+
+The icons and their default text and format are summarized below:
+
+| name           | text | format     | description                                                    |
+| -------------- | ---- | ---------- | -------------------------------------------------------------- |
+| Error          | X    | red        | Before an error                                                |
+| Help           | i    | cyan       | Before help text                                               |
+| Question       | ?    | green+hb   | Before the message of a prompt                                 |
+| SelectFocus    | >    | green      | Marks the current focus in `Select` and `MultiSelect` prompts  |
+| UnmarkedOption | [ ]  | default+hb | Marks an unselected option in a `MultiSelect` prompt           |
+| MarkedOption   | [x]  | cyan+b     | Marks a chosen selection in a `MultiSelect` prompt             |
+
+## Custom Types
+
+survey will assign prompt answers to your custom types if they implement this interface:
+
+```golang
+type Settable interface {
+    WriteAnswer(field string, value interface{}) error
+}
+```
+
+Here is an example of how to use it:
+
+```golang
+type MyValue struct {
+    value string
+}
+
+func (my *MyValue) WriteAnswer(name string, value interface{}) error {
+    my.value = value.(string)
+    return nil
+}
+
+myval := MyValue{}
+survey.AskOne(
+    &survey.Input{
+        Message: "Enter something:",
+    },
+    &myval,
+)
+```
+
+## Testing
+
+You can test your program's interactive prompts using [go-expect](https://github.com/Netflix/go-expect). The library
+can be used to expect a match on stdout and respond on stdin. Since `os.Stdout` in a `go test` process is not a TTY,
+if you are manipulating the cursor or using `survey`, you will need a way to interpret terminal / ANSI escape sequences
+for things like `CursorLocation`. `vt10x.NewVT10XConsole` will create a `go-expect` console that also multiplexes
+stdio to an in-memory [virtual terminal](https://github.com/hinshun/vt10x).
+
+For some examples, you can see any of the tests in this repo.
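+
+The snippet below is an illustrative sketch of the pattern, not a drop-in test: it assumes the
+`vt10x.NewVT10XConsole` helper mentioned above and survey's `WithStdio` option, and imports
+`testing`, `github.com/AlecAivazis/survey/v2` and `github.com/hinshun/vt10x`:
+
+```golang
+func TestAskName(t *testing.T) {
+    // create an in-memory console backed by a virtual terminal
+    c, _, err := vt10x.NewVT10XConsole()
+    if err != nil {
+        t.Fatal(err)
+    }
+    defer c.Close()
+
+    done := make(chan struct{})
+    go func() {
+        defer close(done)
+        // script the user's side of the conversation
+        c.ExpectString("What is your name?")
+        c.SendLine("Johnny")
+        c.ExpectEOF()
+    }()
+
+    // point survey at the console's tty instead of os.Stdin/os.Stdout
+    name := ""
+    prompt := &survey.Input{Message: "What is your name?"}
+    survey.AskOne(prompt, &name, survey.WithStdio(c.Tty(), c.Tty(), c.Tty()))
+
+    c.Tty().Close()
+    <-done
+}
+```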
+
+## FAQ
+
+### What kinds of IO are supported by `survey`?
+
+survey aims to support most terminal emulators; it expects support for ANSI escape sequences.
+This means that reading from piped stdin or writing to piped stdout is **not supported**,
+and likely to break your application in these situations. See [#337](https://github.com/AlecAivazis/survey/pull/337#issue-581351617).
+
+### Why isn't sending a SIGINT (aka. CTRL-C) signal working?
+
+When you send an interrupt signal to the process, it only interrupts the current prompt instead of the entire process. This manifests in a `github.com/AlecAivazis/survey/v2/terminal.InterruptErr` being returned from `Ask` and `AskOne`. If you want to stop the process, handle the returned error in your code:
+
+```go
+err := survey.AskOne(prompt, &myVar)
+if err == terminal.InterruptErr {
+    fmt.Println("interrupted")
+
+    os.Exit(0)
+} else if err != nil {
+    panic(err)
+}
+```
diff --git a/vendor/github.com/AlecAivazis/survey/v2/_tasks.hcl b/vendor/github.com/AlecAivazis/survey/v2/_tasks.hcl
new file mode 100644
index 0000000000..0eefcb5486
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/_tasks.hcl
@@ -0,0 +1,19 @@
+task "install-deps" {
+    description = "Install all of package dependencies"
+    pipeline = [
+        "go get -v {{.files}}",
+    ]
+}
+
+task "tests" {
+    description = "Run the test suite"
+    command = "go test -v {{.files}}"
+    environment = {
+        GOFLAGS = "-mod=vendor"
+    }
+}
+
+variables = {
+    files = "$(go list -v ./... | grep -iEv \"tests|examples\")"
+}
+
diff --git a/vendor/github.com/AlecAivazis/survey/v2/confirm.go b/vendor/github.com/AlecAivazis/survey/v2/confirm.go
new file mode 100644
index 0000000000..8e5d7efaaf
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/confirm.go
@@ -0,0 +1,153 @@
+package survey
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// Confirm is a regular text input that accepts yes/no answers. Response type is a bool.
+type Confirm struct {
+	Renderer
+	Message string
+	Default bool
+	Help    string
+}
+
+// data available to the templates when processing
+type ConfirmTemplateData struct {
+	Confirm
+	Answer   string
+	ShowHelp bool
+	Config   *PromptConfig
+}
+
+// Templates with Color formatting. See Documentation: https://github.com/mgutz/ansi#style-format
+var ConfirmQuestionTemplate = `
+{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}}
+{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}}
+{{- color "default+hb"}}{{ .Message }} {{color "reset"}}
+{{- if .Answer}}
+  {{- color "cyan"}}{{.Answer}}{{color "reset"}}{{"\n"}}
+{{- else }}
+  {{- if and .Help (not .ShowHelp)}}{{color "cyan"}}[{{ .Config.HelpInput }} for help]{{color "reset"}} {{end}}
+  {{- color "white"}}{{if .Default}}(Y/n) {{else}}(y/N) {{end}}{{color "reset"}}
+{{- end}}`
+
+// the regex for answers
+var (
+	yesRx = regexp.MustCompile("^(?i:y(?:es)?)$")
+	noRx  = regexp.MustCompile("^(?i:n(?:o)?)$")
+)
+
+func yesNo(t bool) string {
+	if t {
+		return "Yes"
+	}
+	return "No"
+}
+
+func (c *Confirm) getBool(showHelp bool, config *PromptConfig) (bool, error) {
+	cursor := c.NewCursor()
+	rr := c.NewRuneReader()
+	rr.SetTermMode()
+	defer rr.RestoreTermMode()
+
+	// start waiting for input
+	for {
+		line, err := rr.ReadLine(0)
+		if err != nil {
+			return false, err
+		}
+		// move back up a line to compensate for the \n echoed from terminal
+		cursor.PreviousLine(1)
+		val := string(line)
+
+		// get the answer that matches the response
+		var answer bool
+		switch {
+		case yesRx.Match([]byte(val)):
+			answer = true
+		case noRx.Match([]byte(val)):
+			answer = false
+		case val == "":
+			answer = c.Default
+		case val == config.HelpInput && c.Help != "":
+			err := c.Render(
+				ConfirmQuestionTemplate,
+				ConfirmTemplateData{
+					Confirm:  *c,
+					ShowHelp: true,
+					Config:   config,
+				},
+			)
+			if err != nil {
+				// use the default value and bubble up
+				return c.Default, err
+			}
+			showHelp = true
+			continue
+		default:
+			// we didn't get a valid answer, so print error and prompt again
+			if err := c.Error(config, fmt.Errorf("%q is not a valid answer, please try again.", val)); err != nil {
+				return c.Default, err
+			}
+			err := c.Render(
+				ConfirmQuestionTemplate,
+				ConfirmTemplateData{
+					Confirm:  *c,
+					ShowHelp: showHelp,
+					Config:   config,
+				},
+			)
+			if err != nil {
+				// use the default value and bubble up
+				return c.Default, err
+			}
+			continue
+		}
+		return answer, nil
+	}
+	// should not get here
+	return c.Default, nil
+}
+
+/*
+Prompt prompts the user with a simple text field and expects a reply followed
+by a carriage return.
+
+	likesPie := false
+	prompt := &survey.Confirm{ Message: "Do you like pie?" }
+	survey.AskOne(prompt, &likesPie)
+*/
+func (c *Confirm) Prompt(config *PromptConfig) (interface{}, error) {
+	// render the question template
+	err := c.Render(
+		ConfirmQuestionTemplate,
+		ConfirmTemplateData{
+			Confirm: *c,
+			Config:  config,
+		},
+	)
+	if err != nil {
+		return "", err
+	}
+
+	// get input and return
+	return c.getBool(false, config)
+}
+
+// Cleanup overwrites the line with the finalized formatted version
+func (c *Confirm) Cleanup(config *PromptConfig, val interface{}) error {
+	// format the boolean answer as Yes/No
+	ans := yesNo(val.(bool))
+
+	// render the template
+	return c.Render(
+		ConfirmQuestionTemplate,
+		ConfirmTemplateData{
+			Confirm: *c,
+			Answer:  ans,
+			Config:  config,
+		},
+	)
+}
diff --git a/vendor/github.com/AlecAivazis/survey/v2/core/template.go b/vendor/github.com/AlecAivazis/survey/v2/core/template.go
new file mode 100644
index 0000000000..24dade3388
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/core/template.go
@@ -0,0 +1,91 @@
+package core
+
+import (
+	"bytes"
+	"sync"
+	"text/template"
+
+	"github.com/mgutz/ansi"
+)
+
+// DisableColor can be used to make testing reliable
+var DisableColor = false
+
+var TemplateFuncsWithColor = map[string]interface{}{
+	// Templates with Color formatting. See Documentation: https://github.com/mgutz/ansi#style-format
+	"color": ansi.ColorCode,
+}
+
+var TemplateFuncsNoColor = map[string]interface{}{
+	// Templates without Color formatting. For layout / testing.
+	"color": func(color string) string {
+		return ""
+	},
+}
+
+// RunTemplate returns two formatted strings given a template and
+// the data it requires. The first string returned is generated for
+// user-facing output and may or may not contain ANSI escape codes
+// for colored output. The second string does not contain escape codes
+// and can be used by the renderer for layout purposes.
+func RunTemplate(tmpl string, data interface{}) (string, string, error) {
+	tPair, err := getTemplatePair(tmpl)
+	if err != nil {
+		return "", "", err
+	}
+	userBuf := bytes.NewBufferString("")
+	err = tPair[0].Execute(userBuf, data)
+	if err != nil {
+		return "", "", err
+	}
+	layoutBuf := bytes.NewBufferString("")
+	err = tPair[1].Execute(layoutBuf, data)
+	if err != nil {
+		return userBuf.String(), "", err
+	}
+	return userBuf.String(), layoutBuf.String(), err
+}
+
+var (
+	memoizedGetTemplate = map[string][2]*template.Template{}
+
+	memoMutex = &sync.RWMutex{}
+)
+
+// getTemplatePair returns a pair of compiled templates where the
+// first template is generated for user-facing output and the
+// second is generated for use by the renderer. The second
+// template does not contain any color escape codes, whereas
+// the first template may or may not depending on DisableColor.
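+// The compiled pair is memoized by the template source string, so each
+// distinct template is only parsed once.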
+func getTemplatePair(tmpl string) ([2]*template.Template, error) { + memoMutex.RLock() + if t, ok := memoizedGetTemplate[tmpl]; ok { + memoMutex.RUnlock() + return t, nil + } + memoMutex.RUnlock() + + templatePair := [2]*template.Template{nil, nil} + + templateNoColor, err := template.New("prompt").Funcs(TemplateFuncsNoColor).Parse(tmpl) + if err != nil { + return [2]*template.Template{}, err + } + + templatePair[1] = templateNoColor + + if DisableColor { + templatePair[0] = templatePair[1] + } else { + templateWithColor, err := template.New("prompt").Funcs(TemplateFuncsWithColor).Parse(tmpl) + templatePair[0] = templateWithColor + if err != nil { + return [2]*template.Template{}, err + } + } + + memoMutex.Lock() + memoizedGetTemplate[tmpl] = templatePair + memoMutex.Unlock() + return templatePair, nil +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/core/write.go b/vendor/github.com/AlecAivazis/survey/v2/core/write.go new file mode 100644 index 0000000000..47d0e498ca --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/core/write.go @@ -0,0 +1,356 @@ +package core + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" +) + +// the tag used to denote the name of the question +const tagName = "survey" + +// Settable allow for configuration when assigning answers +type Settable interface { + WriteAnswer(field string, value interface{}) error +} + +// OptionAnswer is the return type of Selects/MultiSelects that lets the appropriate information +// get copied to the user's struct +type OptionAnswer struct { + Value string + Index int +} + +func OptionAnswerList(incoming []string) []OptionAnswer { + list := []OptionAnswer{} + for i, opt := range incoming { + list = append(list, OptionAnswer{Value: opt, Index: i}) + } + return list +} + +func WriteAnswer(t interface{}, name string, v interface{}) (err error) { + // if the field is a custom type + if s, ok := t.(Settable); ok { + // use the interface method + return s.WriteAnswer(name, v) + } + + // the target to write to + target := reflect.ValueOf(t) + // the value to write from + value := reflect.ValueOf(v) + + // make sure we are writing to a pointer + if target.Kind() != reflect.Ptr { + return errors.New("you must pass a pointer as the target of a Write operation") + } + // the object "inside" of the target pointer + elem := target.Elem() + + // handle the special types + switch elem.Kind() { + // if we are writing to a struct + case reflect.Struct: + // if we are writing to an option answer than we want to treat + // it like a single thing and not a place to deposit answers + if elem.Type().Name() == "OptionAnswer" { + // copy the value over to the normal struct + return copy(elem, value) + } + + // get the name of the field that matches the string we were given + fieldIndex, err := findFieldIndex(elem, name) + // if something went wrong + if err != nil { + // bubble up + return err + } + field := elem.Field(fieldIndex) + // handle references to the Settable interface aswell + if s, ok := field.Interface().(Settable); ok { + // use the interface method + return s.WriteAnswer(name, v) + } + if field.CanAddr() { + if s, ok := field.Addr().Interface().(Settable); ok { + // use the interface method + return s.WriteAnswer(name, v) + } + } + + // copy the value over to the normal struct + return copy(field, value) + case reflect.Map: + mapType := reflect.TypeOf(t).Elem() + if mapType.Key().Kind() != reflect.String { + return errors.New("answer maps key must be of type string") + } + + // copy only string 
value/index value to map if, + // map is not of type interface and is 'OptionAnswer' + if value.Type().Name() == "OptionAnswer" { + if kval := mapType.Elem().Kind(); kval == reflect.String { + mt := *t.(*map[string]string) + mt[name] = value.FieldByName("Value").String() + return nil + } else if kval == reflect.Int { + mt := *t.(*map[string]int) + mt[name] = int(value.FieldByName("Index").Int()) + return nil + } + } + + if mapType.Elem().Kind() != reflect.Interface { + return errors.New("answer maps must be of type map[string]interface") + } + mt := *t.(*map[string]interface{}) + mt[name] = value.Interface() + return nil + } + // otherwise just copy the value to the target + return copy(elem, value) +} + +type errFieldNotMatch struct { + questionName string +} + +func (err errFieldNotMatch) Error() string { + return fmt.Sprintf("could not find field matching %v", err.questionName) +} + +func (err errFieldNotMatch) Is(target error) bool { // implements the dynamic errors.Is interface. + if target != nil { + if name, ok := IsFieldNotMatch(target); ok { + // if have a filled questionName then perform "deeper" comparison. + return name == "" || err.questionName == "" || name == err.questionName + } + } + + return false +} + +// IsFieldNotMatch reports whether an "err" is caused by a non matching field. +// It returns the Question.Name that couldn't be matched with a destination field. +// +// Usage: +// err := survey.Ask(qs, &v); +// if err != nil { +// if name, ok := core.IsFieldNotMatch(err); ok { +// [...name is the not matched question name] +// } +// } +func IsFieldNotMatch(err error) (string, bool) { + if err != nil { + if v, ok := err.(errFieldNotMatch); ok { + return v.questionName, true + } + } + + return "", false +} + +// BUG(AlecAivazis): the current implementation might cause weird conflicts if there are +// two fields with same name that only differ by casing. 
+func findFieldIndex(s reflect.Value, name string) (int, error) { + // the type of the value + sType := s.Type() + + // first look for matching tags so we can overwrite matching field names + for i := 0; i < sType.NumField(); i++ { + // the field we are current scanning + field := sType.Field(i) + + // the value of the survey tag + tag := field.Tag.Get(tagName) + // if the tag matches the name we are looking for + if tag != "" && tag == name { + // then we found our index + return i, nil + } + } + + // then look for matching names + for i := 0; i < sType.NumField(); i++ { + // the field we are current scanning + field := sType.Field(i) + + // if the name of the field matches what we're looking for + if strings.ToLower(field.Name) == strings.ToLower(name) { + return i, nil + } + } + + // we didn't find the field + return -1, errFieldNotMatch{name} +} + +// isList returns true if the element is something we can Len() +func isList(v reflect.Value) bool { + switch v.Type().Kind() { + case reflect.Array, reflect.Slice: + return true + default: + return false + } +} + +// Write takes a value and copies it to the target +func copy(t reflect.Value, v reflect.Value) (err error) { + // if something ends up panicing we need to catch it in a deferred func + defer func() { + if r := recover(); r != nil { + // if we paniced with an error + if _, ok := r.(error); ok { + // cast the result to an error object + err = r.(error) + } else if _, ok := r.(string); ok { + // otherwise we could have paniced with a string so wrap it in an error + err = errors.New(r.(string)) + } + } + }() + + // if we are copying from a string result to something else + if v.Kind() == reflect.String && v.Type() != t.Type() { + var castVal interface{} + var casterr error + vString := v.Interface().(string) + + switch t.Kind() { + case reflect.Bool: + castVal, casterr = strconv.ParseBool(vString) + case reflect.Int: + castVal, casterr = strconv.Atoi(vString) + case reflect.Int8: + var val64 int64 + val64, casterr = strconv.ParseInt(vString, 10, 8) + if casterr == nil { + castVal = int8(val64) + } + case reflect.Int16: + var val64 int64 + val64, casterr = strconv.ParseInt(vString, 10, 16) + if casterr == nil { + castVal = int16(val64) + } + case reflect.Int32: + var val64 int64 + val64, casterr = strconv.ParseInt(vString, 10, 32) + if casterr == nil { + castVal = int32(val64) + } + case reflect.Int64: + if t.Type() == reflect.TypeOf(time.Duration(0)) { + castVal, casterr = time.ParseDuration(vString) + } else { + castVal, casterr = strconv.ParseInt(vString, 10, 64) + } + case reflect.Uint: + var val64 uint64 + val64, casterr = strconv.ParseUint(vString, 10, 8) + if casterr == nil { + castVal = uint(val64) + } + case reflect.Uint8: + var val64 uint64 + val64, casterr = strconv.ParseUint(vString, 10, 8) + if casterr == nil { + castVal = uint8(val64) + } + case reflect.Uint16: + var val64 uint64 + val64, casterr = strconv.ParseUint(vString, 10, 16) + if casterr == nil { + castVal = uint16(val64) + } + case reflect.Uint32: + var val64 uint64 + val64, casterr = strconv.ParseUint(vString, 10, 32) + if casterr == nil { + castVal = uint32(val64) + } + case reflect.Uint64: + castVal, casterr = strconv.ParseUint(vString, 10, 64) + case reflect.Float32: + var val64 float64 + val64, casterr = strconv.ParseFloat(vString, 32) + if casterr == nil { + castVal = float32(val64) + } + case reflect.Float64: + castVal, casterr = strconv.ParseFloat(vString, 64) + default: + return fmt.Errorf("Unable to convert from string to type %s", t.Kind()) + } + + if 
casterr != nil { + return casterr + } + + t.Set(reflect.ValueOf(castVal)) + return + } + + // if we are copying from an OptionAnswer to something + if v.Type().Name() == "OptionAnswer" { + // copying an option answer to a string + if t.Kind() == reflect.String { + // copies the Value field of the struct + t.Set(reflect.ValueOf(v.FieldByName("Value").Interface())) + return + } + + // copying an option answer to an int + if t.Kind() == reflect.Int { + // copies the Index field of the struct + t.Set(reflect.ValueOf(v.FieldByName("Index").Interface())) + return + } + + // copying an OptionAnswer to an OptionAnswer + if t.Type().Name() == "OptionAnswer" { + t.Set(v) + return + } + + // we're copying an option answer to an incorrect type + return fmt.Errorf("Unable to convert from OptionAnswer to type %s", t.Kind()) + } + + // if we are copying from one slice or array to another + if isList(v) && isList(t) { + // loop over every item in the desired value + for i := 0; i < v.Len(); i++ { + // write to the target given its kind + switch t.Kind() { + // if its a slice + case reflect.Slice: + // an object of the correct type + obj := reflect.Indirect(reflect.New(t.Type().Elem())) + + // write the appropriate value to the obj and catch any errors + if err := copy(obj, v.Index(i)); err != nil { + return err + } + + // just append the value to the end + t.Set(reflect.Append(t, obj)) + // otherwise it could be an array + case reflect.Array: + // set the index to the appropriate value + copy(t.Slice(i, i+1).Index(0), v.Index(i)) + } + } + } else { + // set the value to the target + t.Set(v) + } + + // we're done + return +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/editor.go b/vendor/github.com/AlecAivazis/survey/v2/editor.go new file mode 100644 index 0000000000..ec6a141782 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/editor.go @@ -0,0 +1,222 @@ +package survey + +import ( + "bytes" + "io/ioutil" + "os" + "os/exec" + "runtime" + + "github.com/AlecAivazis/survey/v2/terminal" + shellquote "github.com/kballard/go-shellquote" +) + +/* +Editor launches an instance of the users preferred editor on a temporary file. +The editor to use is determined by reading the $VISUAL or $EDITOR environment +variables. If neither of those are present, notepad (on Windows) or vim +(others) is used. +The launch of the editor is triggered by the enter key. Since the response may +be long, it will not be echoed as Input does, instead, it print . +Response type is a string. + + message := "" + prompt := &survey.Editor{ Message: "What is your commit message?" } + survey.AskOne(prompt, &message) +*/ +type Editor struct { + Renderer + Message string + Default string + Help string + Editor string + HideDefault bool + AppendDefault bool + FileName string +} + +// data available to the templates when processing +type EditorTemplateData struct { + Editor + Answer string + ShowAnswer bool + ShowHelp bool + Config *PromptConfig +} + +// Templates with Color formatting. 
See Documentation: https://github.com/mgutz/ansi#style-format +var EditorQuestionTemplate = ` +{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}} +{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}} +{{- color "default+hb"}}{{ .Message }} {{color "reset"}} +{{- if .ShowAnswer}} + {{- color "cyan"}}{{.Answer}}{{color "reset"}}{{"\n"}} +{{- else }} + {{- if and .Help (not .ShowHelp)}}{{color "cyan"}}[{{ .Config.HelpInput }} for help]{{color "reset"}} {{end}} + {{- if and .Default (not .HideDefault)}}{{color "white"}}({{.Default}}) {{color "reset"}}{{end}} + {{- color "cyan"}}[Enter to launch editor] {{color "reset"}} +{{- end}}` + +var ( + bom = []byte{0xef, 0xbb, 0xbf} + editor = "vim" +) + +func init() { + if runtime.GOOS == "windows" { + editor = "notepad" + } + if v := os.Getenv("VISUAL"); v != "" { + editor = v + } else if e := os.Getenv("EDITOR"); e != "" { + editor = e + } +} + +func (e *Editor) PromptAgain(config *PromptConfig, invalid interface{}, err error) (interface{}, error) { + initialValue := invalid.(string) + return e.prompt(initialValue, config) +} + +func (e *Editor) Prompt(config *PromptConfig) (interface{}, error) { + initialValue := "" + if e.Default != "" && e.AppendDefault { + initialValue = e.Default + } + return e.prompt(initialValue, config) +} + +func (e *Editor) prompt(initialValue string, config *PromptConfig) (interface{}, error) { + // render the template + err := e.Render( + EditorQuestionTemplate, + EditorTemplateData{ + Editor: *e, + Config: config, + }, + ) + if err != nil { + return "", err + } + + // start reading runes from the standard in + rr := e.NewRuneReader() + rr.SetTermMode() + defer rr.RestoreTermMode() + + cursor := e.NewCursor() + cursor.Hide() + defer cursor.Show() + + for { + r, _, err := rr.ReadRune() + if err != nil { + return "", err + } + if r == '\r' || r == '\n' { + break + } + if r == terminal.KeyInterrupt { + return "", terminal.InterruptErr + } + if r == terminal.KeyEndTransmission { + break + } + if string(r) == config.HelpInput && e.Help != "" { + err = e.Render( + EditorQuestionTemplate, + EditorTemplateData{ + Editor: *e, + ShowHelp: true, + Config: config, + }, + ) + if err != nil { + return "", err + } + } + continue + } + + // prepare the temp file + pattern := e.FileName + if pattern == "" { + pattern = "survey*.txt" + } + f, err := ioutil.TempFile("", pattern) + if err != nil { + return "", err + } + defer os.Remove(f.Name()) + + // write utf8 BOM header + // The reason why we do this is because notepad.exe on Windows determines the + // encoding of an "empty" text file by the locale, for example, GBK in China, + // while golang string only handles utf8 well. However, a text file with utf8 + // BOM header is not considered "empty" on Windows, and the encoding will then + // be determined utf8 by notepad.exe, instead of GBK or other encodings. + if _, err := f.Write(bom); err != nil { + return "", err + } + + // write initial value + if _, err := f.WriteString(initialValue); err != nil { + return "", err + } + + // close the fd to prevent the editor unable to save file + if err := f.Close(); err != nil { + return "", err + } + + // check is input editor exist + if e.Editor != "" { + editor = e.Editor + } + + stdio := e.Stdio() + + args, err := shellquote.Split(editor) + if err != nil { + return "", err + } + args = append(args, f.Name()) + + // open the editor + cmd := exec.Command(args[0], args[1:]...) 
+ cmd.Stdin = stdio.In + cmd.Stdout = stdio.Out + cmd.Stderr = stdio.Err + cursor.Show() + if err := cmd.Run(); err != nil { + return "", err + } + + // raw is a BOM-unstripped UTF8 byte slice + raw, err := ioutil.ReadFile(f.Name()) + if err != nil { + return "", err + } + + // strip BOM header + text := string(bytes.TrimPrefix(raw, bom)) + + // check length, return default value on empty + if len(text) == 0 && !e.AppendDefault { + return e.Default, nil + } + + return text, nil +} + +func (e *Editor) Cleanup(config *PromptConfig, val interface{}) error { + return e.Render( + EditorQuestionTemplate, + EditorTemplateData{ + Editor: *e, + Answer: "", + ShowAnswer: true, + Config: config, + }, + ) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/filter.go b/vendor/github.com/AlecAivazis/survey/v2/filter.go new file mode 100644 index 0000000000..56f702678f --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/filter.go @@ -0,0 +1 @@ +package survey diff --git a/vendor/github.com/AlecAivazis/survey/v2/go.mod b/vendor/github.com/AlecAivazis/survey/v2/go.mod new file mode 100644 index 0000000000..de87fb9f55 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/go.mod @@ -0,0 +1,19 @@ +module github.com/AlecAivazis/survey/v2 + +require ( + github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 + github.com/kr/pty v1.1.4 + github.com/mattn/go-colorable v0.1.2 // indirect + github.com/mattn/go-isatty v0.0.8 + github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/testify v1.2.1 + golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5 // indirect + golang.org/x/term v0.0.0-20210503060354-a79de5458b56 + golang.org/x/text v0.3.3 +) + +go 1.13 diff --git a/vendor/github.com/AlecAivazis/survey/v2/go.sum b/vendor/github.com/AlecAivazis/survey/v2/go.sum new file mode 100644 index 0000000000..93dd6e647b --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/go.sum @@ -0,0 +1,35 @@ +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= +github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= +github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kr/pty v1.1.4 h1:5Myjjh3JY/NaAi4IsUbHADytDyl1VE1Y9PXDlL+P/VQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod 
h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/testify v1.2.1 h1:52QO5WkIUcHGIR7EnGagH88x1bUzqGXTC5/1bDTUQ7U= +github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5 h1:8dUaAV7K4uHsF56JQWkprecIQKdPHtR9jCHF5nB8uzc= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/AlecAivazis/survey/v2/input.go b/vendor/github.com/AlecAivazis/survey/v2/input.go new file mode 100644 index 0000000000..a1abab1456 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/input.go @@ -0,0 +1,225 @@ +package survey + +import ( + "errors" + + "github.com/AlecAivazis/survey/v2/core" + "github.com/AlecAivazis/survey/v2/terminal" +) + +/* +Input is a regular text input that prints each character the user types on the screen +and accepts the input with the enter key. Response type is a string. + + name := "" + prompt := &survey.Input{ Message: "What is your name?" } + survey.AskOne(prompt, &name) +*/ +type Input struct { + Renderer + Message string + Default string + Help string + Suggest func(toComplete string) []string + answer string + typedAnswer string + options []core.OptionAnswer + selectedIndex int + showingHelp bool +} + +// data available to the templates when processing +type InputTemplateData struct { + Input + ShowAnswer bool + ShowHelp bool + Answer string + PageEntries []core.OptionAnswer + SelectedIndex int + Config *PromptConfig +} + +// Templates with Color formatting. 
See Documentation: https://github.com/mgutz/ansi#style-format +var InputQuestionTemplate = ` +{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}} +{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}} +{{- color "default+hb"}}{{ .Message }} {{color "reset"}} +{{- if .ShowAnswer}} + {{- color "cyan"}}{{.Answer}}{{color "reset"}}{{"\n"}} +{{- else if .PageEntries -}} + {{- .Answer}} [Use arrows to move, enter to select, type to continue] + {{- "\n"}} + {{- range $ix, $choice := .PageEntries}} + {{- if eq $ix $.SelectedIndex }}{{color $.Config.Icons.SelectFocus.Format }}{{ $.Config.Icons.SelectFocus.Text }} {{else}}{{color "default"}} {{end}} + {{- $choice.Value}} + {{- color "reset"}}{{"\n"}} + {{- end}} +{{- else }} + {{- if or (and .Help (not .ShowHelp)) .Suggest }}{{color "cyan"}}[ + {{- if and .Help (not .ShowHelp)}}{{ print .Config.HelpInput }} for help {{- if and .Suggest}}, {{end}}{{end -}} + {{- if and .Suggest }}{{color "cyan"}}{{ print .Config.SuggestInput }} for suggestions{{end -}} + ]{{color "reset"}} {{end}} + {{- if .Default}}{{color "white"}}({{.Default}}) {{color "reset"}}{{end}} +{{- end}}` + +func (i *Input) onRune(config *PromptConfig) terminal.OnRuneFn { + return terminal.OnRuneFn(func(key rune, line []rune) ([]rune, bool, error) { + if i.options != nil && (key == terminal.KeyEnter || key == '\n') { + return []rune(i.answer), true, nil + } else if i.options != nil && key == terminal.KeyEscape { + i.answer = i.typedAnswer + i.options = nil + } else if key == terminal.KeyArrowUp && len(i.options) > 0 { + if i.selectedIndex == 0 { + i.selectedIndex = len(i.options) - 1 + } else { + i.selectedIndex-- + } + i.answer = i.options[i.selectedIndex].Value + } else if (key == terminal.KeyArrowDown || key == terminal.KeyTab) && len(i.options) > 0 { + if i.selectedIndex == len(i.options)-1 { + i.selectedIndex = 0 + } else { + i.selectedIndex++ + } + i.answer = i.options[i.selectedIndex].Value + } else if key == terminal.KeyTab && i.Suggest != nil { + i.answer = string(line) + i.typedAnswer = i.answer + options := i.Suggest(i.answer) + i.selectedIndex = 0 + if len(options) == 0 { + return line, false, nil + } + + i.answer = options[0] + if len(options) == 1 { + i.typedAnswer = i.answer + i.options = nil + } else { + i.options = core.OptionAnswerList(options) + } + } else { + if i.options == nil { + return line, false, nil + } + + if key >= terminal.KeySpace { + i.answer += string(key) + } + i.typedAnswer = i.answer + + i.options = nil + } + + pageSize := config.PageSize + opts, idx := paginate(pageSize, i.options, i.selectedIndex) + err := i.Render( + InputQuestionTemplate, + InputTemplateData{ + Input: *i, + Answer: i.answer, + ShowHelp: i.showingHelp, + SelectedIndex: idx, + PageEntries: opts, + Config: config, + }, + ) + + if err == nil { + err = readLineAgain + } + + return []rune(i.typedAnswer), true, err + }) +} + +var readLineAgain = errors.New("read line again") + +func (i *Input) Prompt(config *PromptConfig) (interface{}, error) { + // render the template + err := i.Render( + InputQuestionTemplate, + InputTemplateData{ + Input: *i, + Config: config, + ShowHelp: i.showingHelp, + }, + ) + if err != nil { + return "", err + } + + // start reading runes from the standard in + rr := i.NewRuneReader() + rr.SetTermMode() + defer rr.RestoreTermMode() + + cursor := i.NewCursor() + if !config.ShowCursor { + cursor.Hide() // hide the cursor + defer cursor.Show() // show 
the cursor when we're done + } + + var line []rune + + for { + if i.options != nil { + line = []rune{} + } + + line, err = rr.ReadLineWithDefault(0, line, i.onRune(config)) + if err == readLineAgain { + continue + } + + if err != nil { + return "", err + } + + break + } + + i.answer = string(line) + // readline print an empty line, go up before we render the follow up + cursor.Up(1) + + // if we ran into the help string + if i.answer == config.HelpInput && i.Help != "" { + // show the help and prompt again + i.showingHelp = true + return i.Prompt(config) + } + + // if the line is empty + if len(i.answer) == 0 { + // use the default value + return i.Default, err + } + + lineStr := i.answer + + i.AppendRenderedText(lineStr) + + // we're done + return lineStr, err +} + +func (i *Input) Cleanup(config *PromptConfig, val interface{}) error { + // use the default answer when cleaning up the prompt if necessary + ans := i.answer + if ans == "" && i.Default != "" { + ans = i.Default + } + + // render the cleanup + return i.Render( + InputQuestionTemplate, + InputTemplateData{ + Input: *i, + ShowAnswer: true, + Config: config, + Answer: ans, + }, + ) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/multiline.go b/vendor/github.com/AlecAivazis/survey/v2/multiline.go new file mode 100644 index 0000000000..d0523b7eb4 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/multiline.go @@ -0,0 +1,110 @@ +package survey + +import ( + "strings" + + "github.com/AlecAivazis/survey/v2/terminal" +) + +type Multiline struct { + Renderer + Message string + Default string + Help string +} + +// data available to the templates when processing +type MultilineTemplateData struct { + Multiline + Answer string + ShowAnswer bool + ShowHelp bool + Config *PromptConfig +} + +// Templates with Color formatting. 
See Documentation: https://github.com/mgutz/ansi#style-format +var MultilineQuestionTemplate = ` +{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}} +{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}} +{{- color "default+hb"}}{{ .Message }} {{color "reset"}} +{{- if .ShowAnswer}} + {{- "\n"}}{{color "cyan"}}{{.Answer}}{{color "reset"}} + {{- if .Answer }}{{ "\n" }}{{ end }} +{{- else }} + {{- if .Default}}{{color "white"}}({{.Default}}) {{color "reset"}}{{end}} + {{- color "cyan"}}[Enter 2 empty lines to finish]{{color "reset"}} +{{- end}}` + +func (i *Multiline) Prompt(config *PromptConfig) (interface{}, error) { + // render the template + err := i.Render( + MultilineQuestionTemplate, + MultilineTemplateData{ + Multiline: *i, + Config: config, + }, + ) + if err != nil { + return "", err + } + + // start reading runes from the standard in + rr := i.NewRuneReader() + rr.SetTermMode() + defer rr.RestoreTermMode() + + cursor := i.NewCursor() + + multiline := make([]string, 0) + + emptyOnce := false + // get the next line + for { + line := []rune{} + line, err = rr.ReadLine(0) + if err != nil { + return string(line), err + } + + if string(line) == "" { + if emptyOnce { + numLines := len(multiline) + 2 + cursor.PreviousLine(numLines) + for j := 0; j < numLines; j++ { + terminal.EraseLine(i.Stdio().Out, terminal.ERASE_LINE_ALL) + cursor.NextLine(1) + } + cursor.PreviousLine(numLines) + break + } + emptyOnce = true + } else { + emptyOnce = false + } + multiline = append(multiline, string(line)) + } + + val := strings.Join(multiline, "\n") + val = strings.TrimSpace(val) + + // if the line is empty + if len(val) == 0 { + // use the default value + return i.Default, err + } + + i.AppendRenderedText(val) + return val, err +} + +func (i *Multiline) Cleanup(config *PromptConfig, val interface{}) error { + return i.Render( + MultilineQuestionTemplate, + MultilineTemplateData{ + Multiline: *i, + Answer: val.(string), + ShowAnswer: true, + Config: config, + }, + ) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/multiselect.go b/vendor/github.com/AlecAivazis/survey/v2/multiselect.go new file mode 100644 index 0000000000..3d4f95b930 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/multiselect.go @@ -0,0 +1,331 @@ +package survey + +import ( + "errors" + "fmt" + + "github.com/AlecAivazis/survey/v2/core" + "github.com/AlecAivazis/survey/v2/terminal" +) + +/* +MultiSelect is a prompt that presents a list of various options to the user +for them to select using the arrow keys and enter. Response type is a slice of strings. 
+ + days := []string{} + prompt := &survey.MultiSelect{ + Message: "What days do you prefer:", + Options: []string{"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday"}, + } + survey.AskOne(prompt, &days) +*/ +type MultiSelect struct { + Renderer + Message string + Options []string + Default interface{} + Help string + PageSize int + VimMode bool + FilterMessage string + Filter func(filter string, value string, index int) bool + filter string + selectedIndex int + checked map[int]bool + showingHelp bool +} + +// data available to the templates when processing +type MultiSelectTemplateData struct { + MultiSelect + Answer string + ShowAnswer bool + Checked map[int]bool + SelectedIndex int + ShowHelp bool + PageEntries []core.OptionAnswer + Config *PromptConfig +} + +var MultiSelectQuestionTemplate = ` +{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}} +{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}} +{{- color "default+hb"}}{{ .Message }}{{ .FilterMessage }}{{color "reset"}} +{{- if .ShowAnswer}}{{color "cyan"}} {{.Answer}}{{color "reset"}}{{"\n"}} +{{- else }} + {{- " "}}{{- color "cyan"}}[Use arrows to move, space to select, to all, to none, type to filter{{- if and .Help (not .ShowHelp)}}, {{ .Config.HelpInput }} for more help{{end}}]{{color "reset"}} + {{- "\n"}} + {{- range $ix, $option := .PageEntries}} + {{- if eq $ix $.SelectedIndex }}{{color $.Config.Icons.SelectFocus.Format }}{{ $.Config.Icons.SelectFocus.Text }}{{color "reset"}}{{else}} {{end}} + {{- if index $.Checked $option.Index }}{{color $.Config.Icons.MarkedOption.Format }} {{ $.Config.Icons.MarkedOption.Text }} {{else}}{{color $.Config.Icons.UnmarkedOption.Format }} {{ $.Config.Icons.UnmarkedOption.Text }} {{end}} + {{- color "reset"}} + {{- " "}}{{$option.Value}}{{"\n"}} + {{- end}} +{{- end}}` + +// OnChange is called on every keypress. 
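+// It updates the filter, the current selection, and the set of checked
+// options, then re-renders the prompt.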
+func (m *MultiSelect) OnChange(key rune, config *PromptConfig) { + options := m.filterOptions(config) + oldFilter := m.filter + + if key == terminal.KeyArrowUp || (m.VimMode && key == 'k') { + // if we are at the top of the list + if m.selectedIndex == 0 { + // go to the bottom + m.selectedIndex = len(options) - 1 + } else { + // decrement the selected index + m.selectedIndex-- + } + } else if key == terminal.KeyTab || key == terminal.KeyArrowDown || (m.VimMode && key == 'j') { + // if we are at the bottom of the list + if m.selectedIndex == len(options)-1 { + // start at the top + m.selectedIndex = 0 + } else { + // increment the selected index + m.selectedIndex++ + } + // if the user pressed down and there is room to move + } else if key == terminal.KeySpace { + // the option they have selected + if m.selectedIndex < len(options) { + selectedOpt := options[m.selectedIndex] + + // if we haven't seen this index before + if old, ok := m.checked[selectedOpt.Index]; !ok { + // set the value to true + m.checked[selectedOpt.Index] = true + } else { + // otherwise just invert the current value + m.checked[selectedOpt.Index] = !old + } + if !config.KeepFilter { + m.filter = "" + } + } + // only show the help message if we have one to show + } else if string(key) == config.HelpInput && m.Help != "" { + m.showingHelp = true + } else if key == terminal.KeyEscape { + m.VimMode = !m.VimMode + } else if key == terminal.KeyDeleteWord || key == terminal.KeyDeleteLine { + m.filter = "" + } else if key == terminal.KeyDelete || key == terminal.KeyBackspace { + if m.filter != "" { + runeFilter := []rune(m.filter) + m.filter = string(runeFilter[0 : len(runeFilter)-1]) + } + } else if key >= terminal.KeySpace { + m.filter += string(key) + m.VimMode = false + } else if key == terminal.KeyArrowRight { + for _, v := range options { + m.checked[v.Index] = true + } + if !config.KeepFilter { + m.filter = "" + } + } else if key == terminal.KeyArrowLeft { + for _, v := range options { + m.checked[v.Index] = false + } + if !config.KeepFilter { + m.filter = "" + } + } + + m.FilterMessage = "" + if m.filter != "" { + m.FilterMessage = " " + m.filter + } + if oldFilter != m.filter { + // filter changed + options = m.filterOptions(config) + if len(options) > 0 && len(options) <= m.selectedIndex { + m.selectedIndex = len(options) - 1 + } + } + // paginate the options + // figure out the page size + pageSize := m.PageSize + // if we dont have a specific one + if pageSize == 0 { + // grab the global value + pageSize = config.PageSize + } + + // TODO if we have started filtering and were looking at the end of a list + // and we have modified the filter then we should move the page back! 
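+	// opts is the page of options currently visible; idx is the selected
+	// index relative to that page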
+ opts, idx := paginate(pageSize, options, m.selectedIndex) + + // render the options + m.Render( + MultiSelectQuestionTemplate, + MultiSelectTemplateData{ + MultiSelect: *m, + SelectedIndex: idx, + Checked: m.checked, + ShowHelp: m.showingHelp, + PageEntries: opts, + Config: config, + }, + ) +} + +func (m *MultiSelect) filterOptions(config *PromptConfig) []core.OptionAnswer { + // the filtered list + answers := []core.OptionAnswer{} + + // if there is no filter applied + if m.filter == "" { + // return all of the options + return core.OptionAnswerList(m.Options) + } + + // the filter to apply + filter := m.Filter + if filter == nil { + filter = config.Filter + } + + // apply the filter to each option + for i, opt := range m.Options { + // i the filter says to include the option + if filter(m.filter, opt, i) { + answers = append(answers, core.OptionAnswer{ + Index: i, + Value: opt, + }) + } + } + + // we're done here + return answers +} + +func (m *MultiSelect) Prompt(config *PromptConfig) (interface{}, error) { + // compute the default state + m.checked = make(map[int]bool) + // if there is a default + if m.Default != nil { + // if the default is string values + if defaultValues, ok := m.Default.([]string); ok { + for _, dflt := range defaultValues { + for i, opt := range m.Options { + // if the option corresponds to the default + if opt == dflt { + // we found our initial value + m.checked[i] = true + // stop looking + break + } + } + } + // if the default value is index values + } else if defaultIndices, ok := m.Default.([]int); ok { + // go over every index we need to enable by default + for _, idx := range defaultIndices { + // and enable it + m.checked[idx] = true + } + } + } + + // if there are no options to render + if len(m.Options) == 0 { + // we failed + return "", errors.New("please provide options to select from") + } + + // figure out the page size + pageSize := m.PageSize + // if we dont have a specific one + if pageSize == 0 { + // grab the global value + pageSize = config.PageSize + } + // paginate the options + // build up a list of option answers + opts, idx := paginate(pageSize, core.OptionAnswerList(m.Options), m.selectedIndex) + + cursor := m.NewCursor() + cursor.Hide() // hide the cursor + defer cursor.Show() // show the cursor when we're done + + // ask the question + err := m.Render( + MultiSelectQuestionTemplate, + MultiSelectTemplateData{ + MultiSelect: *m, + SelectedIndex: idx, + Checked: m.checked, + PageEntries: opts, + Config: config, + }, + ) + if err != nil { + return "", err + } + + rr := m.NewRuneReader() + rr.SetTermMode() + defer rr.RestoreTermMode() + + // start waiting for input + for { + r, _, err := rr.ReadRune() + if err != nil { + return "", err + } + if r == '\r' || r == '\n' { + break + } + if r == terminal.KeyInterrupt { + return "", terminal.InterruptErr + } + if r == terminal.KeyEndTransmission { + break + } + m.OnChange(r, config) + } + m.filter = "" + m.FilterMessage = "" + + answers := []core.OptionAnswer{} + for i, option := range m.Options { + if val, ok := m.checked[i]; ok && val { + answers = append(answers, core.OptionAnswer{Value: option, Index: i}) + } + } + + return answers, nil +} + +// Cleanup removes the options section, and renders the ask like a normal question. 
+func (m *MultiSelect) Cleanup(config *PromptConfig, val interface{}) error { + // the answer to show + answer := "" + for _, ans := range val.([]core.OptionAnswer) { + answer = fmt.Sprintf("%s, %s", answer, ans.Value) + } + + // if we answered anything + if len(answer) > 2 { + // remove the precending commas + answer = answer[2:] + } + + // execute the output summary template with the answer + return m.Render( + MultiSelectQuestionTemplate, + MultiSelectTemplateData{ + MultiSelect: *m, + SelectedIndex: m.selectedIndex, + Checked: m.checked, + Answer: answer, + ShowAnswer: true, + Config: config, + }, + ) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/password.go b/vendor/github.com/AlecAivazis/survey/v2/password.go new file mode 100644 index 0000000000..285bff5e75 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/password.go @@ -0,0 +1,101 @@ +package survey + +import ( + "fmt" + "strings" + + "github.com/AlecAivazis/survey/v2/core" + "github.com/AlecAivazis/survey/v2/terminal" +) + +/* +Password is like a normal Input but the text shows up as *'s and there is no default. Response +type is a string. + + password := "" + prompt := &survey.Password{ Message: "Please type your password" } + survey.AskOne(prompt, &password) +*/ +type Password struct { + Renderer + Message string + Help string +} + +type PasswordTemplateData struct { + Password + ShowHelp bool + Config *PromptConfig +} + +// PasswordQuestionTemplate is a template with color formatting. See Documentation: https://github.com/mgutz/ansi#style-format +var PasswordQuestionTemplate = ` +{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}} +{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}} +{{- color "default+hb"}}{{ .Message }} {{color "reset"}} +{{- if and .Help (not .ShowHelp)}}{{color "cyan"}}[{{ .Config.HelpInput }} for help]{{color "reset"}} {{end}}` + +func (p *Password) Prompt(config *PromptConfig) (interface{}, error) { + // render the question template + userOut, _, err := core.RunTemplate( + PasswordQuestionTemplate, + PasswordTemplateData{ + Password: *p, + Config: config, + }, + ) + fmt.Fprint(terminal.NewAnsiStdout(p.Stdio().Out), userOut) + if err != nil { + return "", err + } + + rr := p.NewRuneReader() + rr.SetTermMode() + defer rr.RestoreTermMode() + + // no help msg? Just return any response + if p.Help == "" { + line, err := rr.ReadLine('*') + return string(line), err + } + + cursor := p.NewCursor() + + line := []rune{} + // process answers looking for help prompt answer + for { + line, err = rr.ReadLine('*') + if err != nil { + return string(line), err + } + + if string(line) == config.HelpInput { + // terminal will echo the \n so we need to jump back up one row + cursor.PreviousLine(1) + + err = p.Render( + PasswordQuestionTemplate, + PasswordTemplateData{ + Password: *p, + ShowHelp: true, + Config: config, + }, + ) + if err != nil { + return "", err + } + continue + } + + break + } + + lineStr := string(line) + p.AppendRenderedText(strings.Repeat("*", len(lineStr))) + return lineStr, err +} + +// Cleanup hides the string with a fixed number of characters. 
+func (prompt *Password) Cleanup(config *PromptConfig, val interface{}) error { + return nil +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/renderer.go b/vendor/github.com/AlecAivazis/survey/v2/renderer.go new file mode 100644 index 0000000000..f49029a7c4 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/renderer.go @@ -0,0 +1,164 @@ +package survey + +import ( + "bytes" + "fmt" + "unicode/utf8" + + "github.com/AlecAivazis/survey/v2/core" + "github.com/AlecAivazis/survey/v2/terminal" + "golang.org/x/term" +) + +type Renderer struct { + stdio terminal.Stdio + renderedErrors bytes.Buffer + renderedText bytes.Buffer +} + +type ErrorTemplateData struct { + Error error + Icon Icon +} + +var ErrorTemplate = `{{color .Icon.Format }}{{ .Icon.Text }} Sorry, your reply was invalid: {{ .Error.Error }}{{color "reset"}} +` + +func (r *Renderer) WithStdio(stdio terminal.Stdio) { + r.stdio = stdio +} + +func (r *Renderer) Stdio() terminal.Stdio { + return r.stdio +} + +func (r *Renderer) NewRuneReader() *terminal.RuneReader { + return terminal.NewRuneReader(r.stdio) +} + +func (r *Renderer) NewCursor() *terminal.Cursor { + return &terminal.Cursor{ + In: r.stdio.In, + Out: r.stdio.Out, + } +} + +func (r *Renderer) Error(config *PromptConfig, invalid error) error { + // cleanup the currently rendered errors + r.resetPrompt(r.countLines(r.renderedErrors)) + r.renderedErrors.Reset() + + // cleanup the rest of the prompt + r.resetPrompt(r.countLines(r.renderedText)) + r.renderedText.Reset() + + userOut, layoutOut, err := core.RunTemplate(ErrorTemplate, &ErrorTemplateData{ + Error: invalid, + Icon: config.Icons.Error, + }) + if err != nil { + return err + } + + // send the message to the user + fmt.Fprint(terminal.NewAnsiStdout(r.stdio.Out), userOut) + + // add the printed text to the rendered error buffer so we can cleanup later + r.appendRenderedError(layoutOut) + + return nil +} + +func (r *Renderer) Render(tmpl string, data interface{}) error { + // cleanup the currently rendered text + lineCount := r.countLines(r.renderedText) + r.resetPrompt(lineCount) + r.renderedText.Reset() + + // render the template summarizing the current state + userOut, layoutOut, err := core.RunTemplate(tmpl, data) + if err != nil { + return err + } + + // print the summary + fmt.Fprint(terminal.NewAnsiStdout(r.stdio.Out), userOut) + + // add the printed text to the rendered text buffer so we can cleanup later + r.AppendRenderedText(layoutOut) + + // nothing went wrong + return nil +} + +// appendRenderedError appends text to the renderer's error buffer +// which is used to track what has been printed. It is not exported +// as errors should only be displayed via Error(config, error). +func (r *Renderer) appendRenderedError(text string) { + r.renderedErrors.WriteString(text) +} + +// AppendRenderedText appends text to the renderer's text buffer +// which is used to track of what has been printed. The buffer is used +// to calculate how many lines to erase before updating the prompt. 
+func (r *Renderer) AppendRenderedText(text string) { + r.renderedText.WriteString(text) +} + +func (r *Renderer) resetPrompt(lines int) { + // clean out current line in case tmpl didnt end in newline + cursor := r.NewCursor() + cursor.HorizontalAbsolute(0) + terminal.EraseLine(r.stdio.Out, terminal.ERASE_LINE_ALL) + // clean up what we left behind last time + for i := 0; i < lines; i++ { + cursor.PreviousLine(1) + terminal.EraseLine(r.stdio.Out, terminal.ERASE_LINE_ALL) + } +} + +func (r *Renderer) termWidth() (int, error) { + fd := int(r.stdio.Out.Fd()) + termWidth, _, err := term.GetSize(fd) + return termWidth, err +} + +// countLines will return the count of `\n` with the addition of any +// lines that have wrapped due to narrow terminal width +func (r *Renderer) countLines(buf bytes.Buffer) int { + w, err := r.termWidth() + if err != nil || w == 0 { + // if we got an error due to terminal.GetSize not being supported + // on current platform then just assume a very wide terminal + w = 10000 + } + + bufBytes := buf.Bytes() + + count := 0 + curr := 0 + delim := -1 + for curr < len(bufBytes) { + // read until the next newline or the end of the string + relDelim := bytes.IndexRune(bufBytes[curr:], '\n') + if relDelim != -1 { + count += 1 // new line found, add it to the count + delim = curr + relDelim + } else { + delim = len(bufBytes) // no new line found, read rest of text + } + + if lineWidth := utf8.RuneCount(bufBytes[curr:delim]); lineWidth > w { + // account for word wrapping + count += lineWidth / w + if (lineWidth % w) == 0 { + // content whose width is exactly a multiplier of available width should not + // count as having wrapped on the last line + count -= 1 + } + } + curr = delim + 1 + } + + return count +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/select.go b/vendor/github.com/AlecAivazis/survey/v2/select.go new file mode 100644 index 0000000000..fdce038173 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/select.go @@ -0,0 +1,329 @@ +package survey + +import ( + "errors" + + "github.com/AlecAivazis/survey/v2/core" + "github.com/AlecAivazis/survey/v2/terminal" +) + +/* +Select is a prompt that presents a list of various options to the user +for them to select using the arrow keys and enter. Response type is a string. 
+
+	color := ""
+	prompt := &survey.Select{
+		Message: "Choose a color:",
+		Options: []string{"red", "blue", "green"},
+	}
+	survey.AskOne(prompt, &color)
+*/
+type Select struct {
+	Renderer
+	Message       string
+	Options       []string
+	Default       interface{}
+	Help          string
+	PageSize      int
+	VimMode       bool
+	FilterMessage string
+	Filter        func(filter string, value string, index int) bool
+	filter        string
+	selectedIndex int
+	useDefault    bool
+	showingHelp   bool
+}
+
+// SelectTemplateData is the data available to the templates when processing
+type SelectTemplateData struct {
+	Select
+	PageEntries   []core.OptionAnswer
+	SelectedIndex int
+	Answer        string
+	ShowAnswer    bool
+	ShowHelp      bool
+	Config        *PromptConfig
+}
+
+var SelectQuestionTemplate = `
+{{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}}
+{{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}}
+{{- color "default+hb"}}{{ .Message }}{{ .FilterMessage }}{{color "reset"}}
+{{- if .ShowAnswer}}{{color "cyan"}} {{.Answer}}{{color "reset"}}{{"\n"}}
+{{- else}}
+  {{- " "}}{{- color "cyan"}}[Use arrows to move, type to filter{{- if and .Help (not .ShowHelp)}}, {{ .Config.HelpInput }} for more help{{end}}]{{color "reset"}}
+  {{- "\n"}}
+  {{- range $ix, $choice := .PageEntries}}
+    {{- if eq $ix $.SelectedIndex }}{{color $.Config.Icons.SelectFocus.Format }}{{ $.Config.Icons.SelectFocus.Text }} {{else}}{{color "default"}}  {{end}}
+    {{- $choice.Value}}
+    {{- color "reset"}}{{"\n"}}
+  {{- end}}
+{{- end}}`
+
+// OnChange is called on every keypress.
+func (s *Select) OnChange(key rune, config *PromptConfig) bool {
+	options := s.filterOptions(config)
+	oldFilter := s.filter
+
+	// if the user pressed the enter key and the index is a valid option
+	if key == terminal.KeyEnter || key == '\n' {
+		// if the selected index is a valid option
+		if len(options) > 0 && s.selectedIndex < len(options) {
+
+			// we're done (stop prompting the user)
+			return true
+		}
+
+		// we're not done (keep prompting)
+		return false
+
+		// if the user pressed the up arrow or 'k' to emulate vim
+	} else if (key == terminal.KeyArrowUp || (s.VimMode && key == 'k')) && len(options) > 0 {
+		s.useDefault = false
+
+		// if we are at the top of the list
+		if s.selectedIndex == 0 {
+			// start from the bottom
+			s.selectedIndex = len(options) - 1
+		} else {
+			// otherwise we are not at the top of the list so decrement the selected index
+			s.selectedIndex--
+		}
+
+		// if the user pressed down or 'j' to emulate vim
+	} else if (key == terminal.KeyTab || key == terminal.KeyArrowDown || (s.VimMode && key == 'j')) && len(options) > 0 {
+		s.useDefault = false
+		// if we are at the bottom of the list
+		if s.selectedIndex == len(options)-1 {
+			// start from the top
+			s.selectedIndex = 0
+		} else {
+			// increment the selected index
+			s.selectedIndex++
+		}
+		// only show the help message if we have one
+	} else if string(key) == config.HelpInput && s.Help != "" {
+		s.showingHelp = true
+		// if the user wants to toggle vim mode on/off
+	} else if key == terminal.KeyEscape {
+		s.VimMode = !s.VimMode
+		// if the user hits any of the keys that clear the filter
+	} else if key == terminal.KeyDeleteWord || key == terminal.KeyDeleteLine {
+		s.filter = ""
+		// if the user is deleting a character in the filter
+	} else if key == terminal.KeyDelete || key == terminal.KeyBackspace {
+		// if there is content in the filter to delete
+		if s.filter != "" {
+			runeFilter := []rune(s.filter)
+			// subtract a rune from the current filter
+			s.filter = string(runeFilter[0 : len(runeFilter)-1])
+			// we removed the last value in the filter
+		}
+	} else if key >= terminal.KeySpace {
+		s.filter += string(key)
+		// make sure vim mode is disabled
+		s.VimMode = false
+		// make sure that we use the current value in the filtered list
+		s.useDefault = false
+	}
+
+	s.FilterMessage = ""
+	if s.filter != "" {
+		s.FilterMessage = " " + s.filter
+	}
+	if oldFilter != s.filter {
+		// filter changed
+		options = s.filterOptions(config)
+		if len(options) > 0 && len(options) <= s.selectedIndex {
+			s.selectedIndex = len(options) - 1
+		}
+	}
+
+	// figure out the options and index to render
+	// figure out the page size
+	pageSize := s.PageSize
+	// if we don't have a specific one
+	if pageSize == 0 {
+		// grab the global value
+		pageSize = config.PageSize
+	}
+
+	// TODO if we have started filtering and we're looking at the end of a list
+	// and we have modified the filter then we should move the page back!
+	opts, idx := paginate(pageSize, options, s.selectedIndex)
+
+	// render the options
+	s.Render(
+		SelectQuestionTemplate,
+		SelectTemplateData{
+			Select:        *s,
+			SelectedIndex: idx,
+			ShowHelp:      s.showingHelp,
+			PageEntries:   opts,
+			Config:        config,
+		},
+	)
+
+	// keep prompting
+	return false
+}
+
+func (s *Select) filterOptions(config *PromptConfig) []core.OptionAnswer {
+	// the filtered list
+	answers := []core.OptionAnswer{}
+
+	// if there is no filter applied
+	if s.filter == "" {
+		return core.OptionAnswerList(s.Options)
+	}
+
+	// the filter to apply
+	filter := s.Filter
+	if filter == nil {
+		filter = config.Filter
+	}
+
+	// apply the filter to each option
+	for i, opt := range s.Options {
+		// if the filter says to include the option
+		if filter(s.filter, opt, i) {
+			answers = append(answers, core.OptionAnswer{
+				Index: i,
+				Value: opt,
+			})
+		}
+	}
+
+	// return the list of answers
+	return answers
+}
+
+func (s *Select) Prompt(config *PromptConfig) (interface{}, error) {
+	// if there are no options to render
+	if len(s.Options) == 0 {
+		// we failed
+		return "", errors.New("please provide options to select from")
+	}
+
+	// start off with the first option selected
+	sel := 0
+	// if there is a default
+	if s.Default != "" {
+		// find the choice
+		for i, opt := range s.Options {
+			// if the option corresponds to the default
+			if opt == s.Default {
+				// we found our initial value
+				sel = i
+				// stop looking
+				break
+			}
+		}
+	}
+	// save the selected index
+	s.selectedIndex = sel
+
+	// figure out the page size
+	pageSize := s.PageSize
+	// if we don't have a specific one
+	if pageSize == 0 {
+		// grab the global value
+		pageSize = config.PageSize
+	}
+
+	// figure out the options and index to render
+	opts, idx := paginate(pageSize, core.OptionAnswerList(s.Options), sel)
+
+	// ask the question
+	err := s.Render(
+		SelectQuestionTemplate,
+		SelectTemplateData{
+			Select:        *s,
+			PageEntries:   opts,
+			SelectedIndex: idx,
+			Config:        config,
+		},
+	)
+	if err != nil {
+		return "", err
+	}
+
+	// by default, use the default value
+	s.useDefault = true
+
+	rr := s.NewRuneReader()
+	rr.SetTermMode()
+	defer rr.RestoreTermMode()
+
+	cursor := s.NewCursor()
+	cursor.Hide()       // hide the cursor
+	defer cursor.Show() // show the cursor when we're done
+
+	// start waiting for input
+	for {
+		r, _, err := rr.ReadRune()
+		if err != nil {
+			return "", err
+		}
+		if r == terminal.KeyInterrupt {
+			return "", terminal.InterruptErr
+		}
+		if r == terminal.KeyEndTransmission {
+			break
+		}
+		if s.OnChange(r, config) {
+			break
+		}
+	}
+	options := s.filterOptions(config)
+	s.filter = ""
+	s.FilterMessage = ""
+
+	// the index to report
+	var val string
+	// if we are supposed to use the default value
+	if s.useDefault || s.selectedIndex >= len(options) {
+		// if there is a default value
+		if s.Default != nil {
+			// if the default is a string
+			if defaultString, ok := s.Default.(string); ok {
+				// use the default value
+				val = defaultString
+				// the default value could also be an int, which is interpreted as an index into the options
+			} else if defaultIndex, ok := s.Default.(int); ok {
+				val = s.Options[defaultIndex]
+			} else {
+				return val, errors.New("default value of select must be an int or string")
+			}
+		} else if len(options) > 0 {
+			// there is no default value so use the first
+			val = options[0].Value
+		}
+		// otherwise the selected index points to the value
+	} else if s.selectedIndex < len(options) {
+		// the selected option's value
+		val = options[s.selectedIndex].Value
+	}
+
+	// now that we have the value let's go hunt down the right index to return
+	idx = -1
+	for i, optionValue := range s.Options {
+		if optionValue == val {
+			idx = i
+		}
+	}
+
+	return core.OptionAnswer{Value: val, Index: idx}, err
+}
+
+func (s *Select) Cleanup(config *PromptConfig, val interface{}) error {
+	return s.Render(
+		SelectQuestionTemplate,
+		SelectTemplateData{
+			Select:     *s,
+			Answer:     val.(core.OptionAnswer).Value,
+			ShowAnswer: true,
+			Config:     config,
+		},
+	)
+}
diff --git a/vendor/github.com/AlecAivazis/survey/v2/survey.go b/vendor/github.com/AlecAivazis/survey/v2/survey.go
new file mode 100644
index 0000000000..1944ffc5c6
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/survey.go
@@ -0,0 +1,413 @@
+package survey
+
+import (
+	"errors"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/AlecAivazis/survey/v2/core"
+	"github.com/AlecAivazis/survey/v2/terminal"
+)
+
+// defaultAskOptions returns the default options on ask, using the OS stdio.
+func defaultAskOptions() *AskOptions {
+	return &AskOptions{
+		Stdio: terminal.Stdio{
+			In:  os.Stdin,
+			Out: os.Stdout,
+			Err: os.Stderr,
+		},
+		PromptConfig: PromptConfig{
+			PageSize:     7,
+			HelpInput:    "?",
+			SuggestInput: "tab",
+			Icons: IconSet{
+				Error: Icon{
+					Text:   "X",
+					Format: "red",
+				},
+				Help: Icon{
+					Text:   "?",
+					Format: "cyan",
+				},
+				Question: Icon{
+					Text:   "?",
+					Format: "green+hb",
+				},
+				MarkedOption: Icon{
+					Text:   "[x]",
+					Format: "green",
+				},
+				UnmarkedOption: Icon{
+					Text:   "[ ]",
+					Format: "default+hb",
+				},
+				SelectFocus: Icon{
+					Text:   ">",
+					Format: "cyan+b",
+				},
+			},
+			Filter: func(filter string, value string, index int) (include bool) {
+				filter = strings.ToLower(filter)
+
+				// include this option if it matches
+				return strings.Contains(strings.ToLower(value), filter)
+			},
+			KeepFilter: false,
+			ShowCursor: false,
+		},
+	}
+}
+func defaultPromptConfig() *PromptConfig {
+	return &defaultAskOptions().PromptConfig
+}
+
+func defaultIcons() *IconSet {
+	return &defaultPromptConfig().Icons
+}
+
+// OptionAnswer is an ergonomic alias for core.OptionAnswer
+type OptionAnswer = core.OptionAnswer
+
+// Icon holds the text and format to show for a particular icon
+type Icon struct {
+	Text   string
+	Format string
+}
+
+// IconSet holds the icons to use for various prompts
+type IconSet struct {
+	HelpInput      Icon
+	Error          Icon
+	Help           Icon
+	Question       Icon
+	MarkedOption   Icon
+	UnmarkedOption Icon
+	SelectFocus    Icon
+}
+
+// Validator is a function passed to a Question after a user has provided a response.
+// If the function returns an error, then the user will be prompted again for another
+// response.
+type Validator func(ans interface{}) error + +// Transformer is a function passed to a Question after a user has provided a response. +// The function can be used to implement a custom logic that will result to return +// a different representation of the given answer. +// +// Look `TransformString`, `ToLower` `Title` and `ComposeTransformers` for more. +type Transformer func(ans interface{}) (newAns interface{}) + +// Question is the core data structure for a survey questionnaire. +type Question struct { + Name string + Prompt Prompt + Validate Validator + Transform Transformer +} + +// PromptConfig holds the global configuration for a prompt +type PromptConfig struct { + PageSize int + Icons IconSet + HelpInput string + SuggestInput string + Filter func(filter string, option string, index int) bool + KeepFilter bool + ShowCursor bool +} + +// Prompt is the primary interface for the objects that can take user input +// and return a response. +type Prompt interface { + Prompt(config *PromptConfig) (interface{}, error) + Cleanup(*PromptConfig, interface{}) error + Error(*PromptConfig, error) error +} + +// PromptAgainer Interface for Prompts that support prompting again after invalid input +type PromptAgainer interface { + PromptAgain(config *PromptConfig, invalid interface{}, err error) (interface{}, error) +} + +// AskOpt allows setting optional ask options. +type AskOpt func(options *AskOptions) error + +// AskOptions provides additional options on ask. +type AskOptions struct { + Stdio terminal.Stdio + Validators []Validator + PromptConfig PromptConfig +} + +// WithStdio specifies the standard input, output and error files survey +// interacts with. By default, these are os.Stdin, os.Stdout, and os.Stderr. +func WithStdio(in terminal.FileReader, out terminal.FileWriter, err io.Writer) AskOpt { + return func(options *AskOptions) error { + options.Stdio.In = in + options.Stdio.Out = out + options.Stdio.Err = err + return nil + } +} + +// WithFilter specifies the default filter to use when asking questions. +func WithFilter(filter func(filter string, value string, index int) (include bool)) AskOpt { + return func(options *AskOptions) error { + // save the filter internally + options.PromptConfig.Filter = filter + + return nil + } +} + +// WithKeepFilter sets the if the filter is kept after selections +func WithKeepFilter(KeepFilter bool) AskOpt { + return func(options *AskOptions) error { + // set the page size + options.PromptConfig.KeepFilter = KeepFilter + + // nothing went wrong + return nil + } +} + +// WithValidator specifies a validator to use while prompting the user +func WithValidator(v Validator) AskOpt { + return func(options *AskOptions) error { + // add the provided validator to the list + options.Validators = append(options.Validators, v) + + // nothing went wrong + return nil + } +} + +type wantsStdio interface { + WithStdio(terminal.Stdio) +} + +// WithPageSize sets the default page size used by prompts +func WithPageSize(pageSize int) AskOpt { + return func(options *AskOptions) error { + // set the page size + options.PromptConfig.PageSize = pageSize + + // nothing went wrong + return nil + } +} + +// WithHelpInput changes the character that prompts look for to give the user helpful information. 
+func WithHelpInput(r rune) AskOpt {
+	return func(options *AskOptions) error {
+		// set the input character
+		options.PromptConfig.HelpInput = string(r)
+
+		// nothing went wrong
+		return nil
+	}
+}
+
+// WithIcons sets the icons that will be used when prompting the user
+func WithIcons(setIcons func(*IconSet)) AskOpt {
+	return func(options *AskOptions) error {
+		// update the default icons with whatever the user says
+		setIcons(&options.PromptConfig.Icons)
+
+		// nothing went wrong
+		return nil
+	}
+}
+
+// WithShowCursor sets the show cursor behavior when prompting the user
+func WithShowCursor(ShowCursor bool) AskOpt {
+	return func(options *AskOptions) error {
+		// set the show cursor behavior
+		options.PromptConfig.ShowCursor = ShowCursor
+
+		// nothing went wrong
+		return nil
+	}
+}
+
+/*
+AskOne performs the prompt for a single prompt and asks for validation if required.
+Response types should be something that can be cast from the response type designated
+in the documentation. For example:
+
+	name := ""
+	prompt := &survey.Input{
+		Message: "name",
+	}
+
+	survey.AskOne(prompt, &name)
+
+*/
+func AskOne(p Prompt, response interface{}, opts ...AskOpt) error {
+	err := Ask([]*Question{{Prompt: p}}, response, opts...)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+/*
+Ask performs the prompt loop, asking for validation when appropriate. The response
+type can be one of two options. If a struct is passed, the answer will be written to
+the field whose name matches the Name field on the corresponding question. Field types
+should be something that can be cast from the response type designated in the
+documentation. Note, a survey tag can also be used to identify a field. Otherwise, a
+map[string]interface{} can be passed, responses will be written to the key with the
+matching name. For example:
+
+	qs := []*survey.Question{
+		{
+			Name:      "name",
+			Prompt:    &survey.Input{Message: "What is your name?"},
+			Validate:  survey.Required,
+			Transform: survey.Title,
+		},
+	}
+
+	answers := struct{ Name string }{}
+
+	err := survey.Ask(qs, &answers)
+*/
+func Ask(qs []*Question, response interface{}, opts ...AskOpt) error {
+	// build up the configuration options
+	options := defaultAskOptions()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if err := opt(options); err != nil {
+			return err
+		}
+	}
+
+	// if we weren't passed a place to record the answers
+	if response == nil {
+		// we can't go any further
+		return errors.New("cannot call Ask() with a nil reference to record the answers")
+	}
+
+	// go over every question
+	for _, q := range qs {
+		// If Prompt implements controllable stdio, pass in specified stdio.
+		if p, ok := q.Prompt.(wantsStdio); ok {
+			p.WithStdio(options.Stdio)
+		}
+
+		// grab the user input and save it
+		ans, err := q.Prompt.Prompt(&options.PromptConfig)
+		// if there was a problem
+		if err != nil {
+			return err
+		}
+
+		// build up a list of validators that we have to apply to this question
+		validators := []Validator{}
+
+		// make sure to include the question specific one
+		if q.Validate != nil {
+			validators = append(validators, q.Validate)
+		}
+		// add any "global" validators
+		for _, validator := range options.Validators {
+			validators = append(validators, validator)
+		}
+
+		// apply every validator to the response
+		for _, validator := range validators {
+			// wait for a valid response
+			for invalid := validator(ans); invalid != nil; invalid = validator(ans) {
+				err := q.Prompt.Error(&options.PromptConfig, invalid)
+				// if there was a problem
+				if err != nil {
+					return err
+				}
+
+				// ask for more input
+				if promptAgainer, ok := q.Prompt.(PromptAgainer); ok {
+					ans, err = promptAgainer.PromptAgain(&options.PromptConfig, ans, invalid)
+				} else {
+					ans, err = q.Prompt.Prompt(&options.PromptConfig)
+				}
+				// if there was a problem
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+		if q.Transform != nil {
+			// check if we have a transformer available, if so
+			// then try to acquire the new representation of the
+			// answer, if the resulting answer is not nil.
+			if newAns := q.Transform(ans); newAns != nil {
+				ans = newAns
+			}
+		}
+
+		// tell the prompt to cleanup with the validated value
+		q.Prompt.Cleanup(&options.PromptConfig, ans)
+
+		// if something went wrong
+		if err != nil {
+			// stop listening
+			return err
+		}
+
+		// add it to the map
+		err = core.WriteAnswer(response, q.Name, ans)
+		// if something went wrong
+		if err != nil {
+			return err
+		}
+
+	}
+
+	// return the response
+	return nil
+}
+
+// paginate returns a single page of choices given the page size, the total list of
+// possible choices, and the current selected index in the total list.
+func paginate(pageSize int, choices []core.OptionAnswer, sel int) ([]core.OptionAnswer, int) { + var start, end, cursor int + + if len(choices) < pageSize { + // if we dont have enough options to fill a page + start = 0 + end = len(choices) + cursor = sel + + } else if sel < pageSize/2 { + // if we are in the first half page + start = 0 + end = pageSize + cursor = sel + + } else if len(choices)-sel-1 < pageSize/2 { + // if we are in the last half page + start = len(choices) - pageSize + end = len(choices) + cursor = sel - start + + } else { + // somewhere in the middle + above := pageSize / 2 + below := pageSize - above + + cursor = pageSize / 2 + start = sel - above + end = sel + below + } + + // return the subset we care about and the index + return choices[start:end], cursor +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt b/vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt new file mode 100644 index 0000000000..ade5fef6d0 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2014 Takashi Kokubun + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/README.md b/vendor/github.com/AlecAivazis/survey/v2/terminal/README.md new file mode 100644 index 0000000000..d0b9db0fd4 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/README.md @@ -0,0 +1,3 @@ +# survey/terminal + +This package started as a copy of [kokuban/go-ansi](http://github.com/k0kubun/go-ansi) but has since been modified to fit survey's specific needs. 
diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/buffered_reader.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/buffered_reader.go new file mode 100644 index 0000000000..55eac15222 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/buffered_reader.go @@ -0,0 +1,22 @@ +package terminal + +import ( + "bytes" + "io" +) + +type BufferedReader struct { + In io.Reader + Buffer *bytes.Buffer +} + +func (br *BufferedReader) Read(p []byte) (int, error) { + n, err := br.Buffer.Read(p) + if err != nil && err != io.EOF { + return n, err + } else if err == nil { + return n, nil + } + + return br.In.Read(p[n:]) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/cursor.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/cursor.go new file mode 100644 index 0000000000..1ac74fd6f5 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/cursor.go @@ -0,0 +1,190 @@ +// +build !windows + +package terminal + +import ( + "bufio" + "bytes" + "fmt" + "io" + "regexp" + "strconv" +) + +var COORDINATE_SYSTEM_BEGIN Short = 1 + +var dsrPattern = regexp.MustCompile(`\x1b\[(\d+);(\d+)R$`) + +type Cursor struct { + In FileReader + Out FileWriter +} + +// Up moves the cursor n cells to up. +func (c *Cursor) Up(n int) { + fmt.Fprintf(c.Out, "\x1b[%dA", n) +} + +// Down moves the cursor n cells to down. +func (c *Cursor) Down(n int) { + fmt.Fprintf(c.Out, "\x1b[%dB", n) +} + +// Forward moves the cursor n cells to right. +func (c *Cursor) Forward(n int) { + fmt.Fprintf(c.Out, "\x1b[%dC", n) +} + +// Back moves the cursor n cells to left. +func (c *Cursor) Back(n int) { + fmt.Fprintf(c.Out, "\x1b[%dD", n) +} + +// NextLine moves cursor to beginning of the line n lines down. +func (c *Cursor) NextLine(n int) { + c.Down(1) + c.HorizontalAbsolute(0) +} + +// PreviousLine moves cursor to beginning of the line n lines up. +func (c *Cursor) PreviousLine(n int) { + c.Up(1) + c.HorizontalAbsolute(0) +} + +// HorizontalAbsolute moves cursor horizontally to x. +func (c *Cursor) HorizontalAbsolute(x int) { + fmt.Fprintf(c.Out, "\x1b[%dG", x) +} + +// Show shows the cursor. +func (c *Cursor) Show() { + fmt.Fprint(c.Out, "\x1b[?25h") +} + +// Hide hide the cursor. +func (c *Cursor) Hide() { + fmt.Fprint(c.Out, "\x1b[?25l") +} + +// Move moves the cursor to a specific x,y location. +func (c *Cursor) Move(x int, y int) { + fmt.Fprintf(c.Out, "\x1b[%d;%df", x, y) +} + +// Save saves the current position +func (c *Cursor) Save() { + fmt.Fprint(c.Out, "\x1b7") +} + +// Restore restores the saved position of the cursor +func (c *Cursor) Restore() { + fmt.Fprint(c.Out, "\x1b8") +} + +// for comparability purposes between windows +// in unix we need to print out a new line on some terminals +func (c *Cursor) MoveNextLine(cur *Coord, terminalSize *Coord) { + if cur.Y == terminalSize.Y { + fmt.Fprintln(c.Out) + } + c.NextLine(1) +} + +// Location returns the current location of the cursor in the terminal +func (c *Cursor) Location(buf *bytes.Buffer) (*Coord, error) { + // ANSI escape sequence for DSR - Device Status Report + // https://en.wikipedia.org/wiki/ANSI_escape_code#CSI_sequences + fmt.Fprint(c.Out, "\x1b[6n") + + // There may be input in Stdin prior to CursorLocation so make sure we don't + // drop those bytes. + var loc []int + var match string + for loc == nil { + // Reports the cursor position (CPR) to the application as (as though typed at + // the keyboard) ESC[n;mR, where n is the row and m is the column. 
+ reader := bufio.NewReader(c.In) + text, err := reader.ReadSlice(byte('R')) + if err != nil { + return nil, err + } + + loc = dsrPattern.FindStringIndex(string(text)) + if loc == nil { + // After reading slice to byte 'R', the bufio Reader may have read more + // bytes into its internal buffer which will be discarded on next ReadSlice. + // We create a temporary buffer to read the remaining buffered slice and + // write them to output buffer. + buffered := make([]byte, reader.Buffered()) + _, err = io.ReadFull(reader, buffered) + if err != nil { + return nil, err + } + + // Stdin contains R that doesn't match DSR, so pass the bytes along to + // output buffer. + buf.Write(text) + buf.Write(buffered) + } else { + // Write the non-matching leading bytes to output buffer. + buf.Write(text[:loc[0]]) + + // Save the matching bytes to extract the row and column of the cursor. + match = string(text[loc[0]:loc[1]]) + } + } + + matches := dsrPattern.FindStringSubmatch(string(match)) + if len(matches) != 3 { + return nil, fmt.Errorf("incorrect number of matches: %d", len(matches)) + } + + col, err := strconv.Atoi(matches[2]) + if err != nil { + return nil, err + } + + row, err := strconv.Atoi(matches[1]) + if err != nil { + return nil, err + } + + return &Coord{Short(col), Short(row)}, nil +} + +func (cur Coord) CursorIsAtLineEnd(size *Coord) bool { + return cur.X == size.X +} + +func (cur Coord) CursorIsAtLineBegin() bool { + return cur.X == COORDINATE_SYSTEM_BEGIN +} + +// Size returns the height and width of the terminal. +func (c *Cursor) Size(buf *bytes.Buffer) (*Coord, error) { + // the general approach here is to move the cursor to the very bottom + // of the terminal, ask for the current location and then move the + // cursor back where we started + + // hide the cursor (so it doesn't blink when getting the size of the terminal) + c.Hide() + defer c.Show() + + // save the current location of the cursor + c.Save() + defer c.Restore() + + // move the cursor to the very bottom of the terminal + c.Move(999, 999) + + // ask for the current location + bottom, err := c.Location(buf) + if err != nil { + return nil, err + } + + // since the bottom was calculated in the lower right corner, it + // is the dimensions we are looking for + return bottom, nil +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/cursor_windows.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/cursor_windows.go new file mode 100644 index 0000000000..e24440e7c5 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/cursor_windows.go @@ -0,0 +1,138 @@ +package terminal + +import ( + "bytes" + "syscall" + "unsafe" +) + +var COORDINATE_SYSTEM_BEGIN Short = 0 + +// shared variable to save the cursor location from CursorSave() +var cursorLoc Coord + +type Cursor struct { + In FileReader + Out FileWriter +} + +func (c *Cursor) Up(n int) { + c.cursorMove(0, n) +} + +func (c *Cursor) Down(n int) { + c.cursorMove(0, -1*n) +} + +func (c *Cursor) Forward(n int) { + c.cursorMove(n, 0) +} + +func (c *Cursor) Back(n int) { + c.cursorMove(-1*n, 0) +} + +// save the cursor location +func (c *Cursor) Save() { + cursorLoc, _ = c.Location(nil) +} + +func (c *Cursor) Restore() { + handle := syscall.Handle(c.Out.Fd()) + // restore it to the original position + procSetConsoleCursorPosition.Call(uintptr(handle), uintptr(*(*int32)(unsafe.Pointer(&cursorLoc)))) +} + +func (cur Coord) CursorIsAtLineEnd(size *Coord) bool { + return cur.X == size.X +} + +func (cur Coord) CursorIsAtLineBegin() bool { + return cur.X == 0 +} 
+ +func (c *Cursor) cursorMove(x int, y int) { + handle := syscall.Handle(c.Out.Fd()) + + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + + var cursor Coord + cursor.X = csbi.cursorPosition.X + Short(x) + cursor.Y = csbi.cursorPosition.Y + Short(y) + + procSetConsoleCursorPosition.Call(uintptr(handle), uintptr(*(*int32)(unsafe.Pointer(&cursor)))) +} + +func (c *Cursor) NextLine(n int) { + c.Up(n) + c.HorizontalAbsolute(0) +} + +func (c *Cursor) PreviousLine(n int) { + c.Down(n) + c.HorizontalAbsolute(0) +} + +// for comparability purposes between windows +// in windows we don't have to print out a new line +func (c *Cursor) MoveNextLine(cur Coord, terminalSize *Coord) { + c.NextLine(1) +} + +func (c *Cursor) HorizontalAbsolute(x int) { + handle := syscall.Handle(c.Out.Fd()) + + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + + var cursor Coord + cursor.X = Short(x) + cursor.Y = csbi.cursorPosition.Y + + if csbi.size.X < cursor.X { + cursor.X = csbi.size.X + } + + procSetConsoleCursorPosition.Call(uintptr(handle), uintptr(*(*int32)(unsafe.Pointer(&cursor)))) +} + +func (c *Cursor) Show() { + handle := syscall.Handle(c.Out.Fd()) + + var cci consoleCursorInfo + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&cci))) + cci.visible = 1 + + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&cci))) +} + +func (c *Cursor) Hide() { + handle := syscall.Handle(c.Out.Fd()) + + var cci consoleCursorInfo + procGetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&cci))) + cci.visible = 0 + + procSetConsoleCursorInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&cci))) +} + +func (c *Cursor) Location(buf *bytes.Buffer) (Coord, error) { + handle := syscall.Handle(c.Out.Fd()) + + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + + return csbi.cursorPosition, nil +} + +func (c *Cursor) Size(buf *bytes.Buffer) (*Coord, error) { + handle := syscall.Handle(c.Out.Fd()) + + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + // windows' coordinate system begins at (0, 0) + csbi.size.X-- + csbi.size.Y-- + return &csbi.size, nil +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/display.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/display.go new file mode 100644 index 0000000000..0f014b1353 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/display.go @@ -0,0 +1,9 @@ +package terminal + +type EraseLineMode int + +const ( + ERASE_LINE_END EraseLineMode = iota + ERASE_LINE_START + ERASE_LINE_ALL +) diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/display_posix.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/display_posix.go new file mode 100644 index 0000000000..838dd6646e --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/display_posix.go @@ -0,0 +1,11 @@ +// +build !windows + +package terminal + +import ( + "fmt" +) + +func EraseLine(out FileWriter, mode EraseLineMode) { + fmt.Fprintf(out, "\x1b[%dK", mode) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/display_windows.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/display_windows.go new file mode 100644 index 0000000000..0adc1ded38 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/display_windows.go @@ 
-0,0 +1,27 @@ +package terminal + +import ( + "syscall" + "unsafe" +) + +func EraseLine(out FileWriter, mode EraseLineMode) { + handle := syscall.Handle(out.Fd()) + + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + + var w uint32 + var x Short + cursor := csbi.cursorPosition + switch mode { + case ERASE_LINE_END: + x = csbi.size.X + case ERASE_LINE_START: + x = 0 + case ERASE_LINE_ALL: + cursor.X = 0 + x = csbi.size.X + } + procFillConsoleOutputCharacter.Call(uintptr(handle), uintptr(' '), uintptr(x), uintptr(*(*int32)(unsafe.Pointer(&cursor))), uintptr(unsafe.Pointer(&w))) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/error.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/error.go new file mode 100644 index 0000000000..710c361406 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/error.go @@ -0,0 +1,9 @@ +package terminal + +import ( + "errors" +) + +var ( + InterruptErr = errors.New("interrupt") +) diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/output.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/output.go new file mode 100644 index 0000000000..6fe11c089f --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/output.go @@ -0,0 +1,19 @@ +// +build !windows + +package terminal + +import ( + "io" +) + +// NewAnsiStdout returns special stdout, which converts escape sequences to Windows API calls +// on Windows environment. +func NewAnsiStdout(out FileWriter) io.Writer { + return out +} + +// NewAnsiStderr returns special stderr, which converts escape sequences to Windows API calls +// on Windows environment. +func NewAnsiStderr(out FileWriter) io.Writer { + return out +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/output_windows.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/output_windows.go new file mode 100644 index 0000000000..6622690f32 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/output_windows.go @@ -0,0 +1,227 @@ +package terminal + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +var ( + cursorFunctions = map[rune]func(c *Cursor) func(int){ + 'A': func(c *Cursor) func(int) { return c.Up }, + 'B': func(c *Cursor) func(int) { return c.Down }, + 'C': func(c *Cursor) func(int) { return c.Forward }, + 'D': func(c *Cursor) func(int) { return c.Back }, + 'E': func(c *Cursor) func(int) { return c.NextLine }, + 'F': func(c *Cursor) func(int) { return c.PreviousLine }, + 'G': func(c *Cursor) func(int) { return c.HorizontalAbsolute }, + } +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type Writer struct { + out FileWriter + handle syscall.Handle + orgAttr word +} + +func NewAnsiStdout(out FileWriter) io.Writer { + var csbi consoleScreenBufferInfo + if !isatty.IsTerminal(out.Fd()) { + return out + } + handle := syscall.Handle(out.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: out, handle: handle, orgAttr: csbi.attributes} +} + +func NewAnsiStderr(out FileWriter) io.Writer { + var csbi consoleScreenBufferInfo + 
if !isatty.IsTerminal(out.Fd()) { + return out + } + handle := syscall.Handle(out.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: out, handle: handle, orgAttr: csbi.attributes} +} + +func (w *Writer) Write(data []byte) (n int, err error) { + r := bytes.NewReader(data) + + for { + ch, size, err := r.ReadRune() + if err != nil { + break + } + n += size + + switch ch { + case '\x1b': + size, err = w.handleEscape(r) + n += size + if err != nil { + break + } + default: + fmt.Fprint(w.out, string(ch)) + } + } + return +} + +func (w *Writer) handleEscape(r *bytes.Reader) (n int, err error) { + buf := make([]byte, 0, 10) + buf = append(buf, "\x1b"...) + + // Check '[' continues after \x1b + ch, size, err := r.ReadRune() + if err != nil { + fmt.Fprint(w.out, string(buf)) + return + } + n += size + if ch != '[' { + fmt.Fprint(w.out, string(buf)) + return + } + + // Parse escape code + var code rune + argBuf := make([]byte, 0, 10) + for { + ch, size, err = r.ReadRune() + if err != nil { + fmt.Fprint(w.out, string(buf)) + return + } + n += size + if ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') { + code = ch + break + } + argBuf = append(argBuf, string(ch)...) + } + + w.applyEscapeCode(buf, string(argBuf), code) + return +} + +func (w *Writer) applyEscapeCode(buf []byte, arg string, code rune) { + c := &Cursor{Out: w.out} + + switch arg + string(code) { + case "?25h": + c.Show() + return + case "?25l": + c.Hide() + return + } + + if f, ok := cursorFunctions[code]; ok { + if n, err := strconv.Atoi(arg); err == nil { + f(c)(n) + return + } + } + + switch code { + case 'm': + w.applySelectGraphicRendition(arg) + default: + buf = append(buf, string(code)...) + fmt.Fprint(w.out, string(buf)) + } +} + +// Original implementation: https://github.com/mattn/go-colorable +func (w *Writer) applySelectGraphicRendition(arg string) { + if arg == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.orgAttr)) + return + } + + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + + for _, param := range strings.Split(arg, ";") { + n, err := strconv.Atoi(param) + if err != nil { + continue + } + + switch { + case n == 0 || n == 100: + attr = w.orgAttr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + } + + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader.go new file mode 100644 
index 0000000000..94c915c5de --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader.go @@ -0,0 +1,373 @@ +package terminal + +import ( + "fmt" + "unicode" + + "golang.org/x/text/width" +) + +type RuneReader struct { + stdio Stdio + cursor *Cursor + state runeReaderState +} + +func NewRuneReader(stdio Stdio) *RuneReader { + return &RuneReader{ + stdio: stdio, + state: newRuneReaderState(stdio.In), + } +} + +func (rr *RuneReader) printChar(char rune, mask rune) { + // if we don't need to mask the input + if mask == 0 { + // just print the character the user pressed + fmt.Fprintf(rr.stdio.Out, "%c", char) + } else { + // otherwise print the mask we were given + fmt.Fprintf(rr.stdio.Out, "%c", mask) + } +} + +type OnRuneFn func(rune, []rune) ([]rune, bool, error) + +func (rr *RuneReader) ReadLine(mask rune, onRunes ...OnRuneFn) ([]rune, error) { + return rr.ReadLineWithDefault(mask, []rune{}, onRunes...) +} + +func (rr *RuneReader) ReadLineWithDefault(mask rune, d []rune, onRunes ...OnRuneFn) ([]rune, error) { + line := []rune{} + // we only care about horizontal displacements from the origin so start counting at 0 + index := 0 + + cursor := &Cursor{ + In: rr.stdio.In, + Out: rr.stdio.Out, + } + + onRune := func(r rune, line []rune) ([]rune, bool, error) { + return line, false, nil + } + + // if the user pressed a key the caller was interested in capturing + if len(onRunes) > 0 { + onRune = onRunes[0] + } + + // we get the terminal width and height (if resized after this point the property might become invalid) + terminalSize, _ := cursor.Size(rr.Buffer()) + // we set the current location of the cursor once + cursorCurrent, _ := cursor.Location(rr.Buffer()) + + increment := func() { + if cursorCurrent.CursorIsAtLineEnd(terminalSize) { + cursorCurrent.X = COORDINATE_SYSTEM_BEGIN + cursorCurrent.Y++ + } else { + cursorCurrent.X++ + } + } + decrement := func() { + if cursorCurrent.CursorIsAtLineBegin() { + cursorCurrent.X = terminalSize.X + cursorCurrent.Y-- + } else { + cursorCurrent.X-- + } + } + + if len(d) > 0 { + index = len(d) + fmt.Fprint(rr.stdio.Out, string(d)) + line = d + for range d { + increment() + } + } + + for { + // wait for some input + r, _, err := rr.ReadRune() + if err != nil { + return line, err + } + + if l, stop, err := onRune(r, line); stop || err != nil { + return l, err + } + + // if the user pressed enter or some other newline/termination like ctrl+d + if r == '\r' || r == '\n' || r == KeyEndTransmission { + // delete what's printed out on the console screen (cleanup) + for index > 0 { + if cursorCurrent.CursorIsAtLineBegin() { + EraseLine(rr.stdio.Out, ERASE_LINE_END) + cursor.PreviousLine(1) + cursor.Forward(int(terminalSize.X)) + } else { + cursor.Back(1) + } + decrement() + index-- + } + // move the cursor the a new line + cursor.MoveNextLine(cursorCurrent, terminalSize) + + // we're done processing the input + return line, nil + } + // if the user interrupts (ie with ctrl+c) + if r == KeyInterrupt { + // go to the beginning of the next line + fmt.Fprint(rr.stdio.Out, "\r\n") + + // we're done processing the input, and treat interrupt like an error + return line, InterruptErr + } + + // allow for backspace/delete editing of inputs + if r == KeyBackspace || r == KeyDelete { + // and we're not at the beginning of the line + if index > 0 && len(line) > 0 { + // if we are at the end of the word + if index == len(line) { + // just remove the last letter from the internal representation + // also count the number of cells the rune before the 
cursor occupied + cells := runeWidth(line[len(line)-1]) + line = line[:len(line)-1] + // go back one + if cursorCurrent.X == 1 { + cursor.PreviousLine(1) + cursor.Forward(int(terminalSize.X)) + } else { + cursor.Back(cells) + } + + // clear the rest of the line + EraseLine(rr.stdio.Out, ERASE_LINE_END) + } else { + // we need to remove a character from the middle of the word + + cells := runeWidth(line[index-1]) + + // remove the current index from the list + line = append(line[:index-1], line[index:]...) + + // save the current position of the cursor, as we have to move the cursor one back to erase the current symbol + // and then move the cursor for each symbol in line[index-1:] to print it out, afterwards we want to restore + // the cursor to its previous location. + cursor.Save() + + // clear the rest of the line + cursor.Back(cells) + + // print what comes after + for _, char := range line[index-1:] { + //Erase symbols which are left over from older print + EraseLine(rr.stdio.Out, ERASE_LINE_END) + // print characters to the new line appropriately + rr.printChar(char, mask) + + } + // erase what's left over from last print + if cursorCurrent.Y < terminalSize.Y { + cursor.NextLine(1) + EraseLine(rr.stdio.Out, ERASE_LINE_END) + } + // restore cursor + cursor.Restore() + if cursorCurrent.CursorIsAtLineBegin() { + cursor.PreviousLine(1) + cursor.Forward(int(terminalSize.X)) + } else { + cursor.Back(cells) + } + } + + // decrement the index + index-- + decrement() + } else { + // otherwise the user pressed backspace while at the beginning of the line + soundBell(rr.stdio.Out) + } + + // we're done processing this key + continue + } + + // if the left arrow is pressed + if r == KeyArrowLeft { + // if we have space to the left + if index > 0 { + //move the cursor to the prev line if necessary + if cursorCurrent.CursorIsAtLineBegin() { + cursor.PreviousLine(1) + cursor.Forward(int(terminalSize.X)) + } else { + cursor.Back(runeWidth(line[index-1])) + } + //decrement the index + index-- + decrement() + + } else { + // otherwise we are at the beginning of where we started reading lines + // sound the bell + soundBell(rr.stdio.Out) + } + + // we're done processing this key press + continue + } + + // if the right arrow is pressed + if r == KeyArrowRight { + // if we have space to the right + if index < len(line) { + // move the cursor to the next line if necessary + if cursorCurrent.CursorIsAtLineEnd(terminalSize) { + cursor.NextLine(1) + } else { + cursor.Forward(runeWidth(line[index])) + } + index++ + increment() + + } else { + // otherwise we are at the end of the word and can't go past + // sound the bell + soundBell(rr.stdio.Out) + } + + // we're done processing this key press + continue + } + // the user pressed one of the special keys + if r == SpecialKeyHome { + for index > 0 { + if cursorCurrent.CursorIsAtLineBegin() { + cursor.PreviousLine(1) + cursor.Forward(int(terminalSize.X)) + cursorCurrent.Y-- + cursorCurrent.X = terminalSize.X + } else { + cursor.Back(runeWidth(line[index-1])) + cursorCurrent.X -= Short(runeWidth(line[index-1])) + } + index-- + } + continue + // user pressed end + } else if r == SpecialKeyEnd { + for index != len(line) { + if cursorCurrent.CursorIsAtLineEnd(terminalSize) { + cursor.NextLine(1) + cursorCurrent.Y++ + cursorCurrent.X = COORDINATE_SYSTEM_BEGIN + } else { + cursor.Forward(runeWidth(line[index])) + cursorCurrent.X += Short(runeWidth(line[index])) + } + index++ + } + continue + // user pressed forward delete key + } else if r == SpecialKeyDelete { + // 
if index at the end of the line nothing to delete + if index != len(line) { + // save the current position of the cursor, as we have to erase the current symbol + // and then move the cursor for each symbol in line[index:] to print it out, afterwards we want to restore + // the cursor to its previous location. + cursor.Save() + // remove the symbol after the cursor + line = append(line[:index], line[index+1:]...) + // print the updated line + for _, char := range line[index:] { + EraseLine(rr.stdio.Out, ERASE_LINE_END) + // print out the character + rr.printChar(char, mask) + } + // erase what's left on last line + if cursorCurrent.Y < terminalSize.Y { + cursor.NextLine(1) + EraseLine(rr.stdio.Out, ERASE_LINE_END) + } + // restore cursor + cursor.Restore() + if len(line) == 0 || index == len(line) { + EraseLine(rr.stdio.Out, ERASE_LINE_END) + } + } + continue + } + + // if the letter is another escape sequence + if unicode.IsControl(r) || r == IgnoreKey { + // ignore it + continue + } + + // the user pressed a regular key + + // if we are at the end of the line + if index == len(line) { + // just append the character at the end of the line + line = append(line, r) + // save the location of the cursor + index++ + increment() + // print out the character + rr.printChar(r, mask) + } else { + // we are in the middle of the word so we need to insert the character the user pressed + line = append(line[:index], append([]rune{r}, line[index:]...)...) + // save the current position of the cursor, as we have to move the cursor back to erase the current symbol + // and then move for each symbol in line[index:] to print it out, afterwards we want to restore + // cursor's location to its previous one. + cursor.Save() + EraseLine(rr.stdio.Out, ERASE_LINE_END) + // remove the symbol after the cursor + // print the updated line + for _, char := range line[index:] { + EraseLine(rr.stdio.Out, ERASE_LINE_END) + // print out the character + rr.printChar(char, mask) + increment() + } + // if we are at the last line, we want to visually insert a new line and append to it. + if cursorCurrent.CursorIsAtLineEnd(terminalSize) && cursorCurrent.Y == terminalSize.Y { + // add a new line to the terminal + fmt.Fprintln(rr.stdio.Out) + // restore the position of the cursor horizontally + cursor.Restore() + // restore the position of the cursor vertically + cursor.PreviousLine(1) + } else { + // restore cursor + cursor.Restore() + } + // check if cursor needs to move to next line + cursorCurrent, _ = cursor.Location(rr.Buffer()) + if cursorCurrent.CursorIsAtLineEnd(terminalSize) { + cursor.NextLine(1) + } else { + cursor.Forward(runeWidth(r)) + } + // increment the index + index++ + increment() + + } + } +} + +func runeWidth(r rune) int { + switch width.LookupRune(r).Kind() { + case width.EastAsianWide, width.EastAsianFullwidth: + return 2 + } + return 1 +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_bsd.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_bsd.go new file mode 100644 index 0000000000..6ea340923a --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_bsd.go @@ -0,0 +1,13 @@ +// copied from: https://github.com/golang/crypto/blob/master/ssh/terminal/util_bsd.go +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA +const ioctlWriteTermios = syscall.TIOCSETA diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_linux.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_linux.go new file mode 100644 index 0000000000..6dd60ea697 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_linux.go @@ -0,0 +1,13 @@ +// copied from https://github.com/golang/crypto/blob/master/ssh/terminal/util_linux.go +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +// +build linux,!ppc64le + +package terminal + +// These constants are declared here, rather than importing +// them from the syscall package as some syscall packages, even +// on linux, for example gccgo, do not declare them. +const ioctlReadTermios = 0x5401 // syscall.TCGETS +const ioctlWriteTermios = 0x5402 // syscall.TCSETS diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_posix.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_posix.go new file mode 100644 index 0000000000..a7e3842505 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_posix.go @@ -0,0 +1,111 @@ +// +build !windows + +// The terminal mode manipulation code is derived heavily from: +// https://github.com/golang/crypto/blob/master/ssh/terminal/util.go: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import ( + "bufio" + "bytes" + "fmt" + "syscall" + "unsafe" +) + +type runeReaderState struct { + term syscall.Termios + reader *bufio.Reader + buf *bytes.Buffer +} + +func newRuneReaderState(input FileReader) runeReaderState { + buf := new(bytes.Buffer) + return runeReaderState{ + reader: bufio.NewReader(&BufferedReader{ + In: input, + Buffer: buf, + }), + buf: buf, + } +} + +func (rr *RuneReader) Buffer() *bytes.Buffer { + return rr.state.buf +} + +// For reading runes we just want to disable echo. 
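The `ioctlReadTermios`/`ioctlWriteTermios` constants above select the right termios ioctls per platform; `SetTermMode` below uses them to save the current terminal state and clear the echo and canonical-mode flags. As a rough, hedged equivalent, the same save/modify/restore dance can be sketched with `golang.org/x/term`, which wraps the same ioctls (an assumed convenience for illustration; the vendored code deliberately issues raw syscalls instead):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func main() {
	fd := int(os.Stdin.Fd())
	// save the current termios state and switch to raw mode (echo off,
	// canonical mode off), analogous to SetTermMode below
	oldState, err := term.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	// always restore the saved state, analogous to RestoreTermMode
	defer term.Restore(fd, oldState)

	buf := make([]byte, 1)
	if _, err := os.Stdin.Read(buf); err == nil {
		fmt.Printf("\r\nread %q without echo\r\n", buf[0])
	}
}
```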
+func (rr *RuneReader) SetTermMode() error { + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(rr.stdio.In.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&rr.state.term)), 0, 0, 0); err != 0 { + return err + } + + newState := rr.state.term + newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG + + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(rr.stdio.In.Fd()), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { + return err + } + + return nil +} + +func (rr *RuneReader) RestoreTermMode() error { + if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(rr.stdio.In.Fd()), ioctlWriteTermios, uintptr(unsafe.Pointer(&rr.state.term)), 0, 0, 0); err != 0 { + return err + } + return nil +} + +func (rr *RuneReader) ReadRune() (rune, int, error) { + r, size, err := rr.state.reader.ReadRune() + if err != nil { + return r, size, err + } + + // parse ^[ sequences to look for arrow keys + if r == '\033' { + if rr.state.reader.Buffered() == 0 { + // no more characters so must be `Esc` key + return KeyEscape, 1, nil + } + r, size, err = rr.state.reader.ReadRune() + if err != nil { + return r, size, err + } + if r != '[' { + return r, size, fmt.Errorf("Unexpected Escape Sequence: %q", []rune{'\033', r}) + } + r, size, err = rr.state.reader.ReadRune() + if err != nil { + return r, size, err + } + switch r { + case 'D': + return KeyArrowLeft, 1, nil + case 'C': + return KeyArrowRight, 1, nil + case 'A': + return KeyArrowUp, 1, nil + case 'B': + return KeyArrowDown, 1, nil + case 'H': // Home button + return SpecialKeyHome, 1, nil + case 'F': // End button + return SpecialKeyEnd, 1, nil + case '3': // Delete Button + // discard the following '~' key from buffer + rr.state.reader.Discard(1) + return SpecialKeyDelete, 1, nil + default: + // discard the following '~' key from buffer + rr.state.reader.Discard(1) + return IgnoreKey, 1, nil + } + return r, size, fmt.Errorf("Unknown Escape Sequence: %q", []rune{'\033', '[', r}) + } + return r, size, err +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_ppc64le.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_ppc64le.go new file mode 100644 index 0000000000..ae4eb09737 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_ppc64le.go @@ -0,0 +1,7 @@ +// +build ppc64le,linux + +package terminal + +// Used syscall numbers from https://github.com/golang/go/blob/master/src/syscall/ztypes_linux_ppc64le.go +const ioctlReadTermios = 0x402c7413 // syscall.TCGETS +const ioctlWriteTermios = 0x802c7414 // syscall.TCSETS diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_windows.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_windows.go new file mode 100644 index 0000000000..791092f531 --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/runereader_windows.go @@ -0,0 +1,142 @@ +package terminal + +import ( + "bytes" + "syscall" + "unsafe" +) + +var ( + dll = syscall.NewLazyDLL("kernel32.dll") + setConsoleMode = dll.NewProc("SetConsoleMode") + getConsoleMode = dll.NewProc("GetConsoleMode") + readConsoleInput = dll.NewProc("ReadConsoleInputW") +) + +const ( + EVENT_KEY = 0x0001 + + // key codes for arrow keys + // https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx + VK_DELETE = 0x2E + VK_END = 0x23 + VK_HOME = 0x24 + VK_LEFT = 0x25 + VK_UP = 0x26 + VK_RIGHT = 0x27 + VK_DOWN = 0x28 + + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + + 
ENABLE_ECHO_INPUT uint32 = 0x0004 + ENABLE_LINE_INPUT uint32 = 0x0002 + ENABLE_PROCESSED_INPUT uint32 = 0x0001 +) + +type inputRecord struct { + eventType uint16 + padding uint16 + event [16]byte +} + +type keyEventRecord struct { + bKeyDown int32 + wRepeatCount uint16 + wVirtualKeyCode uint16 + wVirtualScanCode uint16 + unicodeChar uint16 + wdControlKeyState uint32 +} + +type runeReaderState struct { + term uint32 +} + +func newRuneReaderState(input FileReader) runeReaderState { + return runeReaderState{} +} + +func (rr *RuneReader) Buffer() *bytes.Buffer { + return nil +} + +func (rr *RuneReader) SetTermMode() error { + r, _, err := getConsoleMode.Call(uintptr(rr.stdio.In.Fd()), uintptr(unsafe.Pointer(&rr.state.term))) + // windows return 0 on error + if r == 0 { + return err + } + + newState := rr.state.term + newState &^= ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT + r, _, err = setConsoleMode.Call(uintptr(rr.stdio.In.Fd()), uintptr(newState)) + // windows return 0 on error + if r == 0 { + return err + } + return nil +} + +func (rr *RuneReader) RestoreTermMode() error { + r, _, err := setConsoleMode.Call(uintptr(rr.stdio.In.Fd()), uintptr(rr.state.term)) + // windows return 0 on error + if r == 0 { + return err + } + return nil +} + +func (rr *RuneReader) ReadRune() (rune, int, error) { + ir := &inputRecord{} + bytesRead := 0 + for { + rv, _, e := readConsoleInput.Call(rr.stdio.In.Fd(), uintptr(unsafe.Pointer(ir)), 1, uintptr(unsafe.Pointer(&bytesRead))) + // windows returns non-zero to indicate success + if rv == 0 && e != nil { + return 0, 0, e + } + + if ir.eventType != EVENT_KEY { + continue + } + + // the event data is really a c struct union, so here we have to do an usafe + // cast to put the data into the keyEventRecord (since we have already verified + // above that this event does correspond to a key event + key := (*keyEventRecord)(unsafe.Pointer(&ir.event[0])) + // we only care about key down events + if key.bKeyDown == 0 { + continue + } + if key.wdControlKeyState&(LEFT_CTRL_PRESSED|RIGHT_CTRL_PRESSED) != 0 && key.unicodeChar == 'C' { + return KeyInterrupt, bytesRead, nil + } + // not a normal character so look up the input sequence from the + // virtual key code mappings (VK_*) + if key.unicodeChar == 0 { + switch key.wVirtualKeyCode { + case VK_DOWN: + return KeyArrowDown, bytesRead, nil + case VK_LEFT: + return KeyArrowLeft, bytesRead, nil + case VK_RIGHT: + return KeyArrowRight, bytesRead, nil + case VK_UP: + return KeyArrowUp, bytesRead, nil + case VK_DELETE: + return SpecialKeyDelete, bytesRead, nil + case VK_HOME: + return SpecialKeyHome, bytesRead, nil + case VK_END: + return SpecialKeyEnd, bytesRead, nil + default: + // not a virtual key that we care about so just continue on to + // the next input key + continue + } + } + r := rune(key.unicodeChar) + return r, bytesRead, nil + } +} diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/sequences.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/sequences.go new file mode 100644 index 0000000000..6d9e87755b --- /dev/null +++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/sequences.go @@ -0,0 +1,31 @@ +package terminal + +import ( + "fmt" + "io" +) + +const ( + KeyArrowLeft = '\x02' + KeyArrowRight = '\x06' + KeyArrowUp = '\x10' + KeyArrowDown = '\x0e' + KeySpace = ' ' + KeyEnter = '\r' + KeyBackspace = '\b' + KeyDelete = '\x7f' + KeyInterrupt = '\x03' + KeyEndTransmission = '\x04' + KeyEscape = '\x1b' + KeyDeleteWord = '\x17' // Ctrl+W + KeyDeleteLine = '\x18' // Ctrl+X + 
SpecialKeyHome   = '\x01'
+	SpecialKeyEnd    = '\x11'
+	SpecialKeyDelete = '\x12'
+	IgnoreKey        = '\000'
+	KeyTab           = '\t'
+)
+
+func soundBell(out io.Writer) {
+	fmt.Fprint(out, "\a")
+}
diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/stdio.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/stdio.go
new file mode 100644
index 0000000000..88bf7c4ea7
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/stdio.go
@@ -0,0 +1,24 @@
+package terminal
+
+import (
+	"io"
+)
+
+// Stdio is the standard input/output the terminal reads/writes with.
+type Stdio struct {
+	In  FileReader
+	Out FileWriter
+	Err io.Writer
+}
+
+// FileWriter provides a minimal interface for Stdout.
+type FileWriter interface {
+	io.Writer
+	Fd() uintptr
+}
+
+// FileReader provides a minimal interface for Stdin.
+type FileReader interface {
+	io.Reader
+	Fd() uintptr
+}
diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/syscall_windows.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/syscall_windows.go
new file mode 100644
index 0000000000..63b85d4c39
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/syscall_windows.go
@@ -0,0 +1,39 @@
+package terminal
+
+import (
+	"syscall"
+)
+
+var (
+	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
+	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
+	procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+	procGetConsoleCursorInfo       = kernel32.NewProc("GetConsoleCursorInfo")
+	procSetConsoleCursorInfo       = kernel32.NewProc("SetConsoleCursorInfo")
+)
+
+type wchar uint16
+type dword uint32
+type word uint16
+
+type smallRect struct {
+	left   Short
+	top    Short
+	right  Short
+	bottom Short
+}
+
+type consoleScreenBufferInfo struct {
+	size              Coord
+	cursorPosition    Coord
+	attributes        word
+	window            smallRect
+	maximumWindowSize Coord
+}
+
+type consoleCursorInfo struct {
+	size    dword
+	visible int32
+}
diff --git a/vendor/github.com/AlecAivazis/survey/v2/terminal/terminal.go b/vendor/github.com/AlecAivazis/survey/v2/terminal/terminal.go
new file mode 100644
index 0000000000..4b59062c7d
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/terminal/terminal.go
@@ -0,0 +1,8 @@
+package terminal
+
+type Short int16
+
+type Coord struct {
+	X Short
+	Y Short
+}
diff --git a/vendor/github.com/AlecAivazis/survey/v2/transform.go b/vendor/github.com/AlecAivazis/survey/v2/transform.go
new file mode 100644
index 0000000000..58d5193b03
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/transform.go
@@ -0,0 +1,79 @@
+package survey
+
+import (
+	"reflect"
+	"strings"
+)
+
+// TransformString returns a `Transformer` based on the "f"
+// function which accepts a string representation of the answer
+// and returns a new, transformed answer.
+// Take for example the functions inside the std `strings` package:
+// they can be converted to a compatible `Transformer` by using this function,
+// e.g. `TransformString(strings.Title)`, `TransformString(strings.ToUpper)`.
+//
+// Note that `TransformString` is just a helper; `Transformer` can be used
+// to transform any type of answer.
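Before the implementation, a hypothetical usage sketch: composing `TransformString` with the package's built-in transformers to normalize an answer. The question text and field names are invented for illustration; `survey.Question.Transform` is the field this package reads:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/AlecAivazis/survey/v2"
)

func main() {
	qs := []*survey.Question{{
		Name:   "name",
		Prompt: &survey.Input{Message: "Function name:"},
		// trim whitespace first, then lower-case the result
		Transform: survey.ComposeTransformers(
			survey.TransformString(strings.TrimSpace),
			survey.ToLower,
		),
	}}

	answers := struct{ Name string }{}
	if err := survey.Ask(qs, &answers); err != nil {
		panic(err)
	}
	fmt.Println("normalized:", answers.Name)
}
```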
+func TransformString(f func(s string) string) Transformer {
+	return func(ans interface{}) interface{} {
+		// if the answer value passed in is the zero value of the appropriate type
+		if isZero(reflect.ValueOf(ans)) {
+			// skip this `Transformer` by returning a zero value of string.
+			// The original answer will not be affected;
+			// see survey.go#L125.
+			// A zero value of string should be returned to be handled by
+			// the next Transformer in a composed Transformer;
+			// see transform.go#L75
+			return ""
+		}
+
+		// "ans" is never nil here, so we don't have to check that;
+		// see survey.go#L338 for more.
+		// Make sure that the answer's value is of type string.
+		s, ok := ans.(string)
+		if !ok {
+			return ""
+		}
+
+		return f(s)
+	}
+}
+
+// ToLower is a `Transformer`.
+// It receives an answer value
+// and returns a copy of "ans"
+// with all Unicode letters mapped to their lower case.
+//
+// Note that if "ans" is not a string then it will
+// return a zero-value string, meaning that the above answer
+// will not be affected by this call at all.
+func ToLower(ans interface{}) interface{} {
+	transformer := TransformString(strings.ToLower)
+	return transformer(ans)
+}
+
+// Title is a `Transformer`.
+// It receives an answer value
+// and returns a copy of "ans"
+// with all Unicode letters that begin words
+// mapped to their title case.
+//
+// Note that if "ans" is not a string then it will
+// return a zero-value string, meaning that the above answer
+// will not be affected by this call at all.
+func Title(ans interface{}) interface{} {
+	transformer := TransformString(strings.Title)
+	return transformer(ans)
+}
+
+// ComposeTransformers is a variadic function used to create one transformer from many.
+func ComposeTransformers(transformers ...Transformer) Transformer {
+	// return a transformer that calls each one sequentially
+	return func(ans interface{}) interface{} {
+		// execute each transformer
+		for _, t := range transformers {
+			ans = t(ans)
+		}
+		return ans
+	}
+}
diff --git a/vendor/github.com/AlecAivazis/survey/v2/validate.go b/vendor/github.com/AlecAivazis/survey/v2/validate.go
new file mode 100644
index 0000000000..ba7e1278ff
--- /dev/null
+++ b/vendor/github.com/AlecAivazis/survey/v2/validate.go
@@ -0,0 +1,87 @@
+package survey
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+// Required does not allow an empty value.
+func Required(val interface{}) error {
+	// the reflect value of the result
+	value := reflect.ValueOf(val)
+
+	// if the value passed in is the zero value of the appropriate type
+	if isZero(value) && value.Kind() != reflect.Bool {
+		return errors.New("Value is required")
+	}
+	return nil
+}
+
+// MaxLength requires that the string is no longer than the specified value.
+func MaxLength(length int) Validator {
+	// return a validator that checks the length of the string
+	return func(val interface{}) error {
+		if str, ok := val.(string); ok {
+			// if the string is longer than the given value
+			if len([]rune(str)) > length {
+				// yell loudly
+				return fmt.Errorf("value is too long. 
Max length is %v", length) + } + } else { + // otherwise we cannot convert the value into a string and cannot enforce length + return fmt.Errorf("cannot enforce length on response of type %v", reflect.TypeOf(val).Name()) + } + + // the input is fine + return nil + } +} + +// MinLength requires that the string is longer or equal in length to the specified value +func MinLength(length int) Validator { + // return a validator that checks the length of the string + return func(val interface{}) error { + if str, ok := val.(string); ok { + // if the string is shorter than the given value + if len([]rune(str)) < length { + // yell loudly + return fmt.Errorf("value is too short. Min length is %v", length) + } + } else { + // otherwise we cannot convert the value into a string and cannot enforce length + return fmt.Errorf("cannot enforce length on response of type %v", reflect.TypeOf(val).Name()) + } + + // the input is fine + return nil + } +} + +// ComposeValidators is a variadic function used to create one validator from many. +func ComposeValidators(validators ...Validator) Validator { + // return a validator that calls each one sequentially + return func(val interface{}) error { + // execute each validator + for _, validator := range validators { + // if the answer's value is not valid + if err := validator(val); err != nil { + // return the error + return err + } + } + // we passed all validators, the answer is valid + return nil + } +} + +// isZero returns true if the passed value is the zero object +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Slice, reflect.Map: + return v.Len() == 0 + } + + // compare the types directly with more general coverage + return reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) +} diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 0000000000..e3d9a64d1d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md new file mode 100644 index 0000000000..261c041e7a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/README.md @@ -0,0 +1,12 @@ +# go-ansiterm + +This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. 
The results of the function calls are platform dependent. + +For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. + +The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). + +See parser_test.go for examples exercising the state machine and generating appropriate function calls. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 0000000000..96504a33bc --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + 
ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) + +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var toGroundBytes = getToGroundBytes() +var executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var csiParams = getByteRange(0x30, 0x3F) + +var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var upperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ +var lowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var alphabetics = append(upperCase, lowerCase...) + +var printables = getByteRange(0x20, 0x7F) + +var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var escapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) + escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 0000000000..8d66e777c0 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 0000000000..bcbe00d0c5 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 0000000000..7ed5e01c34 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 0000000000..1c719db9e4 
--- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 0000000000..6390abd231 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 0000000000..98087b38c2 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + 
ED(int) error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 0000000000..52451e9469 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 0000000000..593b10ab69 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + oscState.parser.logf("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 0000000000..03cec7ada6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,151 @@ +package ansiterm + +import ( + "errors" + "log" + "os" +) + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state + + logf func(string, ...interface{}) +} + +type Option func(*AnsiParser) + +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f + } +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + for _, o := range opts { + o(ap) + } + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf != nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) 
+ } + } else { + ap.logf = logger.Printf + } + } + + if ap.logf == nil { + ap.logf = func(string, ...interface{}) {} + } + + ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} + ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} + ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} + ap.escape = escapeState{baseState{name: "Escape", parser: ap}} + ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} + ap.error = errorState{baseState{name: "Error", parser: ap}} + ap.ground = groundState{baseState{name: "Ground", parser: ap}} + ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} + + ap.stateMap = []state{ + ap.csiEntry, + ap.csiParam, + ap.dcsEntry, + ap.escape, + ap.escapeIntermediate, + ap.error, + ap.ground, + ap.oscString, + } + + ap.currState = getState(initialState, ap.stateMap) + + ap.logf("CreateParser: parser %p", ap) + return ap +} + +func getState(name string, states []state) state { + for _, el := range states { + if el.Name() == name { + return el + } + } + + return nil +} + +func (ap *AnsiParser) Parse(bytes []byte) (int, error) { + for i, b := range bytes { + if err := ap.handle(b); err != nil { + return i, err + } + } + + return len(bytes), ap.eventHandler.Flush() +} + +func (ap *AnsiParser) handle(b byte) error { + ap.context.currentChar = b + newState, err := ap.currState.Handle(b) + if err != nil { + return err + } + + if newState == nil { + ap.logf("WARNING: newState is nil") + return errors.New("New state of 'nil' is invalid.") + } + + if newState != ap.currState { + if err := ap.changeState(newState); err != nil { + return err + } + } + + return nil +} + +func (ap *AnsiParser) changeState(newState state) error { + ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + + // Exit old state + if err := ap.currState.Exit(); err != nil { + ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + return err + } + + // Perform transition action + if err := ap.currState.Transition(newState); err != nil { + ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + return err + } + + // Enter new state + if err := newState.Enter(); err != nil { + ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) + return err + } + + ap.currState = newState + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go new file mode 100644 index 0000000000..de0a1f9cde --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -0,0 +1,99 @@ +package ansiterm + +import ( + "strconv" +) + +func parseParams(bytes []byte) ([]string, error) { + paramBuff := make([]byte, 0, 0) + params := []string{} + + for _, v := range bytes { + if v == ';' { + if len(paramBuff) > 0 { + // Completed parameter, append it to the list + s := string(paramBuff) + params = append(params, s) + paramBuff = make([]byte, 0, 0) + } + } else { + paramBuff = append(paramBuff, v) + } + } + + // Last parameter may not be terminated with ';' + if len(paramBuff) > 0 { + s := string(paramBuff) + params = append(params, s) + } + + return params, nil +} + +func parseCmd(context ansiContext) (string, error) { + return string(context.currentChar), nil +} + +func getInt(params []string, dflt int) int { + i := getInts(params, 1, dflt)[0] + return i +} + +func getInts(params []string, minCount int, dflt int) []int { + 
ints := []int{}
+
+	for _, v := range params {
+		i, _ := strconv.Atoi(v)
+		// Zero is mapped to the default value in VT100.
+		if i == 0 {
+			i = dflt
+		}
+		ints = append(ints, i)
+	}
+
+	if len(ints) < minCount {
+		remaining := minCount - len(ints)
+		for i := 0; i < remaining; i++ {
+			ints = append(ints, dflt)
+		}
+	}
+
+	return ints
+}
+
+func (ap *AnsiParser) modeDispatch(param string, set bool) error {
+	switch param {
+	case "?3":
+		return ap.eventHandler.DECCOLM(set)
+	case "?6":
+		return ap.eventHandler.DECOM(set)
+	case "?25":
+		return ap.eventHandler.DECTCEM(set)
+	}
+	return nil
+}
+
+func (ap *AnsiParser) hDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], true)
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) lDispatch(params []string) error {
+	if len(params) == 1 {
+		return ap.modeDispatch(params[0], false)
+	}
+
+	return nil
+}
+
+func getEraseParam(params []string) int {
+	param := getInt(params, 0)
+	if param < 0 || 3 < param {
+		param = 0
+	}
+
+	return param
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
new file mode 100644
index 0000000000..0bb5e51e9a
--- /dev/null
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -0,0 +1,119 @@
+package ansiterm
+
+func (ap *AnsiParser) collectParam() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectParam %#x", currChar)
+	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) collectInter() error {
+	currChar := ap.context.currentChar
+	ap.logf("collectInter %#x", currChar)
+	ap.context.interBuffer = append(ap.context.interBuffer, currChar)
+	return nil
+}
+
+func (ap *AnsiParser) escDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	intermeds := ap.context.interBuffer
+	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
+	ap.logf("escDispatch: %v(%v)", cmd, intermeds)
+
+	switch cmd {
+	case "D": // IND
+		return ap.eventHandler.IND()
+	case "E": // NEL, equivalent to CRLF
+		err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
+		if err == nil {
+			err = ap.eventHandler.Execute(ANSI_LINE_FEED)
+		}
+		return err
+	case "M": // RI
+		return ap.eventHandler.RI()
+	}
+
+	return nil
+}
+
+func (ap *AnsiParser) csiDispatch() error {
+	cmd, _ := parseCmd(*ap.context)
+	params, _ := parseParams(ap.context.paramBuffer)
+	ap.logf("Parsed params: %v with length: %d", params, len(params))
+
+	ap.logf("csiDispatch: %v(%v)", cmd, params)
+
+	switch cmd {
+	case "@":
+		return ap.eventHandler.ICH(getInt(params, 1))
+	case "A":
+		return ap.eventHandler.CUU(getInt(params, 1))
+	case "B":
+		return ap.eventHandler.CUD(getInt(params, 1))
+	case "C":
+		return ap.eventHandler.CUF(getInt(params, 1))
+	case "D":
+		return ap.eventHandler.CUB(getInt(params, 1))
+	case "E":
+		return ap.eventHandler.CNL(getInt(params, 1))
+	case "F":
+		return ap.eventHandler.CPL(getInt(params, 1))
+	case "G":
+		return ap.eventHandler.CHA(getInt(params, 1))
+	case "H":
+		ints := getInts(params, 2, 1)
+		x, y := ints[0], ints[1]
+		return ap.eventHandler.CUP(x, y)
+	case "J":
+		param := getEraseParam(params)
+		return ap.eventHandler.ED(param)
+	case "K":
+		param := getEraseParam(params)
+		return ap.eventHandler.EL(param)
+	case "L":
+		return ap.eventHandler.IL(getInt(params, 1))
+	case "M":
+		return ap.eventHandler.DL(getInt(params, 1))
+	case "P":
+		return ap.eventHandler.DCH(getInt(params, 1))
+	case "S":
+		return ap.eventHandler.SU(getInt(params, 1))
+	case "T":
+		return
ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 0000000000..f2ea1fcd12 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 0000000000..392114493a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 0000000000..93457f7bba --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,183 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" + windows "golang.org/x/sys/windows" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return defaultValue + } + + param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + return true + case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. +func ensureInRange(n int16, min int16, max int16) int16 { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + switch nFile { + case syscall.STD_INPUT_HANDLE, windows.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE, windows.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE, windows.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 0000000000..6055e33b91 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,327 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. +// +// As a result, in those cases, the code must hint that the variables remain in active by invoking the +// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer +// require unsafe pointers. 
+// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
+ KEY_EVENT = 0x0001 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 + MENU_EVENT = 0x0008 + FOCUS_EVENT = 0x0010 + + // WaitForSingleObject return codes + WAIT_ABANDONED = 0x00000080 + WAIT_FAILED = 0xFFFFFFFF + WAIT_SIGNALED = 0x0000000 + WAIT_TIMEOUT = 0x00000102 + + // WaitForSingleObject wait duration + WAIT_INFINITE = 0xFFFFFFFF + WAIT_ONE_SECOND = 1000 + WAIT_HALF_SECOND = 500 + WAIT_QUARTER_SECOND = 250 +) + +// Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) +// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment +type ( + CHAR_INFO struct { + UnicodeChar uint16 + Attributes uint16 + } + + CONSOLE_CURSOR_INFO struct { + Size uint32 + Visible int32 + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes uint16 + Window SMALL_RECT + MaximumWindowSize COORD + } + + COORD struct { + X int16 + Y int16 + } + + SMALL_RECT struct { + Left int16 + Top int16 + Right int16 + Bottom int16 + } + + // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. + INPUT_RECORD struct { + EventType uint16 + KeyEvent KEY_EVENT_RECORD + } + + KEY_EVENT_RECORD struct { + KeyDown int32 + RepeatCount uint16 + VirtualKeyCode uint16 + VirtualScanCode uint16 + UnicodeChar uint16 + ControlKeyState uint32 + } + + WINDOW_BUFFER_SIZE struct { + Size COORD + } +) + +// boolToBOOL converts a Go bool into a Windows int32. +func boolToBOOL(f bool) int32 { + if f { + return int32(1) + } else { + return int32(0) + } +} + +// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. +func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorInfo sets the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. +func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorPosition location of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. +func SetConsoleCursorPosition(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// GetConsoleMode gets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. +func GetConsoleMode(handle uintptr) (mode uint32, err error) { + err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
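Together with `GetConsoleMode` above, this is the Windows analogue of the termios save/modify/restore pattern used by the survey rune reader earlier in this patch. A hedged usage sketch, not part of the vendored source (handle acquisition via this package's `GetStdFile`; error handling trimmed):

```go
// +build windows

package main

import (
	"syscall"

	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	_, fd := winterm.GetStdFile(syscall.STD_INPUT_HANDLE)

	// save the current console mode so it can be restored on exit
	mode, err := winterm.GetConsoleMode(fd)
	if err != nil {
		panic(err)
	}
	defer winterm.SetConsoleMode(fd, mode) // restore; error ignored on teardown

	// clear the echo bit and apply the new mode
	if err := winterm.SetConsoleMode(fd, mode&^winterm.ENABLE_ECHO_INPUT); err != nil {
		panic(err)
	}
	// ... read input without echo here ...
}
```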
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
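+//
+// A usage sketch (illustrative only; handle is assumed to be a console input
+// handle):
+//
+//	var records [16]INPUT_RECORD
+//	var n uint32
+//	if err := ReadConsoleInput(handle, records[:], &n); err == nil {
+//		// records[:n] holds the events that were read.
+//	}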
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 0000000000..cbec8f728f --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
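+//
+// For example (an illustrative sketch; base is assumed to hold the console's
+// default attributes), applying SGR bold followed by SGR red foreground:
+//
+//	attrs, inverted := collectAnsiIntoWindowsAttributes(base, false, base, ansiterm.ANSI_SGR_BOLD)
+//	attrs, inverted = collectAnsiIntoWindowsAttributes(attrs, inverted, base, ansiterm.ANSI_SGR_FOREGROUND_RED)
+//	// attrs now has FOREGROUND_INTENSITY|FOREGROUND_RED in its foreground bits.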
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 0000000000..3ee06ea728 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 0000000000..244b5fa25e --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, 
coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 0000000000..2d27fa1d02 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *windowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *windowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *windowsAnsiEventHandler) deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *windowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
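+// A positive param scrolls the region's contents up by param lines (the block
+// is copied to param lines above sr.top and blank lines appear at the bottom);
+// a negative param scrolls the contents down.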
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - int16(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *windowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. +func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - int16(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 0000000000..afa7635d77 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. 
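+//
+// For example, addInRange(5, 10, 0, 12) returns 12: 5+10 exceeds the maximum
+// and is clamped to it.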
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 0000000000..2d40fb75ad --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,743 @@ +// +build windows + +package winterm + +import ( + "bytes" + "log" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" +) + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD + logf func(string, ...interface{}) +} + +type Option func(*windowsAnsiEventHandler) + +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f + } +} + +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + h := &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + h.logf("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. Skip the LF. 
+ h.logf("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + h.logf("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + h.logf("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + h.logf("set window failed: %v", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.logf("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + h.logf("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
+ h.clearWrap() + return h.CUP(1, 1) +} + +func (h *windowsAnsiEventHandler) RI() error { + if err := h.Flush(); err != nil { + return err + } + h.logf("RI: []") + h.clearWrap() + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + if info.CursorPosition.Y == sr.top { + return h.scrollDown(1) + } + + return h.moveCursorVertical(-1) +} + +func (h *windowsAnsiEventHandler) IND() error { + h.logf("IND: []") + return h.executeLF() +} + +func (h *windowsAnsiEventHandler) Flush() error { + h.curInfo = nil + if h.buffer.Len() > 0 { + h.logf("Flush: [%s]", h.buffer.Bytes()) + if _, err := h.buffer.WriteTo(h.file); err != nil { + return err + } + } + + if h.wrapNext && !h.drewMarginByte { + h.logf("Flush: drawing margin byte '%c'", h.marginByte) + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} + size := COORD{1, 1} + position := COORD{0, 0} + region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} + if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { + return err + } + h.drewMarginByte = true + } + return nil +} + +// cacheConsoleInfo ensures that the current console screen information has been queried +// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. +func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { + if h.curInfo == nil { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return COORD{}, nil, err + } + h.curInfo = info + h.curPos = info.CursorPosition + } + return h.curPos, h.curInfo, nil +} + +func (h *windowsAnsiEventHandler) updatePos(pos COORD) { + if h.curInfo == nil { + panic("failed to call getCurrentInfo before calling updatePos") + } + h.curPos = pos +} + +// clearWrap clears the state where the cursor is in the margin +// waiting for the next character before wrapping the line. This must +// be done before most operations that act on the cursor. +func (h *windowsAnsiEventHandler) clearWrap() { + h.wrapNext = false + h.drewMarginByte = false +} diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore new file mode 100644 index 0000000000..cd11be9653 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/.gitignore @@ -0,0 +1,2 @@ +toml.test +/toml-test diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE new file mode 100644 index 0000000000..f621b01196 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE @@ -0,0 +1 @@ +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). 
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 0000000000..64410cf755 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,220 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). + +This library requires Go 1.13 or newer; install it with: + + $ go get github.com/BurntSushi/toml + +It also comes with a TOML validator CLI tool: + + $ go get github.com/BurntSushi/toml/cmd/tomlv + $ tomlv some-toml-file.toml + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles XML and +JSON. Namely, data is loaded into Go values via reflection. 
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time // requires `import time`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+	// handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+	ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+	Name     string
+	Duration duration
+}
+type songs struct {
+	Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+	log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+	var err error
+	d.Duration, err = time.ParseDuration(string(text))
+	return err
+}
+```
+
+To target TOML specifically you can implement the `UnmarshalTOML` interface
+in a similar way.
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+  "alpha",
+  "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+	Title   string
+	Owner   ownerInfo
+	DB      database `toml:"database"`
+	Servers map[string]server
+	Clients clients
+}
+
+type ownerInfo struct {
+	Name string
+	Org  string `toml:"organization"`
+	Bio  string
+	DOB  time.Time
+}
+
+type database struct {
+	Server  string
+	Ports   []int
+	ConnMax int `toml:"connection_max"`
+	Enabled bool
+}
+
+type server struct {
+	IP string
+	DC string
+}
+
+type clients struct {
+	Data  [][]interface{}
+	Hosts []string
+}
+```
+
+Note that a case insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
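+
+As a quick sketch of loading that document from disk (the file name here is
+illustrative), `DecodeFile` reads and decodes in one step:
+
+```go
+var config tomlConfig
+if _, err := toml.DecodeFile("example.toml", &config); err != nil {
+	log.Fatal(err)
+}
+fmt.Printf("title: %q, %d server(s)\n", config.Title, len(config.Servers))
+```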
+ diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000000..d3d3b8397c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,511 @@ +package toml + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use the PrimitiveDecode() function to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps (dealer's choice – they can be +// used interchangeably). +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed +// in the local timezone. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. 
That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
+//
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+	r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+// Decode TOML data into the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+	}
+	if rv.IsNil() {
+		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+	}
+
+	// TODO: have the parser read from io.Reader? Or at the very least, make
+	// it read from []byte rather than string.
+	data, err := ioutil.ReadAll(dec.r)
+	if err != nil {
+		return MetaData{}, err
+	}
+
+	p, err := parse(string(data))
+	if err != nil {
+		return MetaData{}, err
+	}
+	md := MetaData{
+		p.mapping, p.types, p.ordered,
+		make(map[string]bool, len(p.ordered)), nil,
+	}
+	return md, md.unify(p.mapping, indirect(rv))
+}
+
+// Decode the TOML data into the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+	return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+	fp, err := os.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+	// Special case. Look for a `Primitive` value.
+	// TODO: #76 would make this superfluous after implemented.
+	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+		// Save the undecoded data and the key context into the primitive
+		// value.
+		context := make(Key, len(md.context))
+		copy(context, md.context)
+		rv.Set(reflect.ValueOf(Primitive{
+			undecoded: data,
+			context:   context,
+		}))
+		return nil
+	}
+
+	// Special case. Unmarshaler Interface support.
+	if rv.CanAddr() {
+		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+			return v.UnmarshalTOML(data)
+		}
+	}
+
+	// Special case. Look for a value satisfying the TextUnmarshaler interface.
+	if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return md.unifyText(data, v)
+	}
+	// TODO:
+	// The behavior here is incorrect whenever a Go type satisfies the
+	// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+	// array. In particular, the unmarshaler should only be applied to primitive
+	// TOML values. But at this point, it will be applied to all kinds of values
+	// and produce an incorrect error whenever those values are hashes or arrays
+	// (including arrays of tables).
+ + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + if k := rv.Type().Key().Kind(); k != reflect.String { + return fmt.Errorf( + "toml: cannot decode to a map with non-string key type (%s in %q)", + k, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return true + } + return false +} + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 0000000000..38aa75fdce --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,18 @@ +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS is just like Decode, except it will automatically read the contents +// of the file at `path` from a fs.FS instance. 
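+//
+// A usage sketch (illustrative only; assumes Go 1.16+, an embedded file named
+// "config.toml", and a caller-defined Config struct):
+//
+//	//go:embed config.toml
+//	var configs embed.FS
+//
+//	var conf Config
+//	_, err := toml.DecodeFS(configs, "config.toml", &conf)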
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 0000000000..ad8899c6cf
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,123 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not be
+// inferable via reflection. In particular, whether a key has been defined and
+// the TOML type of a key.
+type MetaData struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	keys    []Key
+	decoded map[string]bool
+	context Key // Used only during decoding.
+}
+
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use:
+//
+//	IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case
+// sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+	if len(key) == 0 {
+		return false
+	}
+
+	var hash map[string]interface{}
+	var ok bool
+	var hashOrVal interface{} = md.mapping
+	for _, k := range key {
+		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+			return false
+		}
+		if hashOrVal, ok = hash[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+	fullkey := strings.Join(key, ".")
+	if typ, ok := md.types[fullkey]; ok {
+		return typ.typeString()
+	}
+	return ""
+}
+
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to
+// get values of this type.
+type Key []string
+
+func (k Key) String() string { return strings.Join(k, ".") }
+
+func (k Key) maybeQuotedAll() string {
+	var ss []string
+	for i := range k {
+		ss = append(ss, k.maybeQuoted(i))
+	}
+	return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+	if k[i] == "" {
+		return `""`
+	}
+	quote := false
+	for _, c := range k[i] {
+		if !isBareKeyChar(c) {
+			quote = true
+			break
+		}
+	}
+	if quote {
+		return `"` + quotedReplacer.Replace(k[i]) + `"`
+	}
+	return k[i]
+}
+
+func (k Key) add(piece string) Key {
+	newKey := make(Key, len(k)+1)
+	copy(newKey, k)
+	newKey[len(k)] = piece
+	return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+//
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific. The list will have the same
+// order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+	return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
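+//
+// For example (sketch): decoding `a = 1` and `b = 2` into struct{ A int }
+// leaves "b" undecoded, so Undecoded returns the single key "b".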
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+	undecoded := make([]Key, 0, len(md.keys))
+	for _, key := range md.keys {
+		if !md.decoded[key.String()] {
+			undecoded = append(undecoded, key)
+		}
+	}
+	return undecoded
+}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 0000000000..db89eac1d1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,33 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextMarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextMarshaler encoding.TextMarshaler
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextUnmarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]bool)}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// DEPRECATED!
+//
+// Use NewDecoder(reader).Decode(&v) instead.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+	return NewDecoder(r).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 0000000000..099c4a77d2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,13 @@
+/*
+Package toml implements decoding and encoding of TOML files.
+
+This package supports TOML v1.0.0, as listed on https://toml.io
+
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
+
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify whether a TOML document is valid. It can also be
+used to print the type of each key.
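+
+A minimal decoding sketch (the document and struct are hypothetical):
+
+	var conf struct {
+		Name string
+	}
+	_, err := toml.Decode(`name = "TOML"`, &conf)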
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 0000000000..10d88ac632
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,650 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errAnonNonStruct   = errors.New("toml: cannot encode an anonymous field that is not a struct")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same
+// as for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return
+// an error. Examples of this include maps with non-string keys, slices with
+// nil elements, embedded non-struct types, and nested slices containing maps
+// or structs. (e.g. [][]map[string]string is not allowed but
+// []map[string]string is okay, as is []map[string][]string).
+//
+// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// The string to use for a single indentation level. The default is two
+	// spaces.
+	Indent string
+
+	// hasWritten is whether we have written any output to w yet.
+	hasWritten bool
+	w          *bufio.Writer
+}
+
+// NewEncoder creates a new Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
+//
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
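+//
+// A usage sketch (illustrative only; any io.Writer works):
+//
+//	err := toml.NewEncoder(os.Stdout).Encode(map[string]interface{}{
+//		"answer": 42,
+//	})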
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case: time needs to be in ISO8601 format.
+	// Special case: if we can marshal the type to text, then we use that.
+	// Basically, this prevents the encoder from handling these types as
+	// generic structs (or whatever the underlying type of a TextMarshaler is).
+	switch t := rv.Interface().(type) {
+	case time.Time, encoding.TextMarshaler:
+		enc.writeKeyValue(key, rv, false)
+		return
+	// TODO: #76 would make this superfluous once implemented.
+	case Primitive:
+		enc.encode(key, reflect.ValueOf(t.undecoded))
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.writeKeyValue(key, rv, false)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.writeKeyValue(key, rv, false)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element.
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+		format := time.RFC3339Nano
+		switch v.Location() {
+		case internal.LocalDatetime:
+			format = "2006-01-02T15:04:05.999999999"
+		case internal.LocalDate:
+			format = "2006-01-02"
+		case internal.LocalTime:
+			format = "15:04:05.999999999"
+		}
+		switch v.Location() {
+		default:
+			enc.wf(v.Format(format))
+		case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+			enc.wf(v.In(time.UTC).Format(format))
+		}
+		return
+	case encoding.TextMarshaler:
+		// Use text marshaler if it's available for this value.
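+		// (Illustrative aside: types such as net.IP implement
+		// encoding.TextMarshaler, so below they are written as quoted TOML
+		// strings, e.g. ip = "10.0.0.1".)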
+ if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + + switch rv.Kind() { + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. 
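+	// (Sketch: for map[string]interface{}{"a": 1, "sub": map[string]int{"b": 2}},
+	// "a" is emitted first as a plain key and "sub" afterwards as a
+	// [parent.sub] table, so plain values never land inside the sub-table.)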
+ var mapKeysDirect, mapKeysSub []string + for _, mapKey := range rv.MapKeys() { + k := mapKey.String() + if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { + mapKeysSub = append(mapKeysSub, k) + } else { + mapKeysDirect = append(mapKeysDirect, k) + } + } + + var writeMapKeys = func(mapKeys []string, trailC bool) { + sort.Strings(mapKeys) + for i, mapKey := range mapKeys { + val := rv.MapIndex(reflect.ValueOf(mapKey)) + if isNil(val) { + continue + } + + if inline { + enc.writeKeyValue(Key{mapKey}, val, true) + if trailC || i != len(mapKeys)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(mapKey), val) + } + } + } + + if inline { + enc.wf("{") + } + writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0) + writeMapKeys(mapKeysSub, false) + if inline { + enc.wf("}") + } +} + +func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) { + // Write keys for fields directly under this key first, because if we write + // a field that creates a new table then all keys under it will be in that + // table (not the one we're writing here). + // + // Fields is a [][]int: for fieldsDirect this always has one entry (the + // struct index). For fieldsSub it contains two entries: the parent field + // index from tv, and the field indexes for the fields of the sub. + var ( + rt = rv.Type() + fieldsDirect, fieldsSub [][]int + addFields func(rt reflect.Type, rv reflect.Value, start []int) + ) + addFields = func(rt reflect.Type, rv reflect.Value, start []int) { + for i := 0; i < rt.NumField(); i++ { + f := rt.Field(i) + if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields. + continue + } + + frv := rv.Field(i) + + // Treat anonymous struct fields with tag names as though they are + // not anonymous, like encoding/json does. + // + // Non-struct anonymous fields use the normal encoding logic. + if f.Anonymous { + t := f.Type + switch t.Kind() { + case reflect.Struct: + if getOptions(f.Tag).name == "" { + addFields(t, frv, append(start, f.Index...)) + continue + } + case reflect.Ptr: + if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" { + if !frv.IsNil() { + addFields(t.Elem(), frv.Elem(), append(start, f.Index...)) + } + continue + } + } + } + + if typeIsHash(tomlTypeOfGo(frv)) { + fieldsSub = append(fieldsSub, append(start, f.Index...)) + } else { + fieldsDirect = append(fieldsDirect, append(start, f.Index...)) + } + } + } + addFields(rt, rv, nil) + + writeFields := func(fields [][]int) { + for _, fieldIndex := range fields { + fieldType := rt.FieldByIndex(fieldIndex) + fieldVal := rv.FieldByIndex(fieldIndex) + + if isNil(fieldVal) { /// Don't write anything for nil fields. + continue + } + + opts := getOptions(fieldType.Tag) + if opts.skip { + continue + } + keyName := fieldType.Name + if opts.name != "" { + keyName = opts.name + } + if opts.omitempty && isEmpty(fieldVal) { + continue + } + if opts.omitzero && isZero(fieldVal) { + continue + } + + if inline { + enc.writeKeyValue(Key{keyName}, fieldVal, true) + if fieldIndex[0] != len(fields)-1 { + enc.wf(", ") + } + } else { + enc.encode(key.add(keyName), fieldVal) + } + } + } + + if inline { + enc.wf("{") + } + writeFields(fieldsDirect) + writeFields(fieldsSub) + if inline { + enc.wf("}") + } +} + +// tomlTypeName returns the TOML type name of the Go value's type. It is +// used to determine whether the types of array elements are mixed (which is +// forbidden). If the Go value is nil, then it is illegal for it to be an array +// element, and valueIsNil is returned as true. + +// Returns the TOML type of a Go value. 
The type may be `nil`, which means
+// no concrete TOML type could be found.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		switch rv.Interface().(type) {
+		case time.Time:
+			return tomlDatetime
+		case encoding.TextMarshaler:
+			return tomlString
+		default:
+			// Someone used a pointer receiver: we can make it work for
+			// pointer values.
+			if rv.CanAddr() {
+				_, ok := rv.Addr().Interface().(encoding.TextMarshaler)
+				if ok {
+					return tomlString
+				}
+			}
+			return tomlHash
+		}
+	default:
+		_, ok := rv.Interface().(encoding.TextMarshaler)
+		if ok {
+			return tomlString
+		}
+		encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+		panic("") // Need *some* return value
+	}
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+
+	/// Don't allow nil.
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		if tomlTypeOfGo(rv.Index(i)) == nil {
+			encPanic(errArrayNilElement)
+		}
+	}
+
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+	return firstType
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Bool:
+		return !rv.Bool()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//	key = <any value>
+//
+// If inline is true it won't add a newline at the end.
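+//
+// For example (sketch): the key ["server", "port"] with value 8080 is
+// written, indented one level, as `port = 8080`.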
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	if !inline {
+		enc.newline()
+	}
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return eindirect(v.Elem())
+	default:
+		return v
+	}
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/BurntSushi/toml/go.mod b/vendor/github.com/BurntSushi/toml/go.mod
new file mode 100644
index 0000000000..82989481df
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/go.mod
@@ -0,0 +1,3 @@
+module github.com/BurntSushi/toml
+
+go 1.16
diff --git a/vendor/github.com/BurntSushi/toml/go.sum b/vendor/github.com/BurntSushi/toml/go.sum
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 0000000000..022f15bc2b
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is
+// not well-defined in the TOML specification and is left to the
+// implementation. These default to the current local timezone offset of the
+// computer, but this can be changed by changing these variables before
+// decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this
+// sort of information also ties in to various related issues such as string
+// format encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
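+//
+// For example (sketch): the TOML value 1979-05-27 decodes to a time.Time
+// whose Location() is LocalDate, which is how the encoder later knows to
+// write it back out as a bare date with no time or offset.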
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000000..adc4eb5d53 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1225 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to four runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos]) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. 
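+// For example: a line `key = "v"` backs up and hands off to lexKeyStart
+// (with lexTopEnd pushed on the stack), while `[tbl]` goes to lexTableStart
+// and `# comment` to lexCommentStart.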
+func lexTop(lx *lexer) stateFn { + r := lx.next() + if isWhitespace(r) || isNL(r) { + return lexSkip(lx, lexTop) + } + switch r { + case commentStart: + lx.push(lexTop) + return lexCommentStart + case tableStart: + return lexTableStart + case eof: + if lx.pos > lx.start { + return lx.errorf("unexpected EOF") + } + lx.emit(itemEOF) + return nil + } + + // At this point, the only valid item can be a key, so we back up + // and let the key lexer do the rest. + lx.backup() + lx.push(lexTopEnd) + return lexKeyStart +} + +// lexTopEnd is entered whenever a top-level item has been consumed. (A value +// or a table.) It must see only whitespace, and will turn back to lexTop +// upon a newline. If it sees EOF, it will quit the lexer successfully. +func lexTopEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case r == commentStart: + // a comment will read to a newline for us. + lx.push(lexTop) + return lexCommentStart + case isWhitespace(r): + return lexTopEnd + case isNL(r): + lx.ignore() + return lexTop + case r == eof: + lx.emit(itemEOF) + return nil + } + return lx.errorf( + "expected a top-level item to end with a newline, comment, or EOF, but got %q instead", + r) +} + +// lexTable lexes the beginning of a table. Namely, it makes sure that +// it starts with a character other than '.' and ']'. +// It assumes that '[' has already been consumed. +// It also handles the case that this is an item in an array of tables. +// e.g., '[[name]]'. +func lexTableStart(lx *lexer) stateFn { + if lx.peek() == arrayTableStart { + lx.next() + lx.emit(itemArrayTableStart) + lx.push(lexArrayTableEnd) + } else { + lx.emit(itemTableStart) + lx.push(lexTableEnd) + } + return lexTableNameStart +} + +func lexTableEnd(lx *lexer) stateFn { + lx.emit(itemTableEnd) + return lexTopEnd +} + +func lexArrayTableEnd(lx *lexer) stateFn { + if r := lx.next(); r != arrayTableEnd { + return lx.errorf( + "expected end of table array name delimiter %q, but got %q instead", + arrayTableEnd, r) + } + lx.emit(itemArrayTableEnd) + return lexTopEnd +} + +func lexTableNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == tableEnd || r == eof: + return lx.errorf("unexpected end of table name (table names cannot be empty)") + case r == tableSep: + return lx.errorf("unexpected table separator (table names cannot be empty)") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexTableNameEnd) + return lexQuotedName + default: + lx.push(lexTableNameEnd) + return lexBareName + } +} + +// lexTableNameEnd reads the end of a piece of a table name, optionally +// consuming whitespace. +func lexTableNameEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexTableNameEnd + case r == tableSep: + lx.ignore() + return lexTableNameStart + case r == tableEnd: + return lx.pop() + default: + return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r) + } +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. only 'a' inside 'a.b'. +func lexBareName(lx *lexer) stateFn { + r := lx.next() + if isBareKeyChar(r) { + return lexBareName + } + lx.backup() + lx.emit(itemText) + return lx.pop() +} + +// lexBareName lexes one part of a key or table. +// +// It assumes that at least one valid character for the table has already been +// read. +// +// Lexes only one part, e.g. 
only '"a"' inside '"a".b'. +func lexQuotedName(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case r == stringStart: + lx.ignore() // ignore the '"' + return lexString + case r == rawStringStart: + lx.ignore() // ignore the "'" + return lexRawString + case r == eof: + return lx.errorf("unexpected EOF; expected value") + default: + return lx.errorf("expected value but found %q instead", r) + } +} + +// lexKeyStart consumes all key parts until a '='. +func lexKeyStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '=': key name appears blank") + case r == '.': + return lx.errorf("unexpected '.': keys cannot start with a '.'") + case r == stringStart || r == rawStringStart: + lx.ignore() + fallthrough + default: // Bare key + lx.emit(itemKeyStart) + return lexKeyNameStart + } +} + +func lexKeyNameStart(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.peek(); { + case r == '=' || r == eof: + return lx.errorf("unexpected '='") + case r == '.': + return lx.errorf("unexpected '.'") + case r == stringStart || r == rawStringStart: + lx.ignore() + lx.push(lexKeyEnd) + return lexQuotedName + default: + lx.push(lexKeyEnd) + return lexBareName + } +} + +// lexKeyEnd consumes the end of a key and trims whitespace (up to the key +// separator). +func lexKeyEnd(lx *lexer) stateFn { + lx.skip(isWhitespace) + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexKeyEnd) + case r == eof: + return lx.errorf("unexpected EOF; expected key separator %q", keySep) + case r == '.': + lx.ignore() + return lexKeyNameStart + case r == '=': + lx.emit(itemKeyEnd) + return lexSkip(lx, lexValue) + default: + return lx.errorf("expected '.' or '=', but got %q instead", r) + } +} + +// lexValue starts the consumption of a value anywhere a value is expected. +// lexValue will ignore whitespace. +// After a value is lexed, the last state on the next is popped and returned. +func lexValue(lx *lexer) stateFn { + // We allow whitespace to precede a value, but NOT newlines. + // In array syntax, the array states are responsible for ignoring newlines. + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexValue) + case isDigit(r): + lx.backup() // avoid an extra state and use the same as above + return lexNumberOrDateStart + } + switch r { + case arrayStart: + lx.ignore() + lx.emit(itemArray) + return lexArrayValue + case inlineTableStart: + lx.ignore() + lx.emit(itemInlineTableStart) + return lexInlineTableValue + case stringStart: + if lx.accept(stringStart) { + if lx.accept(stringStart) { + lx.ignore() // Ignore """ + return lexMultilineString + } + lx.backup() + } + lx.ignore() // ignore the '"' + return lexString + case rawStringStart: + if lx.accept(rawStringStart) { + if lx.accept(rawStringStart) { + lx.ignore() // Ignore """ + return lexMultilineRawString + } + lx.backup() + } + lx.ignore() // ignore the "'" + return lexRawString + case '.': // special error case, be kind to users + return lx.errorf("floats must start with a digit, not '.'") + case 'i', 'n': + if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) { + lx.emit(itemFloat) + return lx.pop() + } + case '-', '+': + return lexDecimalNumberStart + } + if unicode.IsLetter(r) { + // Be permissive here; lexBool will give a nice error if the + // user wrote something like + // x = foo + // (i.e. not 'true' or 'false' but is something else word-like.) 
+ lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %s instead", + arrayEnd, runeOrEOF(r)) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + default: + return lx.errorf( + "expected a comma or an inline table terminator %q, but got %s instead", + inlineTableEnd, runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. 
+func lexInlineTableEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemInlineTableEnd) + return lx.pop() +} + +// lexString consumes the inner contents of a string. It assumes that the +// beginning '"' has already been consumed and ignored. +func lexString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected '"'`) + case isControl(r) || r == '\r': + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == '\\': + lx.push(lexString) + return lexStringEscape + case r == stringEnd: + lx.backup() + lx.emit(itemString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexString +} + +// lexMultilineString consumes the inner contents of a string. It assumes that +// the beginning '"""' has already been consumed and ignored. +func lexMultilineString(lx *lexer) stateFn { + r := lx.next() + switch r { + case eof: + return lx.errorf(`unexpected EOF; expected '"""'`) + case '\r': + if lx.peek() != '\n' { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineString + case '\\': + return lexMultilineStringEscape + case stringEnd: + /// Found " → try to read two more "". + if lx.accept(stringEnd) { + if lx.accept(stringEnd) { + /// Peek ahead: the string can contain " and "", including at the + /// end: """str""""" + /// 6 or more at the end, however, is an error. + if lx.peek() == stringEnd { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), `"""""`) { + return lx.errorf(`unexpected '""""""'`) + } + lx.backup() + lx.backup() + return lexMultilineString + } + + lx.backup() /// backup: don't include the """ in the item. + lx.backup() + lx.backup() + lx.emit(itemMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + + if isControl(r) { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineString +} + +// lexRawString consumes a raw string. Nothing can be escaped in such a string. +// It assumes that the beginning "'" has already been consumed and ignored. +func lexRawString(lx *lexer) stateFn { + r := lx.next() + switch { + case r == eof: + return lx.errorf(`unexpected EOF; expected "'"`) + case isControl(r) || r == '\r': + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + case isNL(r): + return lx.errorf("strings cannot contain newlines") + case r == rawStringEnd: + lx.backup() + lx.emit(itemRawString) + lx.next() + lx.ignore() + return lx.pop() + } + return lexRawString +} + +// lexMultilineRawString consumes a raw string. Nothing can be escaped in such +// a string. It assumes that the beginning "'''" has already been consumed and +// ignored. +func lexMultilineRawString(lx *lexer) stateFn { + r := lx.next() + switch r { + case eof: + return lx.errorf(`unexpected EOF; expected "'''"`) + case '\r': + if lx.peek() != '\n' { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineRawString + case rawStringEnd: + /// Found ' → try to read two more ''. + if lx.accept(rawStringEnd) { + if lx.accept(rawStringEnd) { + /// Peek ahead: the string can contain ' and '', including at the + /// end: '''str''''' + /// 6 or more at the end, however, is an error. 
+ if lx.peek() == rawStringEnd { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + + if isControl(r) { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. 
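+// For example: 1979-05-27T07:32:00Z, 1979-05-27, and 07:32:00 all lex as a
+// single itemDatetime; telling the formats apart is left to the parser.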
+func lexDatetime(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDatetime + } + switch r { + case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+': + return lexDatetime + } + + lx.backup() + lx.emitTrim(itemDatetime) + return lx.pop() +} + +// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix. +func lexHexInteger(lx *lexer) stateFn { + r := lx.next() + if isHexadecimal(r) { + return lexHexInteger + } + switch r { + case '_': + return lexHexInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexOctalInteger consumes an octal integer after seeing the '0o' prefix. +func lexOctalInteger(lx *lexer) stateFn { + r := lx.next() + if isOctal(r) { + return lexOctalInteger + } + switch r { + case '_': + return lexOctalInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix. +func lexBinaryInteger(lx *lexer) stateFn { + r := lx.next() + if isBinary(r) { + return lexBinaryInteger + } + switch r { + case '_': + return lexBinaryInteger + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes a decimal float or integer. +func lexDecimalNumber(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexDecimalNumber + } + switch r { + case '.', 'e', 'E': + return lexFloat + case '_': + return lexDecimalNumber + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDecimalNumber consumes the first digit of a number beginning with a sign. +// It assumes the sign has already been consumed. Values which start with a sign +// are only allowed to be decimal integers or floats. +// +// The special "nan" and "inf" values are also recognized. +func lexDecimalNumberStart(lx *lexer) stateFn { + r := lx.next() + + // Special error cases to give users better error messages + switch r { + case 'i': + if !lx.accept('n') || !lx.accept('f') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case 'n': + if !lx.accept('a') || !lx.accept('n') { + return lx.errorf("invalid float: '%s'", lx.current()) + } + lx.emit(itemFloat) + return lx.pop() + case '0': + p := lx.peek() + switch p { + case 'b', 'o', 'x': + return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p) + } + case '.': + return lx.errorf("floats must start with a digit, not '.'") + } + + if isDigit(r) { + return lexDecimalNumber + } + + return lx.errorf("expected a digit but got %q", r) +} + +// lexBaseNumberOrDate differentiates between the possible values which +// start with '0'. It assumes that before reaching this state, the initial '0' +// has been consumed. +func lexBaseNumberOrDate(lx *lexer) stateFn { + r := lx.next() + // Note: All datetimes start with at least two digits, so we don't + // handle date characters (':', '-', etc.) here. + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '_': + // Can only be decimal, because there can't be an underscore + // between the '0' and the base designator, and dates can't + // contain underscores. 
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			return lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			return lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			return lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	case isControl(r):
+		return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+	lx.ignore()
+	return nextState
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
+func isWhitespace(r rune) bool {
+	return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+// Control characters except \t, \r, \n.
+func isControl(r rune) bool {
+	switch r {
+	case '\t', '\r', '\n':
+		return false
+	default:
+		return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+	}
+}
+
+func isDigit(r rune) bool {
+	return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+	return (r >= '0' && r <= '9') ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
+
+func isOctal(r rune) bool {
+	return r >= '0' && r <= '7'
+}
+
+func isBinary(r rune) bool {
+	return r == '0' || r == '1'
+}
+
+func isBareKeyChar(r rune) bool {
+	return (r >= 'A' && r <= 'Z') ||
+		(r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') ||
+		r == '_' ||
+		r == '-'
+}
+
+func (s stateFn) String() string {
+	name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+	if i := strings.LastIndexByte(name, '.'); i > -1 {
+		name = name[i+1:]
+	}
+	if s == nil {
+		name = "<nil>"
+	}
+	return name + "()"
+}
+
+func (itype itemType) String() string {
+	switch itype {
+	case itemError:
+		return "Error"
+	case itemNIL:
+		return "NIL"
+	case itemEOF:
+		return "EOF"
+	case itemText:
+		return "Text"
+	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+		return "String"
+	case itemBool:
+		return "Bool"
+	case itemInteger:
+		return "Integer"
+	case itemFloat:
+		return "Float"
+	case itemDatetime:
+		return "DateTime"
+	case itemTableStart:
+		return "TableStart"
+	case itemTableEnd:
+		return "TableEnd"
+	case itemKeyStart:
+		return "KeyStart"
+	case itemKeyEnd:
+		return "KeyEnd"
+	case itemArray:
+		return "Array"
+	case itemArrayEnd:
+		return "ArrayEnd"
+	case itemCommentStart:
+		return "CommentStart"
+	case itemInlineTableStart:
+		return "InlineTableStart"
+	case itemInlineTableEnd:
+		return "InlineTableEnd"
+	}
+	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 0000000000..d9ae5db946
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,739 @@
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type parser struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	lx      *lexer
+
+	ordered    []Key           // List of keys in the order that they appear in the TOML data.
+	context    Key             // Full key for the current hash in scope.
+	currentKey string          // Base key name for everything except hashes.
+	approxLine int             // Rough approximation of line number
+	implicits  map[string]bool // Record implied keys (e.g. 'key.group.names').
+}
+
+// ParseError is used when a file can't be parsed: for example invalid integer
+// literals, duplicate keys, etc.
+type ParseError struct {
+	Message string
+	Line    int
+	LastKey string
+}
+
+func (pe ParseError) Error() string {
+	return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+		pe.Line, pe.LastKey, pe.Message)
+}
+
+func parse(data string) (p *parser, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			if err, ok = r.(ParseError); ok {
+				return
+			}
+			panic(r)
+		}
+	}()
+
+	// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
+	// which mangles stuff.
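+	// ("\xff\xfe" and "\xfe\xff" are the UTF-16 byte-order marks; the
+	// NULL-byte check below then rejects any remaining UTF-16 content.)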
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if strings.ContainsRune(data[:ex], 0) { + return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8") + } + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf(format, v...) + panic(ParseError{ + Message: msg, + Line: p.approxLine, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.next() + p.approxLine = name.line + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.next() + p.approxLine = name.line + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.next() + p.approxLine = k.line + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). 
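+// For example: the table header [a."b.c"] yields the two parts "a" and "b.c".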
+func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicf("Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicf("Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. 
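+		// strconv.ParseFloat accepts "nan" but rejects a signed "+nan"/"-nan",
+		// so the sign is dropped here before parsing.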
+ val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray) + + // p.setType(p.currentKey, typ) + var ( + array []interface{} + types []tomlType + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.next() + p.approxLine = k.line + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. 
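+// For example, "1_000" is accepted while "1__000", "_1000" and "1000_" are not.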
+func numUnderscoresOK(s string) bool { + switch s { + case "nan", "+nan", "-nan", "inf", "-inf", "+inf": + return true + } + accept := false + for _, r := range s { + if r == '_' { + if !accept { + return false + } + } + + // isHexadecimal is a superset of all the permissable characters + // surrounding an underscore. + accept = isHexadecimal(r) + } + return accept +} + +// numPeriodsOK checks whether every period in s is followed by a digit. +func numPeriodsOK(s string) bool { + period := false + for _, r := range s { + if period && !isDigit(r) { + return false + } + period = r == '.' + } + return !period +} + +// Set the current context of the parser, where the context is either a hash or +// an array of hashes, depending on the value of the `array` parameter. +// +// Establishing the context also makes sure that the key isn't a duplicate, and +// will create implicit hashes automatically. +func (p *parser) addContext(key Key, array bool) { + var ok bool + + // Always start at the top level and drill down for our context. + hashContext := p.mapping + keyContext := make(Key, 0) + + // We only need implicit hashes for key[0:-1] + for _, k := range key[0 : len(key)-1] { + _, ok = hashContext[k] + keyContext = append(keyContext, k) + + // No key? Make an implicit hash and move on. + if !ok { + p.addImplicit(keyContext) + hashContext[k] = make(map[string]interface{}) + } + + // If the hash context is actually an array of tables, then set + // the hash context to the last element in that array. + // + // Otherwise, it better be a table, since this MUST be a key group (by + // virtue of it not being the last element in a key). + switch t := hashContext[k].(type) { + case []map[string]interface{}: + hashContext = t[len(t)-1] + case map[string]interface{}: + hashContext = t + default: + p.panicf("Key '%s' was already created as a hash.", keyContext) + } + } + + p.context = keyContext + if array { + // If this is the first element for this array, then allocate a new + // list of tables for it. + k := key[len(key)-1] + if _, ok := hashContext[k]; !ok { + hashContext[k] = make([]map[string]interface{}, 0, 4) + } + + // Add a new table. But make sure the key hasn't already been used + // for something else. + if hash, ok := hashContext[k].([]map[string]interface{}); ok { + hashContext[k] = append(hash, make(map[string]interface{})) + } else { + p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext) + } + } else { + p.setValue(key[len(key)-1], make(map[string]interface{})) + } + p.context = append(p.context, key[len(key)-1]) +} + +// set calls setValue and setType. +func (p *parser) set(key string, val interface{}, typ tomlType) { + p.setValue(p.currentKey, val) + p.setType(p.currentKey, typ) +} + +// setValue sets the given key to the given value in the current context. +// It will make sure that the key hasn't already been defined, account for +// implicit key groups. +func (p *parser) setValue(key string, value interface{}) { + var ( + tmpHash interface{} + ok bool + hash = p.mapping + keyContext Key + ) + for _, k := range p.context { + keyContext = append(keyContext, k) + if tmpHash, ok = hash[k]; !ok { + p.bug("Context for key '%s' has not been established.", keyContext) + } + switch t := tmpHash.(type) { + case []map[string]interface{}: + // The context is a table of hashes. Pick the most recent table + // defined as the current hash. 
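+			// e.g. after two [[tbl]] headers, new keys are written into the
+			// second (most recent) table in the slice.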
+ hash = t[len(t)-1] + case map[string]interface{}: + hash = t + default: + p.panicf("Key '%s' has already been defined.", keyContext) + } + } + keyContext = append(keyContext, key) + + if _, ok := hash[key]; ok { + // Normally redefining keys isn't allowed, but the key could have been + // defined implicitly and it's allowed to be redefined concretely. (See + // the `valid/implicit-and-explicit-after.toml` in toml-test) + // + // But we have to make sure to stop marking it as an implicit. (So that + // another redefinition provokes an error.) + // + // Note that since it has already been defined (as a hash), we don't + // want to overwrite it. So our business is done. + if p.isArray(keyContext) { + p.removeImplicit(keyContext) + hash[key] = value + return + } + if p.isImplicit(keyContext) { + p.removeImplicit(keyContext) + return + } + + // Otherwise, we have a concrete key trying to override a previous + // key, which is *always* wrong. + p.panicf("Key '%s' has already been defined.", keyContext) + } + + hash[key] = value +} + +// setType sets the type of a particular value at a given key. +// It should be called immediately AFTER setValue. +// +// Note that if `key` is empty, then the type given will be applied to the +// current context (which is either a table or an array of tables). +func (p *parser) setType(key string, typ tomlType) { + keyContext := make(Key, 0, len(p.context)+1) + for _, k := range p.context { + keyContext = append(keyContext, k) + } + if len(key) > 0 { // allow type setting for hashes + keyContext = append(keyContext, key) + } + p.types[keyContext.String()] = typ +} + +// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and +// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly). +func (p *parser) addImplicit(key Key) { p.implicits[key.String()] = true } +func (p *parser) removeImplicit(key Key) { p.implicits[key.String()] = false } +func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] } +func (p *parser) isArray(key Key) bool { return p.types[key.String()] == tomlArray } +func (p *parser) addImplicitContext(key Key) { + p.addImplicit(key) + p.addContext(key, false) +} + +// current returns the full key name of the current context. +func (p *parser) current() string { + if len(p.currentKey) == 0 { + return p.context.String() + } + if len(p.context) == 0 { + return p.currentKey + } + return fmt.Sprintf("%s.%s", p.context, p.currentKey) +} + +func stripFirstNewline(s string) string { + if len(s) > 0 && s[0] == '\n' { + return s[1:] + } + if len(s) > 1 && s[0] == '\r' && s[1] == '\n' { + return s[2:] + } + return s +} + +// Remove newlines inside triple-quoted strings if a line ends with "\". +func stripEscapedNewlines(s string) string { + split := strings.Split(s, "\n") + if len(split) < 1 { + return s + } + + escNL := false // Keep track of the last non-blank line was escaped. 
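+	// e.g. a line ending in a single "\" is merged with the following line,
+	// and the following line's leading whitespace is dropped.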
+ for i, line := range split { + line = strings.TrimRight(line, " \t\r") + + if len(line) == 0 || line[len(line)-1] != '\\' { + split[i] = strings.TrimRight(split[i], "\r") + if !escNL && i != len(split)-1 { + split[i] += "\n" + } + continue + } + + escBS := true + for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- { + escBS = !escBS + } + if escNL { + line = strings.TrimLeft(line, " \t\r") + } + escNL = !escBS + + if escBS { + split[i] += "\n" + continue + } + + split[i] = line[:len(line)-1] // Remove \ + if len(split)-1 > i { + split[i+1] = strings.TrimLeft(split[i+1], " \t\r") + } + } + return strings.Join(split, "") +} + +func (p *parser) replaceEscapes(str string) string { + var replaced []rune + s := []byte(str) + r := 0 + for r < len(s) { + if s[r] != '\\' { + c, size := utf8.DecodeRune(s[r:]) + r += size + replaced = append(replaced, c) + continue + } + r += 1 + if r >= len(s) { + p.bug("Escape sequence at end of string.") + return "" + } + switch s[r] { + default: + p.bug("Expected valid escape code after \\, but got %q.", s[r]) + return "" + case ' ', '\t': + p.panicf("invalid escape: '\\%c'", s[r]) + return "" + case 'b': + replaced = append(replaced, rune(0x0008)) + r += 1 + case 't': + replaced = append(replaced, rune(0x0009)) + r += 1 + case 'n': + replaced = append(replaced, rune(0x000A)) + r += 1 + case 'f': + replaced = append(replaced, rune(0x000C)) + r += 1 + case 'r': + replaced = append(replaced, rune(0x000D)) + r += 1 + case '"': + replaced = append(replaced, rune(0x0022)) + r += 1 + case '\\': + replaced = append(replaced, rune(0x005C)) + r += 1 + case 'u': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+5). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) + replaced = append(replaced, escaped) + r += 5 + case 'U': + // At this point, we know we have a Unicode escape of the form + // `uXXXX` at [r, r+9). (Because the lexer guarantees this + // for us.) + escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) + replaced = append(replaced, escaped) + r += 9 + } + } + return string(replaced) +} + +func (p *parser) asciiEscapeToUnicode(bs []byte) rune { + s := string(bs) + hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) + if err != nil { + p.bug("Could not parse '%s' as a hexadecimal number, but the "+ + "lexer claims it's OK: %s", s, err) + } + if !utf8.ValidRune(rune(hex)) { + p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) + } + return rune(hex) +} diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go new file mode 100644 index 0000000000..d56aa80faf --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_check.go @@ -0,0 +1,70 @@ +package toml + +// tomlType represents any Go type that corresponds to a TOML type. +// While the first draft of the TOML spec has a simplistic type system that +// probably doesn't need this level of sophistication, we seem to be militating +// toward adding real composite types. +type tomlType interface { + typeString() string +} + +// typeEqual accepts any two types and returns true if they are equal. 
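+// A nil value on either side is never equal, so a missing type never matches.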
+func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000000..608997c22f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. 
The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. 
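+	// e.g. a field "Name" promoted from an embedded struct (index length 2)
+	// is dropped in favour of a top-level "Name" (index length 1); a tag only
+	// breaks ties between fields at the same depth.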
+ length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml new file mode 100644 index 0000000000..096369d44d --- /dev/null +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -0,0 +1,29 @@ +language: go + +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - tip + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +script: + - make setup + - make test + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md new file mode 100644 index 0000000000..e405c9a84d --- /dev/null +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -0,0 +1,109 @@ +# 1.5.0 (2019-09-11) + +## Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +## Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +## Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: 
Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +# 1.4.0 (2017-10-04) + +## Changed +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +# 1.3.1 (2017-07-10) + +## Fixed +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +# 1.3.0 (2017-05-02) + +## Added +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +## Fixed +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +## Changed +- #55: The godoc icon moved from png to svg + +# 1.2.3 (2017-04-03) + +## Fixed +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +# Release 1.2.2 (2016-12-13) + +## Fixed +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +# Release 1.2.1 (2016-11-28) + +## Fixed +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +# Release 1.2.0 (2016-11-04) + +## Added +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +## Fixed +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. + +# Release 1.1.1 (2016-06-30) + +## Changed +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +# Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +# Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +# Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/LICENSE.txt b/vendor/github.com/Masterminds/semver/LICENSE.txt new file mode 100644 index 0000000000..9ff7da9c48 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/Masterminds/semver/Makefile b/vendor/github.com/Masterminds/semver/Makefile
new file mode 100644
index 0000000000..a7a1b4e36d
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/Makefile
@@ -0,0 +1,36 @@
+.PHONY: setup
+setup:
+	go get -u gopkg.in/alecthomas/gometalinter.v1
+	gometalinter.v1 --install
+
+.PHONY: test
+test: validate lint
+	@echo "==> Running tests"
+	go test -v
+
+.PHONY: validate
+validate:
+	@echo "==> Running static validations"
+	@gometalinter.v1 \
+	  --disable-all \
+	  --enable deadcode \
+	  --severity deadcode:error \
+	  --enable gofmt \
+	  --enable gosimple \
+	  --enable ineffassign \
+	  --enable misspell \
+	  --enable vet \
+	  --tests \
+	  --vendor \
+	  --deadline 60s \
+	  ./... || exit_code=1
+
+.PHONY: lint
+lint:
+	@echo "==> Running linters"
+	@gometalinter.v1 \
+	  --disable-all \
+	  --enable golint \
+	  --vendor \
+	  --deadline 60s \
+	  ./... || :
diff --git a/vendor/github.com/Masterminds/semver/README.md b/vendor/github.com/Masterminds/semver/README.md
new file mode 100644
index 0000000000..1b52d2f436
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/README.md
@@ -0,0 +1,194 @@
+# SemVer
+
+The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
+
+* Parse semantic versions
+* Sort semantic versions
+* Check if a semantic version fits within a set of constraints
+* Optionally work with a `v` prefix
+
+[![Stability:
+Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html)
+[![Build Status](https://travis-ci.org/Masterminds/semver.svg)](https://travis-ci.org/Masterminds/semver) [![Build status](https://ci.appveyor.com/api/projects/status/jfk66lib7hb985k8/branch/master?svg=true&passingText=windows%20build%20passing&failingText=windows%20build%20failing)](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [![GoDoc](https://godoc.org/github.com/Masterminds/semver?status.svg)](https://godoc.org/github.com/Masterminds/semver) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver)
+
+If you are looking for a command line tool for version comparisons please see
+[vert](https://github.com/Masterminds/vert) which uses this library.
+
+## Parsing Semantic Versions
+
+To parse a semantic version use the `NewVersion` function. For example,
+
+```go
+    v, err := semver.NewVersion("1.2.3-beta.1+build345")
+```
+
+If there is an error, the version wasn't parseable. The version object has methods
+to get the parts of the version, compare it to other versions, convert the
+version back into a string, and get the original string. For more details
+please see the [documentation](https://godoc.org/github.com/Masterminds/semver).
+
+## Sorting Semantic Versions
+
+A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
+package from the standard library. For example,
+
+```go
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+    vs := make([]*semver.Version, len(raw))
+    for i, r := range raw {
+        v, err := semver.NewVersion(r)
+        if err != nil {
+            // Handle version not being parseable.
+            panic(err)
+        }
+
+        vs[i] = v
+    }
+
+    sort.Sort(semver.Collection(vs))
+```
+
+## Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+```go
+    c, err := semver.NewConstraint(">= 1.2.3")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+    // Check if the version meets the constraints. The variable `a` will be true.
+    a := c.Check(v)
+```
+
+## Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then joined by `||` (OR)
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0, or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+* `=`: equal (aliased to no operator)
+* `!=`: not equal
+* `>`: greater than
+* `<`: less than
+* `>=`: greater than or equal to
+* `<=`: less than or equal to
+
+## Working With Pre-release Versions
+
+Pre-releases, for those not familiar with them, are used for software releases
+prior to stable or generally available releases. Examples of pre-releases include
+development, alpha, beta, and release candidate releases. A pre-release may be
+a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the
+order of precedence, pre-releases come before their associated releases. In this
+example `1.2.3-beta.1 < 1.2.3`.
+
+According to the Semantic Version specification pre-releases may not be
+API compliant with their release counterpart. It says,
+
+> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version.
+
+SemVer comparisons without a pre-release comparator will skip pre-release versions.
+For example, `>=1.2.3` will skip pre-releases when looking at a list of releases
+while `>=1.2.3-0` will evaluate and find pre-releases.
+
+The reason for the `0` as a pre-release version in the example comparison is
+because pre-releases can only contain ASCII alphanumerics and hyphens (along with
+`.` separators), per the spec. Sorting happens in ASCII sort order, again per the
+spec. The lowest character is a `0` in ASCII sort order (see an
+[ASCII Table](http://www.asciitable.com/)).
+
+Understanding ASCII sort ordering is important because A-Z comes before a-z. That
+means `>=1.2.3-BETA` will return `1.2.3-alpha`. What you might expect from case
+sensitivity doesn't apply here. This is due to ASCII sort ordering which is what
+the spec specifies.
+
+## Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+## Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `>= 1.2.x` is equivalent to `>= 1.2.0`
+* `<= 2.x` is equivalent to `< 3`
+* `*` is equivalent to `>= 0.0.0`
+
+## Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+* `~1` is equivalent to `>= 1, < 2`
+* `~2.3` is equivalent to `>= 2.3, < 2.4`
+* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+* `~1.x` is equivalent to `>= 1, < 2`
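+
+A minimal sketch of checking a tilde range, assuming the same `semver` import
+as the examples above:
+
+```go
+    c, err := semver.NewConstraint("~1.2.3")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+    c.Check(semver.MustParse("1.2.9")) // true: within >= 1.2.3, < 1.3.0
+    c.Check(semver.MustParse("1.3.0")) // false: outside the patch range
+```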
+
+## Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+* `^0.0.1` is equivalent to `>= 0.0.1, < 1.0.0`
+* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+* `^2.3` is equivalent to `>= 2.3, < 3`
+* `^2.x` is equivalent to `>= 2.0.0, < 3`
+
+# Validation
+
+In addition to testing a version against a constraint, a version can be validated
+against a constraint. When validation fails, a slice of errors explaining why the
+version didn't meet the constraint is returned. For example,
+
+```go
+    c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+
+    // Validate a version against a constraint.
+    a, msgs := c.Validate(v)
+    // a is false
+    for _, m := range msgs {
+        fmt.Println(m)
+
+        // This loops over the errors, which would read
+        // "1.3 is greater than 1.2.3"
+        // "1.3 is less than 1.4"
+    }
+```
+
+# Fuzzing
+
+[dvyukov/go-fuzz](https://github.com/dvyukov/go-fuzz) is used for fuzzing.
+
+1. `go-fuzz-build`
+2. `go-fuzz -workdir=fuzz`
+
+# Contribute
+
+If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
+or [create a pull request](https://github.com/Masterminds/semver/pulls).
diff --git a/vendor/github.com/Masterminds/semver/appveyor.yml b/vendor/github.com/Masterminds/semver/appveyor.yml
new file mode 100644
index 0000000000..b2778df15a
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/appveyor.yml
@@ -0,0 +1,44 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\Masterminds\semver
+shallow_clone: true
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+install:
+  - go version
+  - go env
+  - go get -u gopkg.in/alecthomas/gometalinter.v1
+  - set PATH=%PATH%;%GOPATH%\bin
+  - gometalinter.v1.exe --install
+
+build_script:
+  - go install -v ./...
+
+test_script:
+  - "gometalinter.v1 \
+    --disable-all \
+    --enable deadcode \
+    --severity deadcode:error \
+    --enable gofmt \
+    --enable gosimple \
+    --enable ineffassign \
+    --enable misspell \
+    --enable vet \
+    --tests \
+    --vendor \
+    --deadline 60s \
+    ./... || exit_code=1"
+  - "gometalinter.v1 \
+    --disable-all \
+    --enable golint \
+    --vendor \
+    --deadline 60s \
+    ./... || :"
+  - go test -v
+
+deploy: off
diff --git a/vendor/github.com/Masterminds/semver/collection.go b/vendor/github.com/Masterminds/semver/collection.go
new file mode 100644
index 0000000000..a78235895f
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/collection.go
@@ -0,0 +1,24 @@
+package semver
+
+// Collection is a collection of Version instances and implements the sort
+// interface. See the sort package for more details.
+// https://golang.org/pkg/sort/
+type Collection []*Version
+
+// Len returns the length of a collection: the number of Version instances
+// in the slice.
+func (c Collection) Len() int {
+	return len(c)
+}
+
+// Less is needed for the sort interface to compare two Version objects on the
+// slice. It checks if one is less than the other.
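+//
+// For example (illustrative):
+//
+//	vs := []*Version{MustParse("1.3"), MustParse("1.2.3")}
+//	sort.Sort(Collection(vs)) // vs is now [1.2.3, 1.3]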
+func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/constraints.go b/vendor/github.com/Masterminds/semver/constraints.go new file mode 100644 index 0000000000..b94b93413f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/constraints.go @@ -0,0 +1,423 @@ +package semver + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraint that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation. + c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + cs := strings.Split(v, ",") + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if !c.check(v) { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelesase bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case there the version is + // a prerelease and the check is not searching for prereleases. 
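+			// e.g. validating "1.2.3-beta" against ">= 1.0.0" reports the
+			// prerelease mismatch below rather than a numeric comparison error.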
+ if c.con.pre == "" && v.pre != "" { + if !prerelesase { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelesase = true + } + joy = false + + } else { + + if !c.check(v) { + em := fmt.Errorf(c.msg, v, c.orig) + e = append(e, em) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +var constraintOps map[string]cfunc +var constraintMsg map[string]string +var constraintRegex *regexp.Regexp + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + constraintMsg = map[string]string{ + "": "%s is not equal to %s", + "=": "%s is not equal to %s", + "!=": "%s is equal to %s", + ">": "%s is less than or equal to %s", + "<": "%s is greater than or equal to %s", + ">=": "%s is less than %s", + "=>": "%s is less than %s", + "<=": "%s is greater than %s", + "=<": "%s is greater than %s", + "~": "%s does not have same major and minor version as %s", + "~>": "%s does not have same major and minor version as %s", + "^": "%s does not have same major version as %s", + } + + ops := make([]string, 0, len(constraintOps)) + for k := range constraintOps { + ops = append(ops, regexp.QuoteMeta(k)) + } + + constraintRegex = regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + strings.Join(ops, "|"), + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) +} + +// An individual constraint +type constraint struct { + // The callback function for the restraint. It performs the logic for + // the constraint. + function cfunc + + msg string + + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) bool { + return c.function(v, c) +} + +type cfunc func(v *Version, c *constraint) bool + +func parseConstraint(c string) (*constraint, error) { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + ver := m[2] + orig := ver + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. 
+ return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + function: constraintOps[m[1]], + msg: constraintMsg[m[1]], + con: con, + orig: orig, + minorDirty: minorDirty, + patchDirty: patchDirty, + dirty: dirty, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) bool { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.con.Major() != v.Major() { + return true + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true + } else if c.minorDirty { + return false + } + + return false + } + + return !v.Equal(c.con) +} + +func constraintGreaterThan(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) == 1 +} + +func constraintLessThan(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) < 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +func constraintGreaterThanEqual(v *Version, c *constraint) bool { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + return v.Compare(c.con) >= 0 +} + +func constraintLessThanEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if !c.dirty { + return v.Compare(c.con) <= 0 + } + + if v.Major() > c.con.Major() { + return false + } else if v.Minor() > c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. 
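+	// e.g. "~1.x" is parsed as 1.0.0 with minorDirty set, so any 1.y.z
+	// release matches: the majors are equal and the minor check is skipped.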
+ if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true + } + + if v.Major() != c.con.Major() { + return false + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false + } + + return true +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if c.dirty { + c.msg = constraintMsg["~"] + return constraintTilde(v, c) + } + + return v.Equal(c.con) +} + +// ^* --> (any) +// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0 +// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0 +// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0 +// ^1.2.3 --> >=1.2.3, <2.0.0 +// ^1.2.0 --> >=1.2.0, <2.0.0 +func constraintCaret(v *Version, c *constraint) bool { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false + } + + if v.LessThan(c.con) { + return false + } + + if v.Major() != c.con.Major() { + return false + } + + return true +} + +var constraintRangeRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/doc.go b/vendor/github.com/Masterminds/semver/doc.go new file mode 100644 index 0000000000..6a6c24c6d6 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/doc.go @@ -0,0 +1,115 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +To parse a semantic version use the `NewVersion` function. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +If there is an error the version wasn't parseable. The version object has methods +to get the parts of the version, compare it to other versions, convert the +version back into a string, and get the original string. For more details +please see the documentation at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. 
+For example,
+
+    raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2"}
+    vs := make([]*semver.Version, len(raw))
+    for i, r := range raw {
+        v, err := semver.NewVersion(r)
+        if err != nil {
+            // Handle version not being parseable.
+            panic(err)
+        }
+
+        vs[i] = v
+    }
+
+    sort.Sort(semver.Collection(vs))
+
+Checking Version Constraints
+
+Checking a version against version constraints is one of the most featureful
+parts of the package.
+
+    c, err := semver.NewConstraint(">= 1.2.3")
+    if err != nil {
+        // Handle constraint not being parseable.
+    }
+
+    v, err := semver.NewVersion("1.3")
+    if err != nil {
+        // Handle version not being parseable.
+    }
+    // Check if the version meets the constraints. The variable a will be true.
+    a := c.Check(v)
+
+Basic Comparisons
+
+There are two elements to the comparisons. First, a comparison string is a list
+of comma-separated AND comparisons. These are then joined by || (OR)
+comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
+comparison that's greater than or equal to 1.2 and less than 3.0.0, or is
+greater than or equal to 4.2.3.
+
+The basic comparisons are:
+
+ * `=`: equal (aliased to no operator)
+ * `!=`: not equal
+ * `>`: greater than
+ * `<`: less than
+ * `>=`: greater than or equal to
+ * `<=`: less than or equal to
+
+Hyphen Range Comparisons
+
+There are multiple methods to handle ranges and the first is hyphen ranges.
+These look like:
+
+ * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
+ * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
+
+Wildcards In Comparisons
+
+The `x`, `X`, and `*` characters can be used as a wildcard character. This works
+for all comparison operators. When used on the `=` operator it falls
+back to the patch level comparison (see tilde below). For example,
+
+ * `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+ * `>= 1.2.x` is equivalent to `>= 1.2.0`
+ * `<= 2.x` is equivalent to `< 3`
+ * `*` is equivalent to `>= 0.0.0`
+
+Tilde Range Comparisons (Patch)
+
+The tilde (`~`) comparison operator is for patch level ranges when a minor
+version is specified and major level changes when the minor number is missing.
+For example,
+
+ * `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
+ * `~1` is equivalent to `>= 1, < 2`
+ * `~2.3` is equivalent to `>= 2.3, < 2.4`
+ * `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
+ * `~1.x` is equivalent to `>= 1, < 2`
+
+Caret Range Comparisons (Major)
+
+The caret (`^`) comparison operator is for major level changes. This is useful
+when comparing API versions, as a major change is API breaking. For example,
+
+ * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
+ * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
+ * `^2.3` is equivalent to `>= 2.3, < 3`
+ * `^2.x` is equivalent to `>= 2.0.0, < 3`
+*/
+package semver
diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go
new file mode 100644
index 0000000000..400d4f9341
--- /dev/null
+++ b/vendor/github.com/Masterminds/semver/version.go
@@ -0,0 +1,425 @@
+package semver
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// The compiled version of the regex created at init() is cached here so it
+// only needs to be created once.
+var versionRegex *regexp.Regexp
+var validPrereleaseRegex *regexp.Regexp
+
+var (
+	// ErrInvalidSemVer is returned when a version is found to be invalid
+	// while being parsed.
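+	// e.g. NewVersion("1.2.3.4") fails with this error: only three
+	// dot-separated numeric segments are allowed before the prerelease.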
+ ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// SemVerRegex is the regular expression used to parse a semantic version. +const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// ValidPrerelease is the regular expression which validates +// both prerelease and metadata values. +const ValidPrerelease string = `^([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*)$` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch int64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + SemVerRegex + "$") + validPrereleaseRegex = regexp.MustCompile(ValidPrerelease) +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var temp int64 + temp, err := strconv.ParseInt(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.major = temp + + if m[2] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.minor = temp + } else { + sv.minor = 0 + } + + if m[3] != "" { + temp, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + sv.patch = temp + } else { + sv.patch = 0 + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. +// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v *Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v *Version) Major() int64 { + return v.major +} + +// Minor returns the minor version. +func (v *Version) Minor() int64 { + return v.minor +} + +// Patch returns the patch version. +func (v *Version) Patch() int64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v *Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v *Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. 
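+// For example, the prefix of "v1.2.3" is "v" and the prefix of "1.2.3" is "".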
+func (v *Version) originalVPrefix() string {
+
+	// Note, only lowercase v is supported as a prefix by the parser.
+	if v.original != "" && v.original[:1] == "v" {
+		return v.original[:1]
+	}
+	return ""
+}
+
+// IncPatch produces the next patch version.
+// If the current version has no prerelease or metadata information, it
+// unsets both values and increments the patch number.
+// If the current version has prerelease or metadata information, it unsets
+// both values and keeps the current patch value.
+func (v Version) IncPatch() Version {
+	vNext := v
+	// according to http://semver.org/#spec-item-9
+	// Pre-release versions have a lower precedence than the associated normal version.
+	// according to http://semver.org/#spec-item-10
+	// Build metadata SHOULD be ignored when determining version precedence.
+	if v.pre != "" {
+		vNext.metadata = ""
+		vNext.pre = ""
+	} else {
+		vNext.metadata = ""
+		vNext.pre = ""
+		vNext.patch = v.patch + 1
+	}
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMinor produces the next minor version.
+// Sets patch to 0.
+// Increments minor number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMinor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = v.minor + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// IncMajor produces the next major version.
+// Sets patch to 0.
+// Sets minor to 0.
+// Increments major number.
+// Unsets metadata.
+// Unsets prerelease status.
+func (v Version) IncMajor() Version {
+	vNext := v
+	vNext.metadata = ""
+	vNext.pre = ""
+	vNext.patch = 0
+	vNext.minor = 0
+	vNext.major = v.major + 1
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext
+}
+
+// SetPrerelease defines the prerelease value.
+// Value must not include the required 'hyphen' prefix.
+func (v Version) SetPrerelease(prerelease string) (Version, error) {
+	vNext := v
+	if len(prerelease) > 0 && !validPrereleaseRegex.MatchString(prerelease) {
+		return vNext, ErrInvalidPrerelease
+	}
+	vNext.pre = prerelease
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// SetMetadata defines metadata value.
+// Value must not include the required 'plus' prefix.
+func (v Version) SetMetadata(metadata string) (Version, error) {
+	vNext := v
+	if len(metadata) > 0 && !validPrereleaseRegex.MatchString(metadata) {
+		return vNext, ErrInvalidMetadata
+	}
+	vNext.metadata = metadata
+	vNext.original = v.originalVPrefix() + "" + vNext.String()
+	return vNext, nil
+}
+
+// LessThan tests if one version is less than another one.
+func (v *Version) LessThan(o *Version) bool {
+	return v.Compare(o) < 0
+}
+
+// GreaterThan tests if one version is greater than another one.
+func (v *Version) GreaterThan(o *Version) bool {
+	return v.Compare(o) > 0
+}
+
+// Equal tests if two versions are equal to each other.
+// Note, versions can be equal with different metadata since metadata
+// is not considered part of the comparable version.
+func (v *Version) Equal(o *Version) bool {
+	return v.Compare(o) == 0
+}
+
+// Compare compares this version to another one. It returns -1, 0, or 1 if
+// the version is smaller, equal, or larger than the other version.
+//
+// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
+// lower than the version without a prerelease.
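+//
+// For example (an illustrative sketch using this package's own API):
+//
+//	MustParse("1.2.3-beta").Compare(MustParse("1.2.3"))   // -1: prerelease sorts lower
+//	MustParse("1.2.3+build7").Compare(MustParse("1.2.3")) // 0: metadata is ignored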
+func (v *Version) Compare(o *Version) int {
+	// Compare the major, minor, and patch version for differences. If a
+	// difference is found return the comparison.
+	if d := compareSegment(v.Major(), o.Major()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
+		return d
+	}
+	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
+		return d
+	}
+
+	// At this point the major, minor, and patch versions are the same.
+	ps := v.pre
+	po := o.Prerelease()
+
+	if ps == "" && po == "" {
+		return 0
+	}
+	if ps == "" {
+		return 1
+	}
+	if po == "" {
+		return -1
+	}
+
+	return comparePrerelease(ps, po)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (v *Version) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	temp, err := NewVersion(s)
+	if err != nil {
+		return err
+	}
+	v.major = temp.major
+	v.minor = temp.minor
+	v.patch = temp.patch
+	v.pre = temp.pre
+	v.metadata = temp.metadata
+	v.original = temp.original
+	temp = nil
+	return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (v *Version) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+func compareSegment(v, o int64) int {
+	if v < o {
+		return -1
+	}
+	if v > o {
+		return 1
+	}
+
+	return 0
+}
+
+func comparePrerelease(v, o string) int {
+
+	// split the prerelease versions by their parts. The separator, per the
+	// spec, is a .
+	sparts := strings.Split(v, ".")
+	oparts := strings.Split(o, ".")
+
+	// Find the longer length of the parts to know how many loop iterations to
+	// go through.
+	slen := len(sparts)
+	olen := len(oparts)
+
+	l := slen
+	if olen > slen {
+		l = olen
+	}
+
+	// Iterate over each part of the prereleases to compare the differences.
+	for i := 0; i < l; i++ {
+		// Since the length of the parts can be different we need to create
+		// a placeholder. This is to avoid out of bounds issues.
+		stemp := ""
+		if i < slen {
+			stemp = sparts[i]
+		}
+
+		otemp := ""
+		if i < olen {
+			otemp = oparts[i]
+		}
+
+		d := comparePrePart(stemp, otemp)
+		if d != 0 {
+			return d
+		}
+	}
+
+	// Reaching here means two versions are of equal value but have different
+	// metadata (the part following a +). They are not identical in string form
+	// but the version comparison finds them to be equal.
+	return 0
+}
+
+func comparePrePart(s, o string) int {
+	// Fastpath if they are equal
+	if s == o {
+		return 0
+	}
+
+	// When s or o are empty we can use the other in an attempt to determine
+	// the response.
+	if s == "" {
+		if o != "" {
+			return -1
+		}
+		return 1
+	}
+
+	if o == "" {
+		if s != "" {
+			return 1
+		}
+		return -1
+	}
+
+	// When comparing strings "99" is greater than "103". To handle
+	// cases like this we need to detect numbers and compare them. According
+	// to the semver spec, numbers are always positive. If there is a - at the
+	// start like -99 this is to be evaluated as an alphanum. Numbers always
+	// have lower precedence than alphanum. Parsing as Uints because negative
+	// numbers are ignored.
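+	// For example, "2" and "11" are compared here as numbers, so "2" < "11",
+	// whereas a plain string comparison would put "2" after "11".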
+ + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // The case where both are strings compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} diff --git a/vendor/github.com/Masterminds/semver/version_fuzz.go b/vendor/github.com/Masterminds/semver/version_fuzz.go new file mode 100644 index 0000000000..b42bcd62b9 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/version_fuzz.go @@ -0,0 +1,10 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + if _, err := NewVersion(string(data)); err != nil { + return 0 + } + return 1 +} diff --git a/vendor/github.com/Microsoft/go-winio/.gitignore b/vendor/github.com/Microsoft/go-winio/.gitignore new file mode 100644 index 0000000000..b883f1fdc6 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/.gitignore @@ -0,0 +1 @@ +*.exe diff --git a/vendor/github.com/Microsoft/go-winio/CODEOWNERS b/vendor/github.com/Microsoft/go-winio/CODEOWNERS new file mode 100644 index 0000000000..ae1b4942b9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/CODEOWNERS @@ -0,0 +1 @@ + * @microsoft/containerplat diff --git a/vendor/github.com/Microsoft/go-winio/LICENSE b/vendor/github.com/Microsoft/go-winio/LICENSE new file mode 100644 index 0000000000..b8b569d774 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/Microsoft/go-winio/README.md b/vendor/github.com/Microsoft/go-winio/README.md new file mode 100644 index 0000000000..683be1dcf9 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/README.md @@ -0,0 +1,37 @@ +# go-winio [![Build Status](https://github.com/microsoft/go-winio/actions/workflows/ci.yml/badge.svg)](https://github.com/microsoft/go-winio/actions/workflows/ci.yml) + +This repository contains utilities for efficiently performing Win32 IO operations in +Go. Currently, this is focused on accessing named pipes and other file handles, and +for using named pipes as a net transport. + +This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go +to reuse the thread to schedule another goroutine. This limits support to Windows Vista and +newer operating systems. 
This is similar to the implementation of network sockets in Go's net +package. + +Please see the LICENSE file for licensing information. + +## Contributing + +This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) +declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com. + +When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR +appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. + +We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves +or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can +attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off. + + +## Code of Conduct + +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). +For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or +contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + + + +## Special Thanks +Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe +for another named pipe implementation. diff --git a/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/Microsoft/go-winio/backup.go new file mode 100644 index 0000000000..2be34af431 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/backup.go @@ -0,0 +1,280 @@ +// +build windows + +package winio + +import ( + "encoding/binary" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "syscall" + "unicode/utf16" +) + +//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead +//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite + +const ( + BackupData = uint32(iota + 1) + BackupEaData + BackupSecurity + BackupAlternateData + BackupLink + BackupPropertyData + BackupObjectId + BackupReparseData + BackupSparseBlock + BackupTxfsData +) + +const ( + StreamSparseAttributes = uint32(8) +) + +const ( + WRITE_DAC = 0x40000 + WRITE_OWNER = 0x80000 + ACCESS_SYSTEM_SECURITY = 0x1000000 +) + +// BackupHeader represents a backup stream of a file. +type BackupHeader struct { + Id uint32 // The backup stream ID + Attributes uint32 // Stream attributes + Size int64 // The size of the stream in bytes + Name string // The name of the stream (for BackupAlternateData only). + Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). +} + +type win32StreamId struct { + StreamId uint32 + Attributes uint32 + Size uint64 + NameSize uint32 +} + +// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series +// of BackupHeader values. +type BackupStreamReader struct { + r io.Reader + bytesLeft int64 +} + +// NewBackupStreamReader produces a BackupStreamReader from any io.Reader. 
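+//
+// A hypothetical iteration sketch (src is assumed to be a reader over a
+// stream produced by BackupRead):
+//
+//	br := NewBackupStreamReader(src)
+//	for {
+//		hdr, err := br.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		_ = hdr // inspect hdr.Id and hdr.Size, then Read the stream body
+//	}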
+func NewBackupStreamReader(r io.Reader) *BackupStreamReader { + return &BackupStreamReader{r, 0} +} + +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if +// it was not completely read. +func (r *BackupStreamReader) Next() (*BackupHeader, error) { + if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. + if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } + if _, err := io.Copy(ioutil.Discard, r); err != nil { + return nil, err + } + } + var wsi win32StreamId + if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil { + return nil, err + } + hdr := &BackupHeader{ + Id: wsi.StreamId, + Attributes: wsi.Attributes, + Size: int64(wsi.Size), + } + if wsi.NameSize != 0 { + name := make([]uint16, int(wsi.NameSize/2)) + if err := binary.Read(r.r, binary.LittleEndian, name); err != nil { + return nil, err + } + hdr.Name = syscall.UTF16ToString(name) + } + if wsi.StreamId == BackupSparseBlock { + if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil { + return nil, err + } + hdr.Size -= 8 + } + r.bytesLeft = hdr.Size + return hdr, nil +} + +// Read reads from the current backup stream. +func (r *BackupStreamReader) Read(b []byte) (int, error) { + if r.bytesLeft == 0 { + return 0, io.EOF + } + if int64(len(b)) > r.bytesLeft { + b = b[:r.bytesLeft] + } + n, err := r.r.Read(b) + r.bytesLeft -= int64(n) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } else if r.bytesLeft == 0 && err == nil { + err = io.EOF + } + return n, err +} + +// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API. +type BackupStreamWriter struct { + w io.Writer + bytesLeft int64 +} + +// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer. +func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter { + return &BackupStreamWriter{w, 0} +} + +// WriteHeader writes the next backup stream header and prepares for calls to Write(). +func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error { + if w.bytesLeft != 0 { + return fmt.Errorf("missing %d bytes", w.bytesLeft) + } + name := utf16.Encode([]rune(hdr.Name)) + wsi := win32StreamId{ + StreamId: hdr.Id, + Attributes: hdr.Attributes, + Size: uint64(hdr.Size), + NameSize: uint32(len(name) * 2), + } + if hdr.Id == BackupSparseBlock { + // Include space for the int64 block offset + wsi.Size += 8 + } + if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil { + return err + } + if len(name) != 0 { + if err := binary.Write(w.w, binary.LittleEndian, name); err != nil { + return err + } + } + if hdr.Id == BackupSparseBlock { + if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil { + return err + } + } + w.bytesLeft = hdr.Size + return nil +} + +// Write writes to the current backup stream. +func (w *BackupStreamWriter) Write(b []byte) (int, error) { + if w.bytesLeft < int64(len(b)) { + return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft) + } + n, err := w.w.Write(b) + w.bytesLeft -= int64(n) + return n, err +} + +// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API. +type BackupFileReader struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileReader returns a new BackupFileReader from a file handle. 
If includeSecurity is true, +// Read will attempt to read the security descriptor of the file. +func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader { + r := &BackupFileReader{f, includeSecurity, 0} + return r +} + +// Read reads a backup stream from the file by calling the Win32 API BackupRead(). +func (r *BackupFileReader) Read(b []byte) (int, error) { + var bytesRead uint32 + err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx) + if err != nil { + return 0, &os.PathError{"BackupRead", r.f.Name(), err} + } + runtime.KeepAlive(r.f) + if bytesRead == 0 { + return 0, io.EOF + } + return int(bytesRead), nil +} + +// Close frees Win32 resources associated with the BackupFileReader. It does not close +// the underlying file. +func (r *BackupFileReader) Close() error { + if r.ctx != 0 { + backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx) + runtime.KeepAlive(r.f) + r.ctx = 0 + } + return nil +} + +// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API. +type BackupFileWriter struct { + f *os.File + includeSecurity bool + ctx uintptr +} + +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// Write() will attempt to restore the security descriptor from the stream. +func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { + w := &BackupFileWriter{f, includeSecurity, 0} + return w +} + +// Write restores a portion of the file using the provided backup stream. +func (w *BackupFileWriter) Write(b []byte) (int, error) { + var bytesWritten uint32 + err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx) + if err != nil { + return 0, &os.PathError{"BackupWrite", w.f.Name(), err} + } + runtime.KeepAlive(w.f) + if int(bytesWritten) != len(b) { + return int(bytesWritten), errors.New("not all bytes could be written") + } + return len(b), nil +} + +// Close frees Win32 resources associated with the BackupFileWriter. It does not +// close the underlying file. +func (w *BackupFileWriter) Close() error { + if w.ctx != 0 { + backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx) + runtime.KeepAlive(w.f) + w.ctx = 0 + } + return nil +} + +// OpenForBackup opens a file or directory, potentially skipping access checks if the backup +// or restore privileges have been acquired. +// +// If the file opened was a directory, it cannot be used with Readdir(). 
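+//
+// A minimal sketch (the access, share, and createmode values are assumed,
+// not prescribed):
+//
+//	f, err := OpenForBackup(`C:\some\file`, syscall.GENERIC_READ, syscall.FILE_SHARE_READ, syscall.OPEN_EXISTING)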
+func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { + winPath, err := syscall.UTF16FromString(path) + if err != nil { + return nil, err + } + h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + if err != nil { + err = &os.PathError{Op: "open", Path: path, Err: err} + return nil, err + } + return os.NewFile(uintptr(h), path), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 0000000000..4051c1b33b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. +type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
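+//
+// An illustrative round trip with EncodeExtendedAttributes, defined below
+// (the attribute name is hypothetical):
+//
+//	buf, _ := EncodeExtendedAttributes([]ExtendedAttribute{{Name: "user.key", Value: []byte("v")}})
+//	eas, _ := DecodeExtendedAttributes(buf)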
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/Microsoft/go-winio/file.go new file mode 100644 index 0000000000..0385e41081 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/file.go @@ -0,0 +1,323 @@ +// +build windows + +package winio + +import ( + "errors" + "io" + "runtime" + "sync" + "sync/atomic" + "syscall" + "time" +) + +//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx +//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort +//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus +//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult + +type atomicBool int32 + +func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } +func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } +func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} + +const ( + cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 + cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 +) + +var ( + ErrFileClosed = errors.New("file has already been closed") + ErrTimeout = &timeoutError{} +) + +type timeoutError struct{} + +func (e *timeoutError) Error() string { return "i/o timeout" } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } + +type timeoutChan chan struct{} + +var 
ioInitOnce sync.Once +var ioCompletionPort syscall.Handle + +// ioResult contains the result of an asynchronous IO operation +type ioResult struct { + bytes uint32 + err error +} + +// ioOperation represents an outstanding asynchronous Win32 IO +type ioOperation struct { + o syscall.Overlapped + ch chan ioResult +} + +func initIo() { + h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) + if err != nil { + panic(err) + } + ioCompletionPort = h + go ioCompletionProcessor(h) +} + +// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. +// It takes ownership of this handle and will close it if it is garbage collected. +type win32File struct { + handle syscall.Handle + wg sync.WaitGroup + wgLock sync.RWMutex + closing atomicBool + socket bool + readDeadline deadlineHandler + writeDeadline deadlineHandler +} + +type deadlineHandler struct { + setLock sync.Mutex + channel timeoutChan + channelLock sync.RWMutex + timer *time.Timer + timedout atomicBool +} + +// makeWin32File makes a new win32File from an existing file handle +func makeWin32File(h syscall.Handle) (*win32File, error) { + f := &win32File{handle: h} + ioInitOnce.Do(initIo) + _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) + if err != nil { + return nil, err + } + err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) + if err != nil { + return nil, err + } + f.readDeadline.channel = make(timeoutChan) + f.writeDeadline.channel = make(timeoutChan) + return f, nil +} + +func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil +} + +// closeHandle closes the resources associated with a Win32 handle +func (f *win32File) closeHandle() { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() + // cancel all IO and wait for it to complete + cancelIoEx(f.handle, nil) + f.wg.Wait() + // at this point, no new IO can start + syscall.Close(f.handle) + f.handle = 0 + } else { + f.wgLock.Unlock() + } +} + +// Close closes a win32File. +func (f *win32File) Close() error { + f.closeHandle() + return nil +} + +// prepareIo prepares for a new IO operation. +// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. +func (f *win32File) prepareIo() (*ioOperation, error) { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() + return nil, ErrFileClosed + } + f.wg.Add(1) + f.wgLock.RUnlock() + c := &ioOperation{} + c.ch = make(chan ioResult) + return c, nil +} + +// ioCompletionProcessor processes completed async IOs forever +func ioCompletionProcessor(h syscall.Handle) { + for { + var bytes uint32 + var key uintptr + var op *ioOperation + err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) + if op == nil { + panic(err) + } + op.ch <- ioResult{bytes, err} + } +} + +// asyncIo processes the return value from ReadFile or WriteFile, blocking until +// the operation has actually completed. 
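+// If the operation completed synchronously (err is not ERROR_IO_PENDING),
+// the byte count from the original call is returned unchanged.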
+func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, err error) (int, error) { + if err != syscall.ERROR_IO_PENDING { + return int(bytes), err + } + + if f.closing.isSet() { + cancelIoEx(f.handle, &c.o) + } + + var timeout timeoutChan + if d != nil { + d.channelLock.Lock() + timeout = d.channel + d.channelLock.Unlock() + } + + var r ioResult + select { + case r = <-c.ch: + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + if f.closing.isSet() { + err = ErrFileClosed + } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) + } + case <-timeout: + cancelIoEx(f.handle, &c.o) + r = <-c.ch + err = r.err + if err == syscall.ERROR_OPERATION_ABORTED { + err = ErrTimeout + } + } + + // runtime.KeepAlive is needed, as c is passed via native + // code to ioCompletionProcessor, c must remain alive + // until the channel read is complete. + runtime.KeepAlive(c) + return int(r.bytes), err +} + +// Read reads from a file handle. +func (f *win32File) Read(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.readDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.ReadFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.readDeadline, bytes, err) + runtime.KeepAlive(b) + + // Handle EOF conditions. + if err == nil && n == 0 && len(b) != 0 { + return 0, io.EOF + } else if err == syscall.ERROR_BROKEN_PIPE { + return 0, io.EOF + } else { + return n, err + } +} + +// Write writes to a file handle. +func (f *win32File) Write(b []byte) (int, error) { + c, err := f.prepareIo() + if err != nil { + return 0, err + } + defer f.wg.Done() + + if f.writeDeadline.timedout.isSet() { + return 0, ErrTimeout + } + + var bytes uint32 + err = syscall.WriteFile(f.handle, b, &bytes, &c.o) + n, err := f.asyncIo(c, &f.writeDeadline, bytes, err) + runtime.KeepAlive(b) + return n, err +} + +func (f *win32File) SetReadDeadline(deadline time.Time) error { + return f.readDeadline.set(deadline) +} + +func (f *win32File) SetWriteDeadline(deadline time.Time) error { + return f.writeDeadline.set(deadline) +} + +func (f *win32File) Flush() error { + return syscall.FlushFileBuffers(f.handle) +} + +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + +func (d *deadlineHandler) set(deadline time.Time) error { + d.setLock.Lock() + defer d.setLock.Unlock() + + if d.timer != nil { + if !d.timer.Stop() { + <-d.channel + } + d.timer = nil + } + d.timedout.setFalse() + + select { + case <-d.channel: + d.channelLock.Lock() + d.channel = make(chan struct{}) + d.channelLock.Unlock() + default: + } + + if deadline.IsZero() { + return nil + } + + timeoutIO := func() { + d.timedout.setTrue() + close(d.channel) + } + + now := time.Now() + duration := deadline.Sub(now) + if deadline.After(now) { + // Deadline is in the future, set a timer to wait + d.timer = time.AfterFunc(duration, timeoutIO) + } else { + // Deadline is in the past. Cancel all pending IO now. 
+		timeoutIO()
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/Microsoft/go-winio/fileinfo.go
new file mode 100644
index 0000000000..3ab6bff69c
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/fileinfo.go
@@ -0,0 +1,73 @@
+// +build windows
+
+package winio
+
+import (
+	"os"
+	"runtime"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+// FileBasicInfo contains file access time and file attributes information.
+type FileBasicInfo struct {
+	CreationTime, LastAccessTime, LastWriteTime, ChangeTime windows.Filetime
+	FileAttributes                                          uint32
+	pad                                                     uint32 // padding
+}
+
+// GetFileBasicInfo retrieves times and attributes for a file.
+func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) {
+	bi := &FileBasicInfo{}
+	if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return bi, nil
+}
+
+// SetFileBasicInfo sets times and attributes for a file.
+func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error {
+	if err := windows.SetFileInformationByHandle(windows.Handle(f.Fd()), windows.FileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil {
+		return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return nil
+}
+
+// FileStandardInfo contains extended information for the file.
+// FILE_STANDARD_INFO in WinBase.h
+// https://docs.microsoft.com/en-us/windows/win32/api/winbase/ns-winbase-file_standard_info
+type FileStandardInfo struct {
+	AllocationSize, EndOfFile int64
+	NumberOfLinks             uint32
+	DeletePending, Directory  bool
+}
+
+// GetFileStandardInfo retrieves extended information for the file.
+func GetFileStandardInfo(f *os.File) (*FileStandardInfo, error) {
+	si := &FileStandardInfo{}
+	if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileStandardInfo, (*byte)(unsafe.Pointer(si)), uint32(unsafe.Sizeof(*si))); err != nil {
+		return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err}
+	}
+	runtime.KeepAlive(f)
+	return si, nil
+}
+
+// FileIDInfo contains the volume serial number and file ID for a file. This pair should be
+// unique on a system.
+type FileIDInfo struct {
+	VolumeSerialNumber uint64
+	FileID             [16]byte
+}
+
+// GetFileID retrieves the unique (volume, file ID) pair for a file.
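+//
+// For example (a sketch with error handling elided):
+//
+//	f, _ := os.Open(`C:\some\file`)
+//	id, _ := GetFileID(f)
+//	// id.VolumeSerialNumber and id.FileID together identify the file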
+func GetFileID(f *os.File) (*FileIDInfo, error) { + fileID := &FileIDInfo{} + if err := windows.GetFileInformationByHandleEx(windows.Handle(f.Fd()), windows.FileIdInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { + return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} + } + runtime.KeepAlive(f) + return fileID, nil +} diff --git a/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/Microsoft/go-winio/go.mod new file mode 100644 index 0000000000..98a8dea0e7 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/go.mod @@ -0,0 +1,9 @@ +module github.com/Microsoft/go-winio + +go 1.12 + +require ( + github.com/pkg/errors v0.9.1 + github.com/sirupsen/logrus v1.7.0 + golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c +) diff --git a/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/Microsoft/go-winio/go.sum new file mode 100644 index 0000000000..aa6ad3b571 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 0000000000..b632f8f8bb --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,307 @@ +// +build windows + +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. 
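+//
+// For example, VsockServiceID(80) yields
+// "00000050-facb-11e6-bd58-64006a7986d3": the port is written into Data1 of
+// the well-known template GUID.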
+func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. 
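+//
+// A minimal accept-loop sketch for context (addr and error handling assumed):
+//
+//	l, _ := ListenHvsock(&addr)
+//	conn, _ := l.Accept()
+//	// ... use conn, then shut down the listener:
+//	l.Close()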
+func (l *HvsockListener) Close() error {
	return l.sock.Close()
+}
+
+/* Need to finish ConnectEx handling
+func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) {
+	sock, err := newHvSocket()
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if sock != nil {
+			sock.Close()
+		}
+	}()
+	c, err := sock.prepareIo()
+	if err != nil {
+		return nil, err
+	}
+	defer sock.wg.Done()
+	var bytes uint32
+	err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o)
+	_, err = sock.asyncIo(ctx, c, nil, bytes, err)
+	if err != nil {
+		return nil, err
+	}
+	conn := &HvsockConn{
+		sock:   sock,
+		remote: *addr,
+	}
+	sock = nil
+	return conn, nil
+}
+*/
+
+func (conn *HvsockConn) opErr(op string, err error) error {
+	return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err}
+}
+
+func (conn *HvsockConn) Read(b []byte) (int, error) {
+	c, err := conn.sock.prepareIo()
+	if err != nil {
+		return 0, conn.opErr("read", err)
+	}
+	defer conn.sock.wg.Done()
+	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	var flags, bytes uint32
+	err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil)
+	n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err)
+	if err != nil {
+		if _, ok := err.(syscall.Errno); ok {
+			err = os.NewSyscallError("wsarecv", err)
+		}
+		return 0, conn.opErr("read", err)
+	} else if n == 0 {
+		err = io.EOF
+	}
+	return n, err
+}
+
+func (conn *HvsockConn) Write(b []byte) (int, error) {
+	t := 0
+	for len(b) != 0 {
+		n, err := conn.write(b)
+		if err != nil {
+			return t + n, err
+		}
+		t += n
+		b = b[n:]
+	}
+	return t, nil
+}
+
+func (conn *HvsockConn) write(b []byte) (int, error) {
+	c, err := conn.sock.prepareIo()
+	if err != nil {
+		return 0, conn.opErr("write", err)
+	}
+	defer conn.sock.wg.Done()
+	buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))}
+	var bytes uint32
+	err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil)
+	n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err)
+	if err != nil {
+		if _, ok := err.(syscall.Errno); ok {
+			err = os.NewSyscallError("wsasend", err)
+		}
+		return 0, conn.opErr("write", err)
+	}
+	return n, err
+}
+
+// Close closes the socket connection, failing any pending read or write calls.
+func (conn *HvsockConn) Close() error {
+	return conn.sock.Close()
+}
+
+func (conn *HvsockConn) shutdown(how int) error {
+	// Pass the caller's direction through rather than hardcoding SHUT_RD,
+	// so that CloseRead and CloseWrite shut down the intended side.
+	err := syscall.Shutdown(conn.sock.handle, how)
+	if err != nil {
+		return os.NewSyscallError("shutdown", err)
+	}
+	return nil
+}
+
+// CloseRead shuts down the read end of the socket.
+func (conn *HvsockConn) CloseRead() error {
+	err := conn.shutdown(syscall.SHUT_RD)
+	if err != nil {
+		return conn.opErr("close", err)
+	}
+	return nil
+}
+
+// CloseWrite shuts down the write end of the socket, notifying the other endpoint that
+// no more data will be written.
+func (conn *HvsockConn) CloseWrite() error {
+	err := conn.shutdown(syscall.SHUT_WR)
+	if err != nil {
+		return conn.opErr("close", err)
+	}
+	return nil
+}
+
+// LocalAddr returns the local address of the connection.
+func (conn *HvsockConn) LocalAddr() net.Addr {
+	return &conn.local
+}
+
+// RemoteAddr returns the remote address of the connection.
+func (conn *HvsockConn) RemoteAddr() net.Addr {
+	return &conn.remote
+}
+
+// SetDeadline implements the net.Conn SetDeadline method.
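+// It applies t to both the read and the write deadline.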
+func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/Microsoft/go-winio/pipe.go new file mode 100644 index 0000000000..96700a73de --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/pipe.go @@ -0,0 +1,517 @@ +// +build windows + +package winio + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "runtime" + "syscall" + "time" + "unsafe" +) + +//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe +//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW +//sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW +//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo +//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW +//sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} + +const ( + cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) + cERROR_PIPE_CONNECTED = syscall.Errno(535) + cERROR_SEM_TIMEOUT = syscall.Errno(121) + + cSECURITY_SQOS_PRESENT = 0x100000 + cSECURITY_ANONYMOUS = 0 + + cPIPE_TYPE_MESSAGE = 4 + + cPIPE_READMODE_MESSAGE = 2 + + cFILE_OPEN = 1 + cFILE_CREATE = 2 
+ + cFILE_PIPE_MESSAGE_TYPE = 1 + cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2 + + cSE_DACL_PRESENT = 4 +) + +var ( + // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. + // This error should match net.errClosing since docker takes a dependency on its text. + ErrPipeListenerClosed = errors.New("use of closed network connection") + + errPipeWriteClosed = errors.New("pipe has been closed for write") +) + +type win32Pipe struct { + *win32File + path string +} + +type win32MessageBytePipe struct { + win32Pipe + writeClosed bool + readEOF bool +} + +type pipeAddress string + +func (f *win32Pipe) LocalAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) RemoteAddr() net.Addr { + return pipeAddress(f.path) +} + +func (f *win32Pipe) SetDeadline(t time.Time) error { + f.SetReadDeadline(t) + f.SetWriteDeadline(t) + return nil +} + +// CloseWrite closes the write side of a message pipe in byte mode. +func (f *win32MessageBytePipe) CloseWrite() error { + if f.writeClosed { + return errPipeWriteClosed + } + err := f.win32File.Flush() + if err != nil { + return err + } + _, err = f.win32File.Write(nil) + if err != nil { + return err + } + f.writeClosed = true + return nil +} + +// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since +// they are used to implement CloseWrite(). +func (f *win32MessageBytePipe) Write(b []byte) (int, error) { + if f.writeClosed { + return 0, errPipeWriteClosed + } + if len(b) == 0 { + return 0, nil + } + return f.win32File.Write(b) +} + +// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message +// mode pipe will return io.EOF, as will all subsequent reads. +func (f *win32MessageBytePipe) Read(b []byte) (int, error) { + if f.readEOF { + return 0, io.EOF + } + n, err := f.win32File.Read(b) + if err == io.EOF { + // If this was the result of a zero-byte read, then + // it is possible that the read was due to a zero-size + // message. Since we are simulating CloseWrite with a + // zero-byte message, ensure that all future Read() calls + // also return EOF. + f.readEOF = true + } else if err == syscall.ERROR_MORE_DATA { + // ERROR_MORE_DATA indicates that the pipe's read mode is message mode + // and the message still has more bytes. Treat this as a success, since + // this package presents all named pipes as byte streams. + err = nil + } + return n, err +} + +func (s pipeAddress) Network() string { + return "pipe" +} + +func (s pipeAddress) String() string { + return string(s) +} + +// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout. +func tryDialPipe(ctx context.Context, path *string, access uint32) (syscall.Handle, error) { + for { + + select { + case <-ctx.Done(): + return syscall.Handle(0), ctx.Err() + default: + h, err := createFile(*path, access, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) + if err == nil { + return h, nil + } + if err != cERROR_PIPE_BUSY { + return h, &os.PathError{Err: err, Op: "open", Path: *path} + } + // Wait 10 msec and try again. This is a rather simplistic + // view, as we always try each 10 milliseconds. + time.Sleep(10 * time.Millisecond) + } + } +} + +// DialPipe connects to a named pipe by path, timing out if the connection +// takes longer than the specified duration. If timeout is nil, then we use +// a default timeout of 2 seconds. (We do not use WaitNamedPipe.) 
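+//
+// For example (the pipe name is hypothetical):
+//
+//	t := 5 * time.Second
+//	conn, err := DialPipe(`\\.\pipe\mypipe`, &t)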
+func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { + var absTimeout time.Time + if timeout != nil { + absTimeout = time.Now().Add(*timeout) + } else { + absTimeout = time.Now().Add(2 * time.Second) + } + ctx, _ := context.WithDeadline(context.Background(), absTimeout) + conn, err := DialPipeContext(ctx, path) + if err == context.DeadlineExceeded { + return nil, ErrTimeout + } + return conn, err +} + +// DialPipeContext attempts to connect to a named pipe by `path` until `ctx` +// cancellation or timeout. +func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { + return DialPipeAccess(ctx, path, syscall.GENERIC_READ|syscall.GENERIC_WRITE) +} + +// DialPipeAccess attempts to connect to a named pipe by `path` with `access` until `ctx` +// cancellation or timeout. +func DialPipeAccess(ctx context.Context, path string, access uint32) (net.Conn, error) { + var err error + var h syscall.Handle + h, err = tryDialPipe(ctx, &path, access) + if err != nil { + return nil, err + } + + var flags uint32 + err = getNamedPipeInfo(h, &flags, nil, nil, nil) + if err != nil { + return nil, err + } + + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + + // If the pipe is in message mode, return a message byte pipe, which + // supports CloseWrite(). + if flags&cPIPE_TYPE_MESSAGE != 0 { + return &win32MessageBytePipe{ + win32Pipe: win32Pipe{win32File: f, path: path}, + }, nil + } + return &win32Pipe{win32File: f, path: path}, nil +} + +type acceptResponse struct { + f *win32File + err error +} + +type win32PipeListener struct { + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int +} + +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. + var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= cFILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. 
+ access = syscall.SYNCHRONIZE + } + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + runtime.KeepAlive(ntPath) + return h, nil +} + +func (l *win32PipeListener) makeServerPipe() (*win32File, error) { + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) + if err != nil { + return nil, err + } + f, err := makeWin32File(h) + if err != nil { + syscall.Close(h) + return nil, err + } + return f, nil +} + +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + } + return p, err +} + +func (l *win32PipeListener) listenerRoutine() { + closed := false + for !closed { + select { + case <-l.closeCh: + closed = true + case responseCh := <-l.acceptCh: + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != cERROR_NO_DATA { + break + } + } + responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed + } + } + syscall.Close(l.firstHandle) + l.firstHandle = 0 + // Notify Close() and Accept() callers that the handle has been closed. + close(l.doneCh) +} + +// PipeConfig contain configuration for the pipe listener. +type PipeConfig struct { + // SecurityDescriptor contains a Windows security descriptor in SDDL format. + SecurityDescriptor string + + // MessageMode determines whether the pipe is in byte or message mode. In either + // case the pipe is read in byte mode by default. The only practical difference in + // this implementation is that CloseWrite() is only supported for message mode pipes; + // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only + // transferred to the reader (and returned as io.EOF in this implementation) + // when the pipe is in message mode. + MessageMode bool + + // InputBufferSize specifies the size of the input buffer, in bytes. + InputBufferSize int32 + + // OutputBufferSize specifies the size of the output buffer, in bytes. + OutputBufferSize int32 +} + +// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe. +// The pipe must not already exist. 
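+//
+// A minimal server sketch (the pipe name is hypothetical; a nil config uses
+// the defaults):
+//
+//	l, err := ListenPipe(`\\.\pipe\mypipe`, nil)
+//	// handle err, then:
+//	conn, err := l.Accept()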
+func ListenPipe(path string, c *PipeConfig) (net.Listener, error) {
+	var (
+		sd  []byte
+		err error
+	)
+	if c == nil {
+		c = &PipeConfig{}
+	}
+	if c.SecurityDescriptor != "" {
+		sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor)
+		if err != nil {
+			return nil, err
+		}
+	}
+	h, err := makeServerPipeHandle(path, sd, c, true)
+	if err != nil {
+		return nil, err
+	}
+	l := &win32PipeListener{
+		firstHandle: h,
+		path:        path,
+		config:      *c,
+		acceptCh:    make(chan (chan acceptResponse)),
+		closeCh:     make(chan int),
+		doneCh:      make(chan int),
+	}
+	go l.listenerRoutine()
+	return l, nil
+}
+
+func connectPipe(p *win32File) error {
+	c, err := p.prepareIo()
+	if err != nil {
+		return err
+	}
+	defer p.wg.Done()
+
+	err = connectNamedPipe(p.handle, &c.o)
+	_, err = p.asyncIo(c, nil, 0, err)
+	if err != nil && err != cERROR_PIPE_CONNECTED {
+		return err
+	}
+	return nil
+}
+
+func (l *win32PipeListener) Accept() (net.Conn, error) {
+	ch := make(chan acceptResponse)
+	select {
+	case l.acceptCh <- ch:
+		response := <-ch
+		err := response.err
+		if err != nil {
+			return nil, err
+		}
+		if l.config.MessageMode {
+			return &win32MessageBytePipe{
+				win32Pipe: win32Pipe{win32File: response.f, path: l.path},
+			}, nil
+		}
+		return &win32Pipe{win32File: response.f, path: l.path}, nil
+	case <-l.doneCh:
+		return nil, ErrPipeListenerClosed
+	}
+}
+
+func (l *win32PipeListener) Close() error {
+	select {
+	case l.closeCh <- 1:
+		<-l.doneCh
+	case <-l.doneCh:
+	}
+	return nil
+}
+
+func (l *win32PipeListener) Addr() net.Addr {
+	return pipeAddress(l.path)
+}
diff --git a/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
new file mode 100644
index 0000000000..f497c0e391
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go
@@ -0,0 +1,237 @@
+// +build windows
+
+// Package guid provides a GUID type. The backing structure for a GUID is
+// identical to that used by the golang.org/x/sys/windows GUID type.
+// There are two main binary encodings used for a GUID, the big-endian encoding,
+// and the Windows (mixed-endian) encoding. See here for details:
+// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding
+package guid
+
+import (
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding"
+	"encoding/binary"
+	"fmt"
+	"strconv"
+
+	"golang.org/x/sys/windows"
+)
+
+// Variant specifies which GUID variant (or "type") the GUID is. It determines
+// how the entirety of the rest of the GUID is interpreted.
+type Variant uint8
+
+// The variants specified by RFC 4122.
+const (
+	// VariantUnknown specifies a GUID variant which does not conform to one of
+	// the variant encodings specified in RFC 4122.
+	VariantUnknown Variant = iota
+	VariantNCS
+	VariantRFC4122
+	VariantMicrosoft
+	VariantFuture
+)
+
+// Version specifies how the bits in the GUID were generated. For instance, a
+// version 4 GUID is randomly generated, and a version 5 is generated from the
+// hash of an input string.
+type Version uint8
+
+var _ = (encoding.TextMarshaler)(GUID{})
+var _ = (encoding.TextUnmarshaler)(&GUID{})
+
+// GUID represents a GUID/UUID. It has the same structure as
+// golang.org/x/sys/windows.GUID so that it can be used with functions expecting
+// that type. It is defined as its own type so that stringification and
+// marshaling can be supported. The representation matches that used by native
+// Windows code.
+type GUID windows.GUID
+
+// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122.
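+//
+// A short usage sketch (illustrative):
+//
+//	g, err := NewV4()
+//	if err != nil {
+//		// crypto/rand failed; no GUID was generated
+//	}
+//	s := g.String() // the third group always starts with '4'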
+func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. +func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. 
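+//
+// For example (illustrative; this is the RFC 4122 DNS namespace GUID):
+//
+//	g, _ := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+//	// g.Version() == 1 and g.Variant() == VariantRFC4122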
+func FromString(s string) (GUID, error) {
+	if len(s) != 36 {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+
+	var g GUID
+
+	data1, err := strconv.ParseUint(s[0:8], 16, 32)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data1 = uint32(data1)
+
+	data2, err := strconv.ParseUint(s[9:13], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data2 = uint16(data2)
+
+	data3, err := strconv.ParseUint(s[14:18], 16, 16)
+	if err != nil {
+		return GUID{}, fmt.Errorf("invalid GUID %q", s)
+	}
+	g.Data3 = uint16(data3)
+
+	for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} {
+		v, err := strconv.ParseUint(s[x:x+2], 16, 8)
+		if err != nil {
+			return GUID{}, fmt.Errorf("invalid GUID %q", s)
+		}
+		g.Data4[i] = uint8(v)
+	}
+
+	return g, nil
+}
+
+func (g *GUID) setVariant(v Variant) {
+	d := g.Data4[0]
+	switch v {
+	case VariantNCS:
+		d = (d & 0x7f)
+	case VariantRFC4122:
+		d = (d & 0x3f) | 0x80
+	case VariantMicrosoft:
+		d = (d & 0x1f) | 0xc0
+	case VariantFuture:
+		d = (d & 0x0f) | 0xe0
+	case VariantUnknown:
+		fallthrough
+	default:
+		panic(fmt.Sprintf("invalid variant: %d", v))
+	}
+	g.Data4[0] = d
+}
+
+// Variant returns the GUID variant, as defined in RFC 4122.
+func (g GUID) Variant() Variant {
+	b := g.Data4[0]
+	if b&0x80 == 0 {
+		return VariantNCS
+	} else if b&0xc0 == 0x80 {
+		return VariantRFC4122
+	} else if b&0xe0 == 0xc0 {
+		return VariantMicrosoft
+	} else if b&0xe0 == 0xe0 {
+		return VariantFuture
+	}
+	return VariantUnknown
+}
+
+func (g *GUID) setVersion(v Version) {
+	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
+}
+
+// Version returns the GUID version, as defined in RFC 4122.
+func (g GUID) Version() Version {
+	return Version((g.Data3 & 0xF000) >> 12)
+}
+
+// MarshalText returns the textual representation of the GUID.
+func (g GUID) MarshalText() ([]byte, error) {
+	return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
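+//
+// Together with MarshalText above, this allows GUIDs to round-trip through
+// packages such as encoding/json as quoted strings, e.g. (sketch):
+//
+//	var g GUID
+//	err := json.Unmarshal([]byte(`"6ba7b810-9dad-11d1-80b4-00c04fd430c8"`), &g)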
+func (g *GUID) UnmarshalText(text []byte) error { + g2, err := FromString(string(text)) + if err != nil { + return err + } + *g = g2 + return nil +} diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go new file mode 100644 index 0000000000..c3dd7c2176 --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -0,0 +1,203 @@ +// +build windows + +package winio + +import ( + "bytes" + "encoding/binary" + "fmt" + "runtime" + "sync" + "syscall" + "unicode/utf16" + + "golang.org/x/sys/windows" +) + +//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges +//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf +//sys revertToSelf() (err error) = advapi32.RevertToSelf +//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken +//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread +//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW +//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW +//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW + +const ( + SE_PRIVILEGE_ENABLED = 2 + + ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 + + SeBackupPrivilege = "SeBackupPrivilege" + SeRestorePrivilege = "SeRestorePrivilege" + SeSecurityPrivilege = "SeSecurityPrivilege" +) + +const ( + securityAnonymous = iota + securityIdentification + securityImpersonation + securityDelegation +) + +var ( + privNames = make(map[string]uint64) + privNameMutex sync.Mutex +) + +// PrivilegeError represents an error enabling privileges. +type PrivilegeError struct { + privileges []uint64 +} + +func (e *PrivilegeError) Error() string { + s := "" + if len(e.privileges) > 1 { + s = "Could not enable privileges " + } else { + s = "Could not enable privilege " + } + for i, p := range e.privileges { + if i != 0 { + s += ", " + } + s += `"` + s += getPrivilegeName(p) + s += `"` + } + return s +} + +// RunWithPrivilege enables a single privilege for a function call. +func RunWithPrivilege(name string, fn func() error) error { + return RunWithPrivileges([]string{name}, fn) +} + +// RunWithPrivileges enables privileges for a function call. +func RunWithPrivileges(names []string, fn func() error) error { + privileges, err := mapPrivileges(names) + if err != nil { + return err + } + runtime.LockOSThread() + defer runtime.UnlockOSThread() + token, err := newThreadToken() + if err != nil { + return err + } + defer releaseThreadToken(token) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) + if err != nil { + return err + } + return fn() +} + +func mapPrivileges(names []string) ([]uint64, error) { + var privileges []uint64 + privNameMutex.Lock() + defer privNameMutex.Unlock() + for _, name := range names { + p, ok := privNames[name] + if !ok { + err := lookupPrivilegeValue("", name, &p) + if err != nil { + return nil, err + } + privNames[name] = p + } + privileges = append(privileges, p) + } + return privileges, nil +} + +// EnableProcessPrivileges enables privileges globally for the process. 
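+//
+// A minimal usage sketch (illustrative):
+//
+//	if err := EnableProcessPrivileges([]string{SeBackupPrivilege}); err != nil {
+//		return err
+//	}
+//	defer DisableProcessPrivileges([]string{SeBackupPrivilege})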
+func EnableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED)
+}
+
+// DisableProcessPrivileges disables privileges globally for the process.
+func DisableProcessPrivileges(names []string) error {
+	return enableDisableProcessPrivilege(names, 0)
+}
+
+func enableDisableProcessPrivilege(names []string, action uint32) error {
+	privileges, err := mapPrivileges(names)
+	if err != nil {
+		return err
+	}
+
+	p, _ := windows.GetCurrentProcess()
+	var token windows.Token
+	err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
+	if err != nil {
+		return err
+	}
+
+	defer token.Close()
+	return adjustPrivileges(token, privileges, action)
+}
+
+func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error {
+	var b bytes.Buffer
+	binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
+	for _, p := range privileges {
+		binary.Write(&b, binary.LittleEndian, p)
+		binary.Write(&b, binary.LittleEndian, action)
+	}
+	prevState := make([]byte, b.Len())
+	reqSize := uint32(0)
+	success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
+	if !success {
+		return err
+	}
+	if err == ERROR_NOT_ALL_ASSIGNED {
+		return &PrivilegeError{privileges}
+	}
+	return nil
+}
+
+func getPrivilegeName(luid uint64) string {
+	var nameBuffer [256]uint16
+	bufSize := uint32(len(nameBuffer))
+	err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %d>", luid)
+	}
+
+	var displayNameBuffer [256]uint16
+	displayBufSize := uint32(len(displayNameBuffer))
+	var langID uint32
+	err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
+	if err != nil {
+		return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
+	}
+
+	return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
+}
+
+func newThreadToken() (windows.Token, error) {
+	err := impersonateSelf(securityImpersonation)
+	if err != nil {
+		return 0, err
+	}
+
+	var token windows.Token
+	err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
+	if err != nil {
+		rerr := revertToSelf()
+		if rerr != nil {
+			panic(rerr)
+		}
+		return 0, err
+	}
+	return token, nil
+}
+
+func releaseThreadToken(h windows.Token) {
+	err := revertToSelf()
+	if err != nil {
+		panic(err)
+	}
+	h.Close()
+}
diff --git a/vendor/github.com/Microsoft/go-winio/reparse.go b/vendor/github.com/Microsoft/go-winio/reparse.go
new file mode 100644
index 0000000000..fc1ee4d3a3
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/reparse.go
@@ -0,0 +1,128 @@
+package winio
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"unicode/utf16"
+	"unsafe"
+)
+
+const (
+	reparseTagMountPoint = 0xA0000003
+	reparseTagSymlink    = 0xA000000C
+)
+
+type reparseDataBuffer struct {
+	ReparseTag           uint32
+	ReparseDataLength    uint16
+	Reserved             uint16
+	SubstituteNameOffset uint16
+	SubstituteNameLength uint16
+	PrintNameOffset      uint16
+	PrintNameLength      uint16
+}
+
+// ReparsePoint describes a Win32 symlink or mount point.
+type ReparsePoint struct {
+	Target       string
+	IsMountPoint bool
+}
+
+// UnsupportedReparsePointError is returned when trying to decode a non-symlink or
+// mount point reparse point.
+type UnsupportedReparsePointError struct { + Tag uint32 +} + +func (e *UnsupportedReparsePointError) Error() string { + return fmt.Sprintf("unsupported reparse point %x", e.Tag) +} + +// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink +// or a mount point. +func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { + tag := binary.LittleEndian.Uint32(b[0:4]) + return DecodeReparsePointData(tag, b[8:]) +} + +func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { + isMountPoint := false + switch tag { + case reparseTagMountPoint: + isMountPoint = true + case reparseTagSymlink: + default: + return nil, &UnsupportedReparsePointError{tag} + } + nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) + if !isMountPoint { + nameOffset += 4 + } + nameLength := binary.LittleEndian.Uint16(b[6:8]) + name := make([]uint16, nameLength/2) + err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) + if err != nil { + return nil, err + } + return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil +} + +func isDriveLetter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or +// mount point. +func EncodeReparsePoint(rp *ReparsePoint) []byte { + // Generate an NT path and determine if this is a relative path. + var ntTarget string + relative := false + if strings.HasPrefix(rp.Target, `\\?\`) { + ntTarget = `\??\` + rp.Target[4:] + } else if strings.HasPrefix(rp.Target, `\\`) { + ntTarget = `\??\UNC\` + rp.Target[2:] + } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { + ntTarget = `\??\` + rp.Target + } else { + ntTarget = rp.Target + relative = true + } + + // The paths must be NUL-terminated even though they are counted strings. 
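+	// Layout note: the buffer written below is the reparseDataBuffer header,
+	// then for symlinks a 4-byte flags word (bit 0 marks a relative target),
+	// then the substitute (NT-form) path, then the print path.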
+ target16 := utf16.Encode([]rune(rp.Target + "\x00")) + ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) + + size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 + size += len(ntTarget16)*2 + len(target16)*2 + + tag := uint32(reparseTagMountPoint) + if !rp.IsMountPoint { + tag = reparseTagSymlink + size += 4 // Add room for symlink flags + } + + data := reparseDataBuffer{ + ReparseTag: tag, + ReparseDataLength: uint16(size), + SubstituteNameOffset: 0, + SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), + PrintNameOffset: uint16(len(ntTarget16) * 2), + PrintNameLength: uint16((len(target16) - 1) * 2), + } + + var b bytes.Buffer + binary.Write(&b, binary.LittleEndian, &data) + if !rp.IsMountPoint { + flags := uint32(0) + if relative { + flags |= 1 + } + binary.Write(&b, binary.LittleEndian, flags) + } + + binary.Write(&b, binary.LittleEndian, ntTarget16) + binary.Write(&b, binary.LittleEndian, target16) + return b.Bytes() +} diff --git a/vendor/github.com/Microsoft/go-winio/sd.go b/vendor/github.com/Microsoft/go-winio/sd.go new file mode 100644 index 0000000000..db1b370a1b --- /dev/null +++ b/vendor/github.com/Microsoft/go-winio/sd.go @@ -0,0 +1,98 @@ +// +build windows + +package winio + +import ( + "syscall" + "unsafe" +) + +//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW +//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW +//sys localFree(mem uintptr) = LocalFree +//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength + +const ( + cERROR_NONE_MAPPED = syscall.Errno(1332) +) + +type AccountLookupError struct { + Name string + Err error +} + +func (e *AccountLookupError) Error() string { + if e.Name == "" { + return "lookup account: empty account name specified" + } + var s string + switch e.Err { + case cERROR_NONE_MAPPED: + s = "not found" + default: + s = e.Err.Error() + } + return "lookup account " + e.Name + ": " + s +} + +type SddlConversionError struct { + Sddl string + Err error +} + +func (e *SddlConversionError) Error() string { + return "convert " + e.Sddl + ": " + e.Err.Error() +} + +// LookupSidByName looks up the SID of an account by name +func LookupSidByName(name string) (sid string, err error) { + if name == "" { + return "", &AccountLookupError{name, cERROR_NONE_MAPPED} + } + + var sidSize, sidNameUse, refDomainSize uint32 + err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) + if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { + return "", &AccountLookupError{name, err} + } + sidBuffer := make([]byte, sidSize) + refDomainBuffer := make([]uint16, refDomainSize) + err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) + if err != nil { + return "", &AccountLookupError{name, err} + } + var strBuffer *uint16 + err = convertSidToStringSid(&sidBuffer[0], &strBuffer) + if err != nil { + return "", &AccountLookupError{name, err} + } + sid = 
syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
+	localFree(uintptr(unsafe.Pointer(strBuffer)))
+	return sid, nil
+}
+
+func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
+	var sdBuffer uintptr
+	err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
+	if err != nil {
+		return nil, &SddlConversionError{sddl, err}
+	}
+	defer localFree(sdBuffer)
+	sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
+	copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
+	return sd, nil
+}
+
+func SecurityDescriptorToSddl(sd []byte) (string, error) {
+	var sddl *uint16
+	// The returned string length seems to include an arbitrary number of terminating NULs.
+	// Don't use it.
+	err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
+	if err != nil {
+		return "", err
+	}
+	defer localFree(uintptr(unsafe.Pointer(sddl)))
+	return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
+}
diff --git a/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/Microsoft/go-winio/syscall.go
new file mode 100644
index 0000000000..5955c99fde
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -0,0 +1,3 @@
+package winio
+
+//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
diff --git a/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
new file mode 100644
index 0000000000..176ff75e32
--- /dev/null
+++ b/vendor/github.com/Microsoft/go-winio/zsyscall_windows.go
@@ -0,0 +1,427 @@
+// Code generated by 'go generate'; DO NOT EDIT.
+
+package winio
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var _ unsafe.Pointer
+
+// Do the interface allocations only once for common
+// Errno values.
+const (
+	errnoERROR_IO_PENDING = 997
+)
+
+var (
+	errERROR_IO_PENDING error = syscall.Errno(errnoERROR_IO_PENDING)
+	errERROR_EINVAL     error = syscall.EINVAL
+)
+
+// errnoErr returns common boxed Errno values, to prevent
+// allocations at runtime.
+func errnoErr(e syscall.Errno) error {
+	switch e {
+	case 0:
+		return errERROR_EINVAL
+	case errnoERROR_IO_PENDING:
+		return errERROR_IO_PENDING
+	}
+	// TODO: add more here, after collecting data on the common
+	// error values seen on Windows. (perhaps when running
+	// all.bat?)
+ return e +} + +var ( + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") + procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procBackupRead = modkernel32.NewProc("BackupRead") + procBackupWrite = modkernel32.NewProc("BackupWrite") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") + procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") + procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") + procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") + procbind = modws2_32.NewProc("bind") +) + +func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { + var _p0 uint32 + if releaseAll { + _p0 = 1 + } + r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) + success = r0 != 0 + if true { + err = errnoErr(e1) + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func 
convertSidToStringSid(sid *byte, str **uint16) (err error) { + r1, _, e1 := syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getSecurityDescriptorLength(sd uintptr) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) + len = uint32(r0) + return +} + +func impersonateSelf(level uint32) (err error) { + r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(accountName) + if err != nil { + return + } + return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) +} + +func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) +} + +func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + return _lookupPrivilegeName(_p0, luid, buffer, size) +} + +func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), 
uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(systemName) + if err != nil { + return + } + var _p1 *uint16 + _p1, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _lookupPrivilegeValue(_p0, _p1, luid) +} + +func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { + r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { + var _p0 uint32 + if openAsSelf { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func revertToSelf() (err error) { + r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { + var _p0 *byte + if len(b) > 0 { + _p0 = &b[0] + } + var _p1 uint32 + if abort { + _p1 = 1 + } + var _p2 uint32 + if processSecurity { + _p2 = 1 + } + r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { + r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) +} + +func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), 
uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) + newport = syscall.Handle(r0) + if newport == 0 { + err = errnoErr(e1) + } + return +} + +func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(name) + if err != nil { + return + } + return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) +} + +func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) { + r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) + handle = syscall.Handle(r0) + if handle == syscall.InvalidHandle { + err = errnoErr(e1) + } + return +} + +func getCurrentThread() (h syscall.Handle) { + r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) + h = syscall.Handle(r0) + return +} + +func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { + r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { + r0, _, _ := syscall.Syscall(procLocalAlloc.Addr(), 2, uintptr(uFlags), uintptr(length), 0) + ptr = uintptr(r0) + return +} + +func localFree(mem uintptr) { + syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) + return +} + +func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { + r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 
0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + err = errnoErr(e1) + } + return +} diff --git a/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/Microsoft/hcsshim/LICENSE new file mode 100644 index 0000000000..49d21669ae --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go new file mode 100644 index 0000000000..3ab3bcd89a --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go @@ -0,0 +1,50 @@ +package osversion + +import ( + "fmt" + "sync" + + "golang.org/x/sys/windows" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +var ( + osv OSVersion + once sync.Once +) + +// Get gets the operating system version on Windows. +// The calling application must be manifested to get the correct version information. +func Get() OSVersion { + once.Do(func() { + var err error + osv = OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. + panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + }) + return osv +} + +// Build gets the build-number on Windows +// The calling application must be manifested to get the correct version information. +func Build() uint16 { + return Get().Build +} + +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} diff --git a/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go new file mode 100644 index 0000000000..e9267b9554 --- /dev/null +++ b/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go @@ -0,0 +1,38 @@ +package osversion + +const ( + // RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server + // 2016 (ltsc2016) and Windows 10 (Anniversary Update). + RS1 = 14393 + + // RS2 (version 1703, codename "Redstone 2") was a client-only update, and + // corresponds to Windows 10 (Creators Update). + RS2 = 15063 + + // RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server + // 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update). + RS3 = 16299 + + // RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server + // 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update). + RS4 = 17134 + + // RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server + // 2019 (ltsc2019), and Windows 10 (October 2018 Update). + RS5 = 17763 + + // V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual + // channel). + V19H1 = 18362 + + // V19H2 (version 1909) corresponds to Windows Server 1909 (semi-annual + // channel). + V19H2 = 18363 + + // V20H1 (version 2004) corresponds to Windows Server 2004 (semi-annual + // channel). + V20H1 = 19041 + + // V20H2 corresponds to Windows Server 20H2 (semi-annual channel). + V20H2 = 19042 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/AUTHORS b/vendor/github.com/ProtonMail/go-crypto/AUTHORS new file mode 100644 index 0000000000..2b00ddba0d --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. 
diff --git a/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS new file mode 100644 index 0000000000..1fbd3e976f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/github.com/ProtonMail/go-crypto/LICENSE b/vendor/github.com/ProtonMail/go-crypto/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ProtonMail/go-crypto/PATENTS b/vendor/github.com/ProtonMail/go-crypto/PATENTS new file mode 100644 index 0000000000..733099041f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
new file mode 100644
index 0000000000..3ed3f43573
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/bitcurves/bitcurve.go
@@ -0,0 +1,381 @@
+package bitcurves
+
+// Copyright 2010 The Go Authors. All rights reserved.
+// Copyright 2011 ThePiachu. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package bitcurves implements several Koblitz elliptic curves over prime
+// fields.
+
+// This package operates, internally, on Jacobian coordinates. For a given
+// (x, y) position on the curve, the Jacobian coordinates are (x1, y1, z1)
+// where x = x1/z1² and y = y1/z1³. The greatest speedups come when the whole
+// calculation can be performed within the transform (as in ScalarMult and
+// ScalarBaseMult). But even for Add and Double, it's faster to apply and
+// reverse the transform than to operate in affine coordinates.
+
+import (
+	"crypto/elliptic"
+	"io"
+	"math/big"
+	"sync"
+)
+
+// A BitCurve represents a Koblitz Curve with a=0.
+// See http://www.hyperelliptic.org/EFD/g1p/auto-shortw.html
+type BitCurve struct {
+	Name    string
+	P       *big.Int // the order of the underlying field
+	N       *big.Int // the order of the base point
+	B       *big.Int // the constant of the BitCurve equation
+	Gx, Gy  *big.Int // (x,y) of the base point
+	BitSize int      // the size of the underlying field
+}
+
+// Params returns the parameters of the given BitCurve (see BitCurve struct)
+func (bitCurve *BitCurve) Params() (cp *elliptic.CurveParams) {
+	cp = new(elliptic.CurveParams)
+	cp.Name = bitCurve.Name
+	cp.P = bitCurve.P
+	cp.N = bitCurve.N
+	cp.Gx = bitCurve.Gx
+	cp.Gy = bitCurve.Gy
+	cp.BitSize = bitCurve.BitSize
+	return cp
+}
+
+// IsOnCurve returns true if the given (x,y) lies on the BitCurve.
+func (bitCurve *BitCurve) IsOnCurve(x, y *big.Int) bool {
+	// y² = x³ + b
+	y2 := new(big.Int).Mul(y, y) //y²
+	y2.Mod(y2, bitCurve.P)       //y²%P
+
+	x3 := new(big.Int).Mul(x, x) //x²
+	x3.Mul(x3, x)                //x³
+
+	x3.Add(x3, bitCurve.B) //x³+B
+	x3.Mod(x3, bitCurve.P) //(x³+B)%P
+
+	return x3.Cmp(y2) == 0
+}
+
+// affineFromJacobian reverses the Jacobian transform. See the comment at the
+// top of the file.
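+// For a Jacobian triple (X, Y, Z) it computes the affine point
+// (X/Z² mod P, Y/Z³ mod P); it panics if Z = 0, since the point at
+// infinity has no affine representation.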
+func (bitCurve *BitCurve) affineFromJacobian(x, y, z *big.Int) (xOut, yOut *big.Int) {
+	if z.Cmp(big.NewInt(0)) == 0 {
+		panic("bitcurve: Can't convert to affine with Jacobian Z = 0")
+	}
+	// x = X / Z² mod P
+	zinv := new(big.Int).ModInverse(z, bitCurve.P)
+	zinvsq := new(big.Int).Mul(zinv, zinv)
+
+	xOut = new(big.Int).Mul(x, zinvsq)
+	xOut.Mod(xOut, bitCurve.P)
+	// y = Y / Z³ mod P
+	zinvsq.Mul(zinvsq, zinv)
+	yOut = new(big.Int).Mul(y, zinvsq)
+	yOut.Mod(yOut, bitCurve.P)
+	return xOut, yOut
+}
+
+// Add returns the sum of (x1,y1) and (x2,y2)
+func (bitCurve *BitCurve) Add(x1, y1, x2, y2 *big.Int) (*big.Int, *big.Int) {
+	z := new(big.Int).SetInt64(1)
+	x, y, z := bitCurve.addJacobian(x1, y1, z, x2, y2, z)
+	return bitCurve.affineFromJacobian(x, y, z)
+}
+
+// addJacobian takes two points in Jacobian coordinates, (x1, y1, z1) and
+// (x2, y2, z2) and returns their sum, also in Jacobian form.
+func (bitCurve *BitCurve) addJacobian(x1, y1, z1, x2, y2, z2 *big.Int) (*big.Int, *big.Int, *big.Int) {
+	// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#addition-add-2007-bl
+	z1z1 := new(big.Int).Mul(z1, z1)
+	z1z1.Mod(z1z1, bitCurve.P)
+	z2z2 := new(big.Int).Mul(z2, z2)
+	z2z2.Mod(z2z2, bitCurve.P)
+
+	u1 := new(big.Int).Mul(x1, z2z2)
+	u1.Mod(u1, bitCurve.P)
+	u2 := new(big.Int).Mul(x2, z1z1)
+	u2.Mod(u2, bitCurve.P)
+	h := new(big.Int).Sub(u2, u1)
+	if h.Sign() == -1 {
+		h.Add(h, bitCurve.P)
+	}
+	i := new(big.Int).Lsh(h, 1)
+	i.Mul(i, i)
+	j := new(big.Int).Mul(h, i)
+
+	s1 := new(big.Int).Mul(y1, z2)
+	s1.Mul(s1, z2z2)
+	s1.Mod(s1, bitCurve.P)
+	s2 := new(big.Int).Mul(y2, z1)
+	s2.Mul(s2, z1z1)
+	s2.Mod(s2, bitCurve.P)
+	r := new(big.Int).Sub(s2, s1)
+	if r.Sign() == -1 {
+		r.Add(r, bitCurve.P)
+	}
+	r.Lsh(r, 1)
+	v := new(big.Int).Mul(u1, i)
+
+	x3 := new(big.Int).Set(r)
+	x3.Mul(x3, x3)
+	x3.Sub(x3, j)
+	x3.Sub(x3, v)
+	x3.Sub(x3, v)
+	x3.Mod(x3, bitCurve.P)
+
+	y3 := new(big.Int).Set(r)
+	v.Sub(v, x3)
+	y3.Mul(y3, v)
+	s1.Mul(s1, j)
+	s1.Lsh(s1, 1)
+	y3.Sub(y3, s1)
+	y3.Mod(y3, bitCurve.P)
+
+	z3 := new(big.Int).Add(z1, z2)
+	z3.Mul(z3, z3)
+	z3.Sub(z3, z1z1)
+	if z3.Sign() == -1 {
+		z3.Add(z3, bitCurve.P)
+	}
+	z3.Sub(z3, z2z2)
+	if z3.Sign() == -1 {
+		z3.Add(z3, bitCurve.P)
+	}
+	z3.Mul(z3, h)
+	z3.Mod(z3, bitCurve.P)
+
+	return x3, y3, z3
+}
+
+// Double returns 2*(x,y)
+func (bitCurve *BitCurve) Double(x1, y1 *big.Int) (*big.Int, *big.Int) {
+	z1 := new(big.Int).SetInt64(1)
+	return bitCurve.affineFromJacobian(bitCurve.doubleJacobian(x1, y1, z1))
+}
+
+// doubleJacobian takes a point in Jacobian coordinates, (x, y, z), and
+// returns its double, also in Jacobian form.
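+// Using the "dbl-2009-l" formulas cited below: with A = X1², B = Y1², C = B²,
+// D = 2*((X1+B)²-A-C), E = 3*A and F = E², it returns
+// X3 = F-2*D, Y3 = E*(D-X3)-8*C and Z3 = 2*Y1*Z1.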
+func (bitCurve *BitCurve) doubleJacobian(x, y, z *big.Int) (*big.Int, *big.Int, *big.Int) {
+	// See http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-0.html#doubling-dbl-2009-l
+
+	a := new(big.Int).Mul(x, x) //X1²
+	b := new(big.Int).Mul(y, y) //Y1²
+	c := new(big.Int).Mul(b, b) //B²
+
+	d := new(big.Int).Add(x, b) //X1+B
+	d.Mul(d, d)                 //(X1+B)²
+	d.Sub(d, a)                 //(X1+B)²-A
+	d.Sub(d, c)                 //(X1+B)²-A-C
+	d.Mul(d, big.NewInt(2))     //2*((X1+B)²-A-C)
+
+	e := new(big.Int).Mul(big.NewInt(3), a) //3*A
+	f := new(big.Int).Mul(e, e)             //E²
+
+	x3 := new(big.Int).Mul(big.NewInt(2), d) //2*D
+	x3.Sub(f, x3)                            //F-2*D
+	x3.Mod(x3, bitCurve.P)
+
+	y3 := new(big.Int).Sub(d, x3)                  //D-X3
+	y3.Mul(e, y3)                                  //E*(D-X3)
+	y3.Sub(y3, new(big.Int).Mul(big.NewInt(8), c)) //E*(D-X3)-8*C
+	y3.Mod(y3, bitCurve.P)
+
+	z3 := new(big.Int).Mul(y, z) //Y1*Z1
+	z3.Mul(big.NewInt(2), z3)    //2*Y1*Z1
+	z3.Mod(z3, bitCurve.P)
+
+	return x3, y3, z3
+}
+
+//TODO: double check if it is okay
+// ScalarMult returns k*(Bx,By) where k is a number in big-endian form.
+func (bitCurve *BitCurve) ScalarMult(Bx, By *big.Int, k []byte) (*big.Int, *big.Int) {
+	// We have a slight problem in that the identity of the group (the
+	// point at infinity) cannot be represented in (x, y) form on a finite
+	// machine. Thus the standard add/double algorithm has to be tweaked
+	// slightly: our initial state is not the identity, but x, and we
+	// ignore the first true bit in |k|. If we don't find any true bits in
+	// |k|, then we return nil, nil, because we cannot return the identity
+	// element.
+
+	Bz := new(big.Int).SetInt64(1)
+	x := Bx
+	y := By
+	z := Bz
+
+	seenFirstTrue := false
+	for _, byte := range k {
+		for bitNum := 0; bitNum < 8; bitNum++ {
+			if seenFirstTrue {
+				x, y, z = bitCurve.doubleJacobian(x, y, z)
+			}
+			if byte&0x80 == 0x80 {
+				if !seenFirstTrue {
+					seenFirstTrue = true
+				} else {
+					x, y, z = bitCurve.addJacobian(Bx, By, Bz, x, y, z)
+				}
+			}
+			byte <<= 1
+		}
+	}
+
+	if !seenFirstTrue {
+		return nil, nil
+	}
+
+	return bitCurve.affineFromJacobian(x, y, z)
+}
+
+// ScalarBaseMult returns k*G, where G is the base point of the group and k is
+// an integer in big-endian form.
+func (bitCurve *BitCurve) ScalarBaseMult(k []byte) (*big.Int, *big.Int) {
+	return bitCurve.ScalarMult(bitCurve.Gx, bitCurve.Gy, k)
+}
+
+var mask = []byte{0xff, 0x1, 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f}
+
+//TODO: double check if it is okay
+// GenerateKey returns a public/private key pair. The private key is generated
+// using the given reader, which must return random data.
+func (bitCurve *BitCurve) GenerateKey(rand io.Reader) (priv []byte, x, y *big.Int, err error) {
+	byteLen := (bitCurve.BitSize + 7) >> 3
+	priv = make([]byte, byteLen)
+
+	for x == nil {
+		_, err = io.ReadFull(rand, priv)
+		if err != nil {
+			return
+		}
+		// We have to mask off any excess bits in the case that the size of the
+		// underlying field is not a whole number of bytes.
+		priv[0] &= mask[bitCurve.BitSize%8]
+		// This is because, in tests, rand will return all zeros and we don't
+		// want to get the point at infinity and loop forever.
+		priv[1] ^= 0x42
+		x, y = bitCurve.ScalarBaseMult(priv)
+	}
+	return
+}
+
+// Marshal converts a point into the form specified in section 4.3.6 of ANSI
+// X9.62.
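+// The result is the uncompressed form: a 0x04 prefix byte followed by the
+// big-endian X and Y coordinates, each zero-padded to (BitSize+7)/8 bytes,
+// so the encoding is 1+2*byteLen bytes long.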
+func (bitCurve *BitCurve) Marshal(x, y *big.Int) []byte { + byteLen := (bitCurve.BitSize + 7) >> 3 + + ret := make([]byte, 1+2*byteLen) + ret[0] = 4 // uncompressed point + + xBytes := x.Bytes() + copy(ret[1+byteLen-len(xBytes):], xBytes) + yBytes := y.Bytes() + copy(ret[1+2*byteLen-len(yBytes):], yBytes) + return ret +} + +// Unmarshal converts a point, serialised by Marshal, into an x, y pair. On +// error, x = nil. +func (bitCurve *BitCurve) Unmarshal(data []byte) (x, y *big.Int) { + byteLen := (bitCurve.BitSize + 7) >> 3 + if len(data) != 1+2*byteLen { + return + } + if data[0] != 4 { // uncompressed form + return + } + x = new(big.Int).SetBytes(data[1 : 1+byteLen]) + y = new(big.Int).SetBytes(data[1+byteLen:]) + return +} + +//curve parameters taken from: +//http://www.secg.org/collateral/sec2_final.pdf + +var initonce sync.Once +var secp160k1 *BitCurve +var secp192k1 *BitCurve +var secp224k1 *BitCurve +var secp256k1 *BitCurve + +func initAll() { + initS160() + initS192() + initS224() + initS256() +} + +func initS160() { + // See SEC 2 section 2.4.1 + secp160k1 = new(BitCurve) + secp160k1.Name = "secp160k1" + secp160k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFAC73", 16) + secp160k1.N, _ = new(big.Int).SetString("0100000000000000000001B8FA16DFAB9ACA16B6B3", 16) + secp160k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000007", 16) + secp160k1.Gx, _ = new(big.Int).SetString("3B4C382CE37AA192A4019E763036F4F5DD4D7EBB", 16) + secp160k1.Gy, _ = new(big.Int).SetString("938CF935318FDCED6BC28286531733C3F03C4FEE", 16) + secp160k1.BitSize = 160 +} + +func initS192() { + // See SEC 2 section 2.5.1 + secp192k1 = new(BitCurve) + secp192k1.Name = "secp192k1" + secp192k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFEE37", 16) + secp192k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFE26F2FC170F69466A74DEFD8D", 16) + secp192k1.B, _ = new(big.Int).SetString("000000000000000000000000000000000000000000000003", 16) + secp192k1.Gx, _ = new(big.Int).SetString("DB4FF10EC057E9AE26B07D0280B7F4341DA5D1B1EAE06C7D", 16) + secp192k1.Gy, _ = new(big.Int).SetString("9B2F2F6D9C5628A7844163D015BE86344082AA88D95E2F9D", 16) + secp192k1.BitSize = 192 +} + +func initS224() { + // See SEC 2 section 2.6.1 + secp224k1 = new(BitCurve) + secp224k1.Name = "secp224k1" + secp224k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFE56D", 16) + secp224k1.N, _ = new(big.Int).SetString("010000000000000000000000000001DCE8D2EC6184CAF0A971769FB1F7", 16) + secp224k1.B, _ = new(big.Int).SetString("00000000000000000000000000000000000000000000000000000005", 16) + secp224k1.Gx, _ = new(big.Int).SetString("A1455B334DF099DF30FC28A169A467E9E47075A90F7E650EB6B7A45C", 16) + secp224k1.Gy, _ = new(big.Int).SetString("7E089FED7FBA344282CAFBD6F7E319F7C0B0BD59E2CA4BDB556D61A5", 16) + secp224k1.BitSize = 224 +} + +func initS256() { + // See SEC 2 section 2.7.1 + secp256k1 = new(BitCurve) + secp256k1.Name = "secp256k1" + secp256k1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFC2F", 16) + secp256k1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141", 16) + secp256k1.B, _ = new(big.Int).SetString("0000000000000000000000000000000000000000000000000000000000000007", 16) + secp256k1.Gx, _ = new(big.Int).SetString("79BE667EF9DCBBAC55A06295CE870B07029BFCDB2DCE28D959F2815B16F81798", 16) + secp256k1.Gy, _ = 
new(big.Int).SetString("483ADA7726A3C4655DA4FBFC0E1108A8FD17B448A68554199C47D08FFB10D4B8", 16) + secp256k1.BitSize = 256 +} + +// S160 returns a BitCurve which implements secp160k1 (see SEC 2 section 2.4.1) +func S160() *BitCurve { + initonce.Do(initAll) + return secp160k1 +} + +// S192 returns a BitCurve which implements secp192k1 (see SEC 2 section 2.5.1) +func S192() *BitCurve { + initonce.Do(initAll) + return secp192k1 +} + +// S224 returns a BitCurve which implements secp224k1 (see SEC 2 section 2.6.1) +func S224() *BitCurve { + initonce.Do(initAll) + return secp224k1 +} + +// S256 returns a BitCurve which implements bitcurves (see SEC 2 section 2.7.1) +func S256() *BitCurve { + initonce.Do(initAll) + return secp256k1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go new file mode 100644 index 0000000000..77fb8b9a04 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/brainpool.go @@ -0,0 +1,134 @@ +// Package brainpool implements Brainpool elliptic curves. +// Implementation of rcurves is from github.com/ebfe/brainpool +// Note that these curves are implemented with naive, non-constant time operations +// and are likely not suitable for enviroments where timing attacks are a concern. +package brainpool + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +var ( + once sync.Once + p256t1, p384t1, p512t1 *elliptic.CurveParams + p256r1, p384r1, p512r1 *rcurve +) + +func initAll() { + initP256t1() + initP384t1() + initP512t1() + initP256r1() + initP384r1() + initP512r1() +} + +func initP256t1() { + p256t1 = &elliptic.CurveParams{Name: "brainpoolP256t1"} + p256t1.P, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377", 16) + p256t1.N, _ = new(big.Int).SetString("A9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7", 16) + p256t1.B, _ = new(big.Int).SetString("662C61C430D84EA4FE66A7733D0B76B7BF93EBC4AF2F49256AE58101FEE92B04", 16) + p256t1.Gx, _ = new(big.Int).SetString("A3E8EB3CC1CFE7B7732213B23A656149AFA142C47AAFBC2B79A191562E1305F4", 16) + p256t1.Gy, _ = new(big.Int).SetString("2D996C823439C56D7F7B22E14644417E69BCB6DE39D027001DABE8F35B25C9BE", 16) + p256t1.BitSize = 256 +} + +func initP256r1() { + twisted := p256t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP256r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262", 16) + params.Gy, _ = new(big.Int).SetString("547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997", 16) + z, _ := new(big.Int).SetString("3E2D4BD9597B58639AE7AA669CAB9837CF5CF20A2C852D10F655668DFC150EF0", 16) + p256r1 = newrcurve(twisted, params, z) +} + +func initP384t1() { + p384t1 = &elliptic.CurveParams{Name: "brainpoolP384t1"} + p384t1.P, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53", 16) + p384t1.N, _ = new(big.Int).SetString("8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565", 16) + p384t1.B, _ = new(big.Int).SetString("7F519EADA7BDA81BD826DBA647910F8C4B9346ED8CCDC64E4B1ABD11756DCE1D2074AA263B88805CED70355A33B471EE", 16) + p384t1.Gx, _ = new(big.Int).SetString("18DE98B02DB9A306F2AFCD7235F72A819B80AB12EBD653172476FECD462AABFFC4FF191B946A5F54D8D0AA2F418808CC", 16) + p384t1.Gy, _ = 
new(big.Int).SetString("25AB056962D30651A114AFD2755AD336747F93475B7A1FCA3B88F2B6A208CCFE469408584DC2B2912675BF5B9E582928", 16) + p384t1.BitSize = 384 +} + +func initP384r1() { + twisted := p384t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP384r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E", 16) + params.Gy, _ = new(big.Int).SetString("8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315", 16) + z, _ := new(big.Int).SetString("41DFE8DD399331F7166A66076734A89CD0D2BCDB7D068E44E1F378F41ECBAE97D2D63DBC87BCCDDCCC5DA39E8589291C", 16) + p384r1 = newrcurve(twisted, params, z) +} + +func initP512t1() { + p512t1 = &elliptic.CurveParams{Name: "brainpoolP512t1"} + p512t1.P, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3", 16) + p512t1.N, _ = new(big.Int).SetString("AADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069", 16) + p512t1.B, _ = new(big.Int).SetString("7CBBBCF9441CFAB76E1890E46884EAE321F70C0BCB4981527897504BEC3E36A62BCDFA2304976540F6450085F2DAE145C22553B465763689180EA2571867423E", 16) + p512t1.Gx, _ = new(big.Int).SetString("640ECE5C12788717B9C1BA06CBC2A6FEBA85842458C56DDE9DB1758D39C0313D82BA51735CDB3EA499AA77A7D6943A64F7A3F25FE26F06B51BAA2696FA9035DA", 16) + p512t1.Gy, _ = new(big.Int).SetString("5B534BD595F5AF0FA2C892376C84ACE1BB4E3019B71634C01131159CAE03CEE9D9932184BEEF216BD71DF2DADF86A627306ECFF96DBB8BACE198B61E00F8B332", 16) + p512t1.BitSize = 512 +} + +func initP512r1() { + twisted := p512t1 + params := &elliptic.CurveParams{ + Name: "brainpoolP512r1", + P: twisted.P, + N: twisted.N, + BitSize: twisted.BitSize, + } + params.Gx, _ = new(big.Int).SetString("81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822", 16) + params.Gy, _ = new(big.Int).SetString("7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892", 16) + z, _ := new(big.Int).SetString("12EE58E6764838B69782136F0F2D3BA06E27695716054092E60A80BEDB212B64E585D90BCE13761F85C3F1D2A64E3BE8FEA2220F01EBA5EEB0F35DBD29D922AB", 16) + p512r1 = newrcurve(twisted, params, z) +} + +// P256t1 returns a Curve which implements Brainpool P256t1 (see RFC 5639, section 3.4) +func P256t1() elliptic.Curve { + once.Do(initAll) + return p256t1 +} + +// P256r1 returns a Curve which implements Brainpool P256r1 (see RFC 5639, section 3.4) +func P256r1() elliptic.Curve { + once.Do(initAll) + return p256r1 +} + +// P384t1 returns a Curve which implements Brainpool P384t1 (see RFC 5639, section 3.6) +func P384t1() elliptic.Curve { + once.Do(initAll) + return p384t1 +} + +// P384r1 returns a Curve which implements Brainpool P384r1 (see RFC 5639, section 3.6) +func P384r1() elliptic.Curve { + once.Do(initAll) + return p384r1 +} + +// P512t1 returns a Curve which implements Brainpool P512t1 (see RFC 5639, section 3.7) +func P512t1() elliptic.Curve { + once.Do(initAll) + return p512t1 +} + +// P512r1 returns a Curve which implements Brainpool P512r1 (see RFC 5639, section 3.7) +func P512r1() elliptic.Curve { + once.Do(initAll) + return p512r1 +} diff --git a/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go 
b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go new file mode 100644 index 0000000000..2d5355085f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/brainpool/rcurve.go @@ -0,0 +1,83 @@ +package brainpool + +import ( + "crypto/elliptic" + "math/big" +) + +var _ elliptic.Curve = (*rcurve)(nil) + +type rcurve struct { + twisted elliptic.Curve + params *elliptic.CurveParams + z *big.Int + zinv *big.Int + z2 *big.Int + z3 *big.Int + zinv2 *big.Int + zinv3 *big.Int +} + +var ( + two = big.NewInt(2) + three = big.NewInt(3) +) + +func newrcurve(twisted elliptic.Curve, params *elliptic.CurveParams, z *big.Int) *rcurve { + zinv := new(big.Int).ModInverse(z, params.P) + return &rcurve{ + twisted: twisted, + params: params, + z: z, + zinv: zinv, + z2: new(big.Int).Exp(z, two, params.P), + z3: new(big.Int).Exp(z, three, params.P), + zinv2: new(big.Int).Exp(zinv, two, params.P), + zinv3: new(big.Int).Exp(zinv, three, params.P), + } +} + +func (curve *rcurve) toTwisted(x, y *big.Int) (*big.Int, *big.Int) { + var tx, ty big.Int + tx.Mul(x, curve.z2) + tx.Mod(&tx, curve.params.P) + ty.Mul(y, curve.z3) + ty.Mod(&ty, curve.params.P) + return &tx, &ty +} + +func (curve *rcurve) fromTwisted(tx, ty *big.Int) (*big.Int, *big.Int) { + var x, y big.Int + x.Mul(tx, curve.zinv2) + x.Mod(&x, curve.params.P) + y.Mul(ty, curve.zinv3) + y.Mod(&y, curve.params.P) + return &x, &y +} + +func (curve *rcurve) Params() *elliptic.CurveParams { + return curve.params +} + +func (curve *rcurve) IsOnCurve(x, y *big.Int) bool { + return curve.twisted.IsOnCurve(curve.toTwisted(x, y)) +} + +func (curve *rcurve) Add(x1, y1, x2, y2 *big.Int) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + tx2, ty2 := curve.toTwisted(x2, y2) + return curve.fromTwisted(curve.twisted.Add(tx1, ty1, tx2, ty2)) +} + +func (curve *rcurve) Double(x1, y1 *big.Int) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.Double(curve.toTwisted(x1, y1))) +} + +func (curve *rcurve) ScalarMult(x1, y1 *big.Int, scalar []byte) (x, y *big.Int) { + tx1, ty1 := curve.toTwisted(x1, y1) + return curve.fromTwisted(curve.twisted.ScalarMult(tx1, ty1, scalar)) +} + +func (curve *rcurve) ScalarBaseMult(scalar []byte) (x, y *big.Int) { + return curve.fromTwisted(curve.twisted.ScalarBaseMult(scalar)) +} \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go new file mode 100644 index 0000000000..6b6bc7aed0 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax.go @@ -0,0 +1,162 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package eax provides an implementation of the EAX +// (encrypt-authenticate-translate) mode of operation, as described in +// Bellare, Rogaway, and Wagner "THE EAX MODE OF OPERATION: A TWO-PASS +// AUTHENTICATED-ENCRYPTION SCHEME OPTIMIZED FOR SIMPLICITY AND EFFICIENCY." +// In FSE'04, volume 3017 of LNCS, 2004 +package eax + +import ( + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" +) + +const ( + defaultTagSize = 16 + defaultNonceSize = 16 +) + +type eax struct { + block cipher.Block // Only AES-{128, 192, 256} supported + tagSize int // At least 12 bytes recommended + nonceSize int +} + +func (e *eax) NonceSize() int { + return e.nonceSize +} + +func (e *eax) Overhead() int { + return e.tagSize +} + +// NewEAX returns an EAX instance with AES-{KEYLENGTH} and default nonce and +// tag lengths. Supports {128, 192, 256}-bit key lengths.
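+// +// A brief usage sketch (editor's illustration; the aes and rand imports and +// the key, plaintext, and adata values are assumptions, not part of this +// file): +// +// block, _ := aes.NewCipher(key) // key is 16, 24, or 32 bytes +// aead, err := NewEAX(block) +// if err != nil { +// // handle error +// } +// nonce := make([]byte, aead.NonceSize()) +// _, _ = rand.Read(nonce) +// ct := aead.Seal(nil, nonce, plaintext, adata) +// pt, err := aead.Open(nil, nonce, ct, adata)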
+func NewEAX(block cipher.Block) (cipher.AEAD, error) { + return NewEAXWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewEAXWithNonceAndTagSize returns an EAX instance with AES-{keyLength} and +// given nonce and tag lengths in bytes. It returns an error on zero nonceSize +// and on tag lengths that exceed the block size. +// +// It is recommended to use at least 12 bytes as tag length (see, for instance, +// NIST SP 800-38D). +// +// Only to be used for compatibility with existing cryptosystems with +// non-standard parameters. For all other cases, prefer NewEAX. +func NewEAXWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if nonceSize < 1 { + return nil, eaxError("Cannot initialize EAX with nonceSize = 0") + } + if tagSize > block.BlockSize() { + return nil, eaxError("Custom tag length exceeds blocksize") + } + return &eax{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + }, nil +} + +func (e *eax) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+e.tagSize) + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + + // Encrypt message using CTR mode and omacNonce as IV + ctr := cipher.NewCTR(e.block, omacNonce) + ciphertextData := out[:len(plaintext)] + ctr.XORKeyStream(ciphertextData, plaintext) + + omacCiphertext := e.omacT(2, ciphertextData) + + tag := out[len(plaintext):] + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + return ret +} + +func (e *eax) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > e.nonceSize { + panic("crypto/eax: Nonce too long for this instance") + } + if len(ciphertext) < e.tagSize { + return nil, eaxError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - e.tagSize + + // Compute tag + omacNonce := e.omacT(0, nonce) + omacAdata := e.omacT(1, adata) + omacCiphertext := e.omacT(2, ciphertext[:sep]) + + tag := make([]byte, e.tagSize) + for i := 0; i < e.tagSize; i++ { + tag[i] = omacCiphertext[i] ^ omacNonce[i] ^ omacAdata[i] + } + + // Compare tags + if subtle.ConstantTimeCompare(ciphertext[sep:], tag) != 1 { + return nil, eaxError("Tag authentication failed") + } + + // Decrypt ciphertext + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ctr := cipher.NewCTR(e.block, omacNonce) + ctr.XORKeyStream(out, ciphertext[:sep]) + + return ret[:sep], nil +} + +// Tweakable OMAC - Calls OMAC_K([t]_n || plaintext) +func (e *eax) omacT(t byte, plaintext []byte) []byte { + blockSize := e.block.BlockSize() + byteT := make([]byte, blockSize) + byteT[blockSize-1] = t + concat := append(byteT, plaintext...)
+ return e.omac(concat) +} + +func (e *eax) omac(plaintext []byte) []byte { + blockSize := e.block.BlockSize() + // L ← E_K(0^n); B ← 2L; P ← 4L + L := make([]byte, blockSize) + e.block.Encrypt(L, L) + B := byteutil.GfnDouble(L) + P := byteutil.GfnDouble(B) + + // CBC with IV = 0 + cbc := cipher.NewCBCEncrypter(e.block, make([]byte, blockSize)) + padded := e.pad(plaintext, B, P) + cbcCiphertext := make([]byte, len(padded)) + cbc.CryptBlocks(cbcCiphertext, padded) + + return cbcCiphertext[len(cbcCiphertext)-blockSize:] +} + +func (e *eax) pad(plaintext, B, P []byte) []byte { + // if |M| in {n, 2n, 3n, ...} + blockSize := e.block.BlockSize() + if len(plaintext) != 0 && len(plaintext)%blockSize == 0 { + return byteutil.RightXor(plaintext, B) + } + + // else return (M || 1 || 0^(n−1−(|M| % n))) xor→ P + ending := make([]byte, blockSize-len(plaintext)%blockSize) + ending[0] = 0x80 + padded := append(plaintext, ending...) + return byteutil.RightXor(padded, P) +} + +func eaxError(err string) error { + return errors.New("crypto/eax: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go new file mode 100644 index 0000000000..ddb53d0790 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/eax_test_vectors.go @@ -0,0 +1,58 @@ +package eax + +// Test vectors from +// https://web.cs.ucdavis.edu/~rogaway/papers/eax.pdf +var testVectors = []struct { + msg, key, nonce, header, ciphertext string +}{ + {"", + "233952DEE4D5ED5F9B9C6D6FF80FF478", + "62EC67F9C3A4A407FCB2A8C49031A8B3", + "6BFB914FD07EAE6B", + "E037830E8389F27B025A2D6527E79D01"}, + {"F7FB", + "91945D3F4DCBEE0BF45EF52255F095A4", + "BECAF043B0A23D843194BA972C66DEBD", + "FA3BFD4806EB53FA", + "19DD5C4C9331049D0BDAB0277408F67967E5"}, + {"1A47CB4933", + "01F74AD64077F2E704C0F60ADA3DD523", + "70C3DB4F0D26368400A10ED05D2BFF5E", + "234A3463C1264AC6", + "D851D5BAE03A59F238A23E39199DC9266626C40F80"}, + {"481C9E39B1", + "D07CF6CBB7F313BDDE66B727AFD3C5E8", + "8408DFFF3C1A2B1292DC199E46B7D617", + "33CCE2EABFF5A79D", + "632A9D131AD4C168A4225D8E1FF755939974A7BEDE"}, + {"40D0C07DA5E4", + "35B6D0580005BBC12B0587124557D2C2", + "FDB6B06676EEDC5C61D74276E1F8E816", + "AEB96EAEBE2970E9", + "071DFE16C675CB0677E536F73AFE6A14B74EE49844DD"}, + {"4DE3B35C3FC039245BD1FB7D", + "BD8E6E11475E60B268784C38C62FEB22", + "6EAC5C93072D8E8513F750935E46DA1B", + "D4482D1CA78DCE0F", + "835BB4F15D743E350E728414ABB8644FD6CCB86947C5E10590210A4F"}, + {"8B0A79306C9CE7ED99DAE4F87F8DD61636", + "7C77D6E813BED5AC98BAA417477A2E7D", + "1A8C98DCD73D38393B2BF1569DEEFC19", + "65D2017990D62528", + "02083E3979DA014812F59F11D52630DA30137327D10649B0AA6E1C181DB617D7F2"}, + {"1BDA122BCE8A8DBAF1877D962B8592DD2D56", + "5FFF20CAFAB119CA2FC73549E20F5B0D", + "DDE59B97D722156D4D9AFF2BC7559826", + "54B9F04E6A09189A", + "2EC47B2C4954A489AFC7BA4897EDCDAE8CC33B60450599BD02C96382902AEF7F832A"}, + {"6CF36720872B8513F6EAB1A8A44438D5EF11", + "A4A4782BCFFD3EC5E7EF6D8C34A56123", + "B781FCF2F75FA5A8DE97A9CA48E522EC", + "899A175897561D7E", + "0DE18FD0FDD91E7AF19F1D8EE8733938B1E8E7F6D2231618102FDB7FE55FF1991700"}, + {"CA40D7446E545FFAED3BD12A740A659FFBBB3CEAB7", + "8395FCF1E95BEBD697BD010BC766AAC3", + "22E7ADD93CFC6393C57EC0B3C17D6B44", + "126735FCC320D25A", + "CB8920F87A6C75CFF39627B56E3ED197C552D295A7CFC46AFC253B4652B1AF3795B124AB6E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go new file mode 100644 index 
0000000000..4eb19f28d9 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/eax/random_vectors.go @@ -0,0 +1,131 @@ +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. + +package eax + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + {"DFDE093F36B0356E5A81F609786982E3", + "1D8AC604419001816905BA72B14CED7E", + "152A1517A998D7A24163FCDD146DE81AC347C8B97088F502093C1ABB8F6E33D9A219C34D7603A18B1F5ABE02E56661B7D7F67E81EC08C1302EF38D80A859486D450E94A4F26AD9E68EEBBC0C857A0FC5CF9E641D63D565A7E361BC8908F5A8DC8FD6", + "1C8EAAB71077FE18B39730A3156ADE29C5EE824C7EE86ED2A253B775603FB237116E654F6FEC588DD27F523A0E01246FE73FE348491F2A8E9ABC6CA58D663F71CDBCF4AD798BE46C42AE6EE8B599DB44A1A48D7BBBBA0F7D2750181E1C5E66967F7D57CBD30AFBDA5727", + "79E7E150934BBEBF7013F61C60462A14D8B15AF7A248AFB8A344EF021C1500E16666891D6E973D8BB56B71A371F12CA34660C4410C016982B20F547E3762A58B7BF4F20236CADCF559E2BE7D783B13723B2741FC7CDC8997D839E39A3DDD2BADB96743DD7049F1BDB0516A262869915B3F70498AFB7B191BF960"}, + {"F10619EF02E5D94D7550EB84ED364A21", + "8DC0D4F2F745BBAE835CC5574B942D20", + "FE561358F2E8DF7E1024FF1AE9A8D36EBD01352214505CB99D644777A8A1F6027FA2BDBFC529A9B91136D5F2416CFC5F0F4EC3A1AFD32BDDA23CA504C5A5CB451785FABF4DFE4CD50D817491991A60615B30286361C100A95D1712F2A45F8E374461F4CA2B", + "D7B5A971FC219631D30EFC3664AE3127D9CF3097DAD9C24AC7905D15E8D9B25B026B31D68CAE00975CDB81EB1FD96FD5E1A12E2BB83FA25F1B1D91363457657FC03875C27F2946C5", + "2F336ED42D3CC38FC61660C4CD60BA4BD438B05F5965D8B7B399D2E7167F5D34F792D318F94DB15D67463AC449E13D568CC09BFCE32A35EE3EE96A041927680AE329811811E27F2D1E8E657707AF99BA96D13A478D695D59"}, + {"429F514EFC64D98A698A9247274CFF45", + "976AA5EB072F912D126ACEBC954FEC38", + "A71D89DC5B6CEDBB7451A27C3C2CAE09126DB4C421", + "5632FE62AB1DC549D54D3BC3FC868ACCEDEFD9ECF5E9F8", + "848AE4306CA8C7F416F8707625B7F55881C0AB430353A5C967CDA2DA787F581A70E34DBEBB2385"}, + {"398138F309085F47F8457CDF53895A63", + "F8A8A7F2D28E5FFF7BBC2F24353F7A36", + "5D633C21BA7764B8855CAB586F3746E236AD486039C83C6B56EFA9C651D38A41D6B20DAEE3418BFEA44B8BD6", + "A3BBAA91920AF5E10659818B1B3B300AC79BFC129C8329E75251F73A66D3AE0128EB91D5031E0A65C329DB7D1E9C0493E268", + "D078097267606E5FB07CFB7E2B4B718172A82C6A4CEE65D549A4DFB9838003BD2FBF64A7A66988AC1A632FD88F9E9FBB57C5A78AD2E086EACBA3DB68511D81C2970A"}, + {"7A4151EBD3901B42CBA45DAFB2E931BA", + "0FC88ACEE74DD538040321C330974EB8", + "250464FB04733BAB934C59E6AD2D6AE8D662CBCFEFBE61E5A308D4211E58C4C25935B72C69107722E946BFCBF416796600542D76AEB73F2B25BF53BAF97BDEB36ED3A7A51C31E7F170EB897457E7C17571D1BA0A908954E9", + "88C41F3EBEC23FAB8A362D969CAC810FAD4F7CA6A7F7D0D44F060F92E37E1183768DD4A8C733F71C96058D362A39876D183B86C103DE", + "74A25B2182C51096D48A870D80F18E1CE15867778E34FCBA6BD7BFB3739FDCD42AD0F2D9F4EBA29085285C6048C15BCE5E5166F1F962D3337AA88E6062F05523029D0A7F0BF9"}, + {"BFB147E1CD5459424F8C0271FC0E0DC5", + "EABCC126442BF373969EA3015988CC45", + "4C0880E1D71AA2C7", + "BE1B5EC78FBF73E7A6682B21BA7E0E5D2D1C7ABE", + "5660D7C1380E2F306895B1402CB2D6C37876504276B414D120F4CF92FDDDBB293A238EA0"}, + {"595DD6F52D18BC2CA8EB4EDAA18D9FA3", + "0F84B5D36CF4BC3B863313AF3B4D2E97", + "30AE6CC5F99580F12A779D98BD379A60948020C0B6FBD5746B30BA3A15C6CD33DAF376C70A9F15B6C0EB410A93161F7958AE23", + "8EF3687A1642B070970B0B91462229D1D76ABC154D18211F7152AA9FF368", + "317C1DDB11417E5A9CC4DDE7FDFF6659A5AC4B31DE025212580A05CDAC6024D3E4AE7C2966E52B9129E9ECDBED86"}, + 
{"44E6F2DC8FDC778AD007137D11410F50", + "270A237AD977F7187AA6C158A0BAB24F", + "509B0F0EB12E2AA5C5BA2DE553C07FAF4CE0C9E926531AA709A3D6224FCB783ACCF1559E10B1123EBB7D52E8AB54E6B5352A9ED0D04124BF0E9D9BACFD7E32B817B2E625F5EE94A64EDE9E470DE7FE6886C19B294F9F828209FE257A78", + "8B3D7815DF25618A5D0C55A601711881483878F113A12EC36CF64900549A3199555528559DC118F789788A55FAFD944E6E99A9CA3F72F238CD3F4D88223F7A745992B3FAED1848", + "1CC00D79F7AD82FDA71B58D286E5F34D0CC4CEF30704E771CC1E50746BDF83E182B078DB27149A42BAE619DF0F85B0B1090AD55D3B4471B0D6F6ECCD09C8F876B30081F0E7537A9624F8AAF29DA85E324122EFB4D68A56"}, + {"BB7BC352A03044B4428D8DBB4B0701FDEC4649FD17B81452", + "8B4BBE26CCD9859DCD84884159D6B0A4", + "2212BEB0E78E0F044A86944CF33C8D5C80D9DBE1034BF3BCF73611835C7D3A52F5BD2D81B68FD681B68540A496EE5DA16FD8AC8824E60E1EC2042BE28FB0BFAD4E4B03596446BDD8C37D936D9B3D5295BE19F19CF5ACE1D33A46C952CE4DE5C12F92C1DD051E04AEED", + "9037234CC44FFF828FABED3A7084AF40FA7ABFF8E0C0EFB57A1CC361E18FC4FAC1AB54F3ABFE9FF77263ACE16C3A", + "A9391B805CCD956081E0B63D282BEA46E7025126F1C1631239C33E92AA6F92CD56E5A4C56F00FF9658E93D48AF4EF0EF81628E34AD4DB0CDAEDCD2A17EE7"}, + {"99C0AD703196D2F60A74E6B378B838B31F82EA861F06FC4E", + "92745C018AA708ECFEB1667E9F3F1B01", + "828C69F376C0C0EC651C67749C69577D589EE39E51404D80EBF70C8660A8F5FD375473F4A7C611D59CB546A605D67446CE2AA844135FCD78BB5FBC90222A00D42920BB1D7EEDFB0C4672554F583EF23184F89063CDECBE482367B5F9AF3ACBC3AF61392BD94CBCD9B64677", + "A879214658FD0A5B0E09836639BF82E05EC7A5EF71D4701934BDA228435C68AC3D5CEB54997878B06A655EEACEFB1345C15867E7FE6C6423660C8B88DF128EBD6BCD85118DBAE16E9252FFB204324E5C8F38CA97759BDBF3CB0083", + "51FE87996F194A2585E438B023B345439EA60D1AEBED4650CDAF48A4D4EEC4FC77DC71CC4B09D3BEEF8B7B7AF716CE2B4EFFB3AC9E6323C18AC35E0AA6E2BBBC8889490EB6226C896B0D105EAB42BFE7053CCF00ED66BA94C1BA09A792AA873F0C3B26C5C5F9A936E57B25"}, + {"7086816D00D648FB8304AA8C9E552E1B69A9955FB59B25D1", + "0F45CF7F0BF31CCEB85D9DA10F4D749F", + "93F27C60A417D9F0669E86ACC784FC8917B502DAF30A6338F11B30B94D74FEFE2F8BE1BBE2EAD10FAB7EED3C6F72B7C3ECEE1937C32ED4970A6404E139209C05", + "877F046601F3CBE4FB1491943FA29487E738F94B99AF206262A1D6FF856C9AA0B8D4D08A54370C98F8E88FA3DCC2B14C1F76D71B2A4C7963AEE8AF960464C5BEC8357AD00DC8", + "FE96906B895CE6A8E72BC72344E2C8BB3C63113D70EAFA26C299BAFE77A8A6568172EB447FB3E86648A0AF3512DEB1AAC0819F3EC553903BF28A9FB0F43411237A774BF9EE03E445D280FBB9CD12B9BAAB6EF5E52691"}, + {"062F65A896D5BF1401BADFF70E91B458E1F9BD4888CB2E4D", + "5B11EA1D6008EBB41CF892FCA5B943D1", + "BAF4FF5C8242", + "A8870E091238355984EB2F7D61A865B9170F440BFF999A5993DD41A10F4440D21FF948DDA2BF663B2E03AC3324492DC5E40262ECC6A65C07672353BE23E7FB3A9D79FF6AA38D97960905A38DECC312CB6A59E5467ECF06C311CD43ADC0B543EDF34FE8BE611F176460D5627CA51F8F8D9FED71F55C", + "B10E127A632172CF8AA7539B140D2C9C2590E6F28C3CB892FC498FCE56A34F732FBFF32E79C7B9747D9094E8635A0C084D6F0247F9768FB5FF83493799A9BEC6C39572120C40E9292C8C947AE8573462A9108C36D9D7112E6995AE5867E6C8BB387D1C5D4BEF524F391B9FD9F0A3B4BFA079E915BCD920185CFD38D114C558928BD7D47877"}, + {"38A8E45D6D705A11AF58AED5A1344896998EACF359F2E26A", + "FD82B5B31804FF47D44199B533D0CF84", + "DE454D4E62FE879F2050EE3E25853623D3E9AC52EEC1A1779A48CFAF5ECA0BFDE44749391866D1", + "B804", + "164BB965C05EBE0931A1A63293EDF9C38C27"}, + {"34C33C97C6D7A0850DA94D78A58DC61EC717CD7574833068", + "343BE00DA9483F05C14F2E9EB8EA6AE8", + "78312A43EFDE3CAE34A65796FF059A3FE15304EEA5CF1D9306949FE5BF3349D4977D4EBE76C040FE894C5949E4E4D6681153DA87FB9AC5062063CA2EA183566343362370944CE0362D25FC195E124FD60E8682E665D13F2229DDA3E4B2CB1DCA", + 
"CC11BB284B1153578E4A5ED9D937B869DAF00F5B1960C23455CA9CC43F486A3BE0B66254F1041F04FDF459C8640465B6E1D2CF899A381451E8E7FCB50CF87823BE77E24B132BBEEDC72E53369B275E1D8F49ECE59F4F215230AC4FE133FC80E4F634EE80BA4682B62C86", + "E7F703DC31A95E3A4919FF957836CB76C063D81702AEA4703E1C2BF30831E58C4609D626EC6810E12EAA5B930F049FF9EFC22C3E3F1EBD4A1FB285CB02A1AC5AD46B425199FC0A85670A5C4E3DAA9636C8F64C199F42F18AAC8EA7457FD377F322DD7752D7D01B946C8F0A97E6113F0D50106F319AFD291AAACE"}, + {"C6ECF7F053573E403E61B83052A343D93CBCC179D1E835BE", + "E280E13D7367042E3AA09A80111B6184", + "21486C9D7A9647", + "5F2639AFA6F17931853791CD8C92382BBB677FD72D0AB1A080D0E49BFAA21810E963E4FACD422E92F65CBFAD5884A60CD94740DF31AF02F95AA57DA0C4401B0ED906", + "5C51DB20755302070C45F52E50128A67C8B2E4ED0EACB7E29998CCE2E8C289DD5655913EC1A51CC3AABE5CDC2402B2BE7D6D4BF6945F266FBD70BA9F37109067157AE7530678B45F64475D4EBFCB5FFF46A5"}, + {"5EC6CF7401BC57B18EF154E8C38ACCA8959E57D2F3975FF5", + "656B41CB3F9CF8C08BAD7EBFC80BD225", + "6B817C2906E2AF425861A7EF59BA5801F143EE2A139EE72697CDE168B4", + "2C0E1DDC9B1E5389BA63845B18B1F8A1DB062037151BCC56EF7C21C0BB4DAE366636BBA975685D7CC5A94AFBE89C769016388C56FB7B57CE750A12B718A8BDCF70E80E8659A8330EFC8F86640F21735E8C80E23FE43ABF23507CE3F964AE4EC99D", + "ED780CF911E6D1AA8C979B889B0B9DC1ABE261832980BDBFB576901D9EF5AB8048998E31A15BE54B3E5845A4D136AD24D0BDA1C3006168DF2F8AC06729CB0818867398150020131D8F04EDF1923758C9EABB5F735DE5EA1758D4BC0ACFCA98AFD202E9839B8720253693B874C65586C6F0"}, + {"C92F678EB2208662F5BCF3403EC05F5961E957908A3E79421E1D25FC19054153", + "DA0F3A40983D92F2D4C01FED33C7A192", + "2B6E9D26DB406A0FAB47608657AA10EFC2B4AA5F459B29FF85AC9A40BFFE7AEB04F77E9A11FAAA116D7F6D4DA417671A9AB02C588E0EF59CB1BFB4B1CC931B63A3B3A159FCEC97A04D1E6F0C7E6A9CEF6B0ABB04758A69F1FE754DF4C2610E8C46B6CF413BDB31351D55BEDCB7B4A13A1C98E10984475E0F2F957853", + "F37326A80E08", + "83519E53E321D334F7C10B568183775C0E9AAE55F806"}, + {"6847E0491BE57E72995D186D50094B0B3593957A5146798FCE68B287B2FB37B5", + "3EE1182AEBB19A02B128F28E1D5F7F99", + "D9F35ABB16D776CE", + "DB7566ED8EA95BDF837F23DB277BAFBC5E70D1105ADFD0D9EF15475051B1EF94709C67DCA9F8D5", + "2CDCED0C9EBD6E2A508822A685F7DCD1CDD99E7A5FCA786C234E7F7F1D27EC49751AD5DCFA30C5EDA87C43CAE3B919B6BBCFE34C8EDA59"}, + {"82B019673642C08388D3E42075A4D5D587558C229E4AB8F660E37650C4C41A0A", + "336F5D681E0410FAE7B607246092C6DC", + "D430CBD8FE435B64214E9E9CDC5DE99D31CFCFB8C10AA0587A49DF276611", + "998404153AD77003E1737EDE93ED79859EE6DCCA93CB40C4363AA817ABF2DBBD46E42A14A7183B6CC01E12A577888141363D0AE011EB6E8D28C0B235", + "9BEF69EEB60BD3D6065707B7557F25292A8872857CFBD24F2F3C088E4450995333088DA50FD9121221C504DF1D0CD5EFE6A12666C5D5BB12282CF4C19906E9CFAB97E9BDF7F49DC17CFC384B"}, + {"747B2E269B1859F0622C15C8BAD6A725028B1F94B8DB7326948D1E6ED663A8BC", + "AB91F7245DDCE3F1C747872D47BE0A8A", + "3B03F786EF1DDD76E1D42646DA4CD2A5165DC5383CE86D1A0B5F13F910DC278A4E451EE0192CBA178E13B3BA27FDC7840DF73D2E104B", + "6B803F4701114F3E5FE21718845F8416F70F626303F545BE197189E0A2BA396F37CE06D389EB2658BC7D56D67868708F6D0D32", + "1570DDB0BCE75AA25D1957A287A2C36B1A5F2270186DA81BA6112B7F43B0F3D1D0ED072591DCF1F1C99BBB25621FC39B896FF9BD9413A2845363A9DCD310C32CF98E57"}, + {"02E59853FB29AEDA0FE1C5F19180AD99A12FF2F144670BB2B8BADF09AD812E0A", + "C691294EF67CD04D1B9242AF83DD1421", + "879334DAE3", + "1E17F46A98FEF5CBB40759D95354", + "FED8C3FF27DDF6313AED444A2985B36CBA268AAD6AAC563C0BA28F6DB5DB"}, + {"F6C1FB9B4188F2288FF03BD716023198C3582CF2A037FC2F29760916C2B7FCDB", + "4228DA0678CA3534588859E77DFF014C", + 
"D8153CAF35539A61DD8D05B3C9B44F01E564FB9348BCD09A1C23B84195171308861058F0A3CD2A55B912A3AAEE06FF4D356C77275828F2157C2FC7C115DA39E443210CCC56BEDB0CC99BBFB227ABD5CC454F4E7F547C7378A659EEB6A7E809101A84F866503CB18D4484E1FA09B3EC7FC75EB2E35270800AA7", + "23B660A779AD285704B12EC1C580387A47BEC7B00D452C6570", + "5AA642BBABA8E49849002A2FAF31DB8FC7773EFDD656E469CEC19B3206D4174C9A263D0A05484261F6"}, + {"8FF6086F1FADB9A3FBE245EAC52640C43B39D43F89526BB5A6EBA47710931446", + "943188480C99437495958B0AE4831AA9", + "AD5CD0BDA426F6EBA23C8EB23DC73FF9FEC173355EDBD6C9344C4C4383F211888F7CE6B29899A6801DF6B38651A7C77150941A", + "80CD5EA8D7F81DDF5070B934937912E8F541A5301877528EB41AB60C020968D459960ED8FB73083329841A", + "ABAE8EB7F36FCA2362551E72DAC890BA1BB6794797E0FC3B67426EC9372726ED4725D379EA0AC9147E48DCD0005C502863C2C5358A38817C8264B5"}, + {"A083B54E6B1FE01B65D42FCD248F97BB477A41462BBFE6FD591006C022C8FD84", + "B0490F5BD68A52459556B3749ACDF40E", + "8892E047DA5CFBBDF7F3CFCBD1BD21C6D4C80774B1826999234394BD3E513CC7C222BB40E1E3140A152F19B3802F0D036C24A590512AD0E8", + "D7B15752789DC94ED0F36778A5C7BBB207BEC32BAC66E702B39966F06E381E090C6757653C3D26A81EC6AD6C364D66867A334C91BB0B8A8A4B6EACDF0783D09010AEBA2DD2062308FE99CC1F", + "C071280A732ADC93DF272BF1E613B2BB7D46FC6665EF2DC1671F3E211D6BDE1D6ADDD28DF3AA2E47053FC8BB8AE9271EC8BC8B2CFFA320D225B451685B6D23ACEFDD241FE284F8ADC8DB07F456985B14330BBB66E0FB212213E05B3E"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go new file mode 100644 index 0000000000..a6bdf51232 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/internal/byteutil/byteutil.go @@ -0,0 +1,92 @@ +// Copyright (C) 2019 ProtonTech AG +// This file contains necessary tools for the aex and ocb packages. +// +// These functions SHOULD NOT be used elsewhere, since they are optimized for +// specific input nature in the EAX and OCB modes of operation. + +package byteutil + +// GfnDouble computes 2 * input in the field of 2^n elements. +// The irreducible polynomial in the finite field for n=128 is +// x^128 + x^7 + x^2 + x + 1 (equals 0x87) +// Constant-time execution in order to avoid side-channel attacks +func GfnDouble(input []byte) []byte { + if len(input) != 16 { + panic("Doubling in GFn only implemented for n = 128") + } + // If the first bit is zero, return 2L = L << 1 + // Else return (L << 1) xor 0^120 10000111 + shifted := ShiftBytesLeft(input) + shifted[15] ^= ((input[0] >> 7) * 0x87) + return shifted +} + +// ShiftBytesLeft outputs the byte array corresponding to x << 1 in binary. +func ShiftBytesLeft(x []byte) []byte { + l := len(x) + dst := make([]byte, l) + for i := 0; i < l-1; i++ { + dst[i] = (x[i] << 1) | (x[i+1] >> 7) + } + dst[l-1] = x[l-1] << 1 + return dst +} + +// ShiftNBytesLeft puts in dst the byte array corresponding to x << n in binary. +func ShiftNBytesLeft(dst, x []byte, n int) { + // Erase first n / 8 bytes + copy(dst, x[n/8:]) + + // Shift the remaining n % 8 bits + bits := uint(n % 8) + l := len(dst) + for i := 0; i < l-1; i++ { + dst[i] = (dst[i] << bits) | (dst[i+1] >> uint(8 - bits)) + } + dst[l-1] = dst[l-1] << bits + + // Append trailing zeroes + dst = append(dst, make([]byte, n/8)...) 
+} + +// XorBytesMut assumes equal input length, replaces X with X XOR Y +func XorBytesMut(X, Y []byte) { + for i := 0; i < len(X); i++ { + X[i] ^= Y[i] + } +} + +// XorBytes assumes equal input length, puts X XOR Y into Z +func XorBytes(Z, X, Y []byte) { + for i := 0; i < len(X); i++ { + Z[i] = X[i] ^ Y[i] + } +} + +// RightXor XORs smaller input (assumed Y) at the right of the larger input (assumed X) +func RightXor(X, Y []byte) []byte { + offset := len(X) - len(Y) + xored := make([]byte, len(X)) + copy(xored, X) + for i := 0; i < len(Y); i++ { + xored[offset+i] ^= Y[i] + } + return xored +} + +// SliceForAppend takes a slice and a requested number of bytes. It returns a +// slice with the contents of the given slice followed by that many bytes and a +// second slice that aliases into it and contains only the extra bytes. If the +// original slice has sufficient capacity then no allocation is performed. +func SliceForAppend(in []byte, n int) (head, tail []byte) { + if total := len(in) + n; cap(in) >= total { + head = in[:total] + } else { + head = make([]byte, total) + copy(head, in) + } + tail = head[len(in):] + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go new file mode 100644 index 0000000000..7f78cfa759 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/ocb.go @@ -0,0 +1,317 @@ +// Copyright (C) 2019 ProtonTech AG + +// Package ocb provides an implementation of the OCB (offset codebook) mode of +// operation, as described in RFC-7253 of the IRTF and in Rogaway, Bellare, +// Black and Krovetz - OCB: A BLOCK-CIPHER MODE OF OPERATION FOR EFFICIENT +// AUTHENTICATED ENCRYPTION (2003). +// Security considerations (from RFC-7253): A private key MUST NOT be used to +// encrypt more than 2^48 blocks. Tag length should be at least 12 bytes (a +// brute-force forging adversary succeeds after 2^{tag length} attempts). A +// single key SHOULD NOT be used to decrypt ciphertext with different tag +// lengths. Nonces need not be secret, but MUST NOT be reused. +// This package only supports underlying block ciphers with 128-bit blocks, +// such as AES-{128, 192, 256}, but may be extended to other sizes. +package ocb + +import ( + "bytes" + "crypto/cipher" + "crypto/subtle" + "errors" + "github.com/ProtonMail/go-crypto/internal/byteutil" + "math/bits" +) + +type ocb struct { + block cipher.Block + tagSize int + nonceSize int + mask mask + // Optimized en/decrypt: For each nonce N used to en/decrypt, the 'Ktop' + // internal variable can be reused for en/decrypting with nonces sharing + // all but the last 6 bits with N. The prefix of the first nonce used to + // compute the new Ktop, and the Ktop value itself, are stored in + // reusableKtop. If using incremental nonces, this saves one block cipher + // call every 63 out of 64 OCB encryptions, and stores one nonce and one + // output of the block cipher in memory only. + reusableKtop reusableKtop +} + +type mask struct { + // L_*, L_$, (L_i)_{i ∈ N} + lAst []byte + lDol []byte + L [][]byte +} + +type reusableKtop struct { + noncePrefix []byte + Ktop []byte +} + +const ( + defaultTagSize = 16 + defaultNonceSize = 15 +) + +const ( + enc = iota + dec +) + +func (o *ocb) NonceSize() int { + return o.nonceSize +} + +func (o *ocb) Overhead() int { + return o.tagSize +} + +// NewOCB returns an OCB instance with the given block cipher and default +// tag and nonce sizes.
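+// +// A short usage sketch (editor's illustration; the aes import and the key, +// nonce, plaintext, and adata values are assumptions): +// +// block, _ := aes.NewCipher(key) // OCB requires a 128-bit block cipher +// aead, err := NewOCB(block) +// if err != nil { +// // handle error +// } +// ct := aead.Seal(nil, nonce, plaintext, adata) // len(nonce) <= 15 +// pt, err := aead.Open(nil, nonce, ct, adata)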
+func NewOCB(block cipher.Block) (cipher.AEAD, error) { + return NewOCBWithNonceAndTagSize(block, defaultNonceSize, defaultTagSize) +} + +// NewOCBWithNonceAndTagSize returns an OCB instance with the given block +// cipher, nonce length, and tag length. It returns an error on zero nonceSize +// and exceedingly long tag size. +// +// It is recommended to use at least 12 bytes as tag length. +func NewOCBWithNonceAndTagSize( + block cipher.Block, nonceSize, tagSize int) (cipher.AEAD, error) { + if block.BlockSize() != 16 { + return nil, ocbError("Block cipher must have 128-bit blocks") + } + if nonceSize < 1 { + return nil, ocbError("Incorrect nonce length") + } + if nonceSize >= block.BlockSize() { + return nil, ocbError("Nonce length exceeds blocksize - 1") + } + if tagSize > block.BlockSize() { + return nil, ocbError("Custom tag length exceeds blocksize") + } + return &ocb{ + block: block, + tagSize: tagSize, + nonceSize: nonceSize, + mask: initializeMaskTable(block), + reusableKtop: reusableKtop{ + noncePrefix: nil, + Ktop: nil, + }, + }, nil +} + +func (o *ocb) Seal(dst, nonce, plaintext, adata []byte) []byte { + if len(nonce) > o.nonceSize { + panic("crypto/ocb: Incorrect nonce length given to OCB") + } + ret, out := byteutil.SliceForAppend(dst, len(plaintext)+o.tagSize) + o.crypt(enc, out, nonce, adata, plaintext) + return ret +} + +func (o *ocb) Open(dst, nonce, ciphertext, adata []byte) ([]byte, error) { + if len(nonce) > o.nonceSize { + panic("Nonce too long for this instance") + } + if len(ciphertext) < o.tagSize { + return nil, ocbError("Ciphertext shorter than tag length") + } + sep := len(ciphertext) - o.tagSize + ret, out := byteutil.SliceForAppend(dst, len(ciphertext)) + ciphertextData := ciphertext[:sep] + tag := ciphertext[sep:] + o.crypt(dec, out, nonce, adata, ciphertextData) + if subtle.ConstantTimeCompare(ret[sep:], tag) == 1 { + ret = ret[:sep] + return ret, nil + } + for i := range out { + out[i] = 0 + } + return nil, ocbError("Tag authentication failed") +} + +// On instruction enc (resp. dec), crypt is the encrypt (resp. decrypt) +// function. It returns the resulting plain/ciphertext with the tag appended. +func (o *ocb) crypt(instruction int, Y, nonce, adata, X []byte) []byte { + // + // Consider X as a sequence of 128-bit blocks + // + // Note: For encryption (resp. decryption), X is the plaintext (resp., the + // ciphertext without the tag). + blockSize := o.block.BlockSize() + + // + // Nonce-dependent and per-encryption variables + // + // Zero out the last 6 bits of the nonce into truncatedNonce to see if Ktop + // is already computed. + truncatedNonce := make([]byte, len(nonce)) + copy(truncatedNonce, nonce) + truncatedNonce[len(truncatedNonce)-1] &= 192 + Ktop := make([]byte, blockSize) + if bytes.Equal(truncatedNonce, o.reusableKtop.noncePrefix) { + Ktop = o.reusableKtop.Ktop + } else { + // Nonce = num2str(TAGLEN mod 128, 7) || zeros(120 - bitlen(N)) || 1 || N + paddedNonce := append(make([]byte, blockSize-1-len(nonce)), 1) + paddedNonce = append(paddedNonce, truncatedNonce...) + paddedNonce[0] |= byte(((8 * o.tagSize) % (8 * blockSize)) << 1) + // Last 6 bits of paddedNonce are already zero.
Encrypt into Ktop + paddedNonce[blockSize-1] &= 192 + Ktop = paddedNonce + o.block.Encrypt(Ktop, Ktop) + o.reusableKtop.noncePrefix = truncatedNonce + o.reusableKtop.Ktop = Ktop + } + + // Stretch = Ktop || ((lower half of Ktop) XOR (lower half of Ktop << 8)) + xorHalves := make([]byte, blockSize/2) + byteutil.XorBytes(xorHalves, Ktop[:blockSize/2], Ktop[1:1+blockSize/2]) + stretch := append(Ktop, xorHalves...) + bottom := int(nonce[len(nonce)-1] & 63) + offset := make([]byte, len(stretch)) + byteutil.ShiftNBytesLeft(offset, stretch, bottom) + offset = offset[:blockSize] + + // + // Process any whole blocks + // + // Note: For encryption Y is ciphertext || tag, for decryption Y is + // plaintext || tag. + checksum := make([]byte, blockSize) + m := len(X) / blockSize + for i := 0; i < m; i++ { + index := bits.TrailingZeros(uint(i + 1)) + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[bits.TrailingZeros(uint(i+1))]) + blockX := X[i*blockSize : (i+1)*blockSize] + blockY := Y[i*blockSize : (i+1)*blockSize] + byteutil.XorBytes(blockY, blockX, offset) + switch instruction { + case enc: + o.block.Encrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockX) + case dec: + o.block.Decrypt(blockY, blockY) + byteutil.XorBytesMut(blockY, offset) + byteutil.XorBytesMut(checksum, blockY) + } + } + // + // Process any final partial block and compute raw tag + // + tag := make([]byte, blockSize) + if len(X)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + pad := make([]byte, blockSize) + o.block.Encrypt(pad, offset) + chunkX := X[blockSize*m:] + chunkY := Y[blockSize*m : len(X)] + byteutil.XorBytes(chunkY, chunkX, pad[:len(chunkX)]) + // P_* || bit(1) || zeroes(127) - len(P_*) + switch instruction { + case enc: + paddedY := append(chunkX, byte(128)) + paddedY = append(paddedY, make([]byte, blockSize-len(chunkX)-1)...) + byteutil.XorBytesMut(checksum, paddedY) + case dec: + paddedX := append(chunkY, byte(128)) + paddedX = append(paddedX, make([]byte, blockSize-len(chunkY)-1)...) + byteutil.XorBytesMut(checksum, paddedX) + } + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m+len(chunkY):], tag[:o.tagSize]) + } else { + byteutil.XorBytes(tag, checksum, offset) + byteutil.XorBytesMut(tag, o.mask.lDol) + o.block.Encrypt(tag, tag) + byteutil.XorBytesMut(tag, o.hash(adata)) + copy(Y[blockSize*m:], tag[:o.tagSize]) + } + return Y +} + +// This hash function is used to compute the tag. Per design, on empty input it +// returns a slice of zeros, of the same length as the underlying block cipher +// block size. 
+func (o *ocb) hash(adata []byte) []byte { + // + // Consider A as a sequence of 128-bit blocks + // + A := make([]byte, len(adata)) + copy(A, adata) + blockSize := o.block.BlockSize() + + // + // Process any whole blocks + // + sum := make([]byte, blockSize) + offset := make([]byte, blockSize) + m := len(A) / blockSize + for i := 0; i < m; i++ { + chunk := A[blockSize*i : blockSize*(i+1)] + index := bits.TrailingZeros(uint(i + 1)) + // If the mask table is too short + if len(o.mask.L)-1 < index { + o.mask.extendTable(index) + } + byteutil.XorBytesMut(offset, o.mask.L[index]) + byteutil.XorBytesMut(chunk, offset) + o.block.Encrypt(chunk, chunk) + byteutil.XorBytesMut(sum, chunk) + } + + // + // Process any final partial block; compute final hash value + // + if len(A)%blockSize != 0 { + byteutil.XorBytesMut(offset, o.mask.lAst) + // Pad block with 1 || 0 ^ 127 - bitlength(a) + ending := make([]byte, blockSize-len(A)%blockSize) + ending[0] = 0x80 + encrypted := append(A[blockSize*m:], ending...) + byteutil.XorBytesMut(encrypted, offset) + o.block.Encrypt(encrypted, encrypted) + byteutil.XorBytesMut(sum, encrypted) + } + return sum +} + +func initializeMaskTable(block cipher.Block) mask { + // + // Key-dependent variables + // + lAst := make([]byte, block.BlockSize()) + block.Encrypt(lAst, lAst) + lDol := byteutil.GfnDouble(lAst) + L := make([][]byte, 1) + L[0] = byteutil.GfnDouble(lDol) + + return mask{ + lAst: lAst, + lDol: lDol, + L: L, + } +} + +// Extends the L array of mask m up to L[limit], with L[i] = GfnDouble(L[i-1]) +func (m *mask) extendTable(limit int) { + for i := len(m.L); i <= limit; i++ { + m.L = append(m.L, byteutil.GfnDouble(m.L[i-1])) + } +} + +func ocbError(err string) error { + return errors.New("crypto/ocb: " + err) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go new file mode 100644 index 0000000000..0efaf344fd --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/random_vectors.go @@ -0,0 +1,136 @@ +// In the test vectors provided by RFC 7253, the "bottom" +// internal variable, which defines "offset" for the first time, does not +// exceed 15. However, it can attain values up to 63. + +// These vectors include key length in {128, 192, 256}, tag size 128, and +// random nonce, header, and plaintext lengths. + +// This file was automatically generated. 
+ +package ocb + +var randomVectors = []struct { + key, nonce, header, plaintext, ciphertext string +}{ + + {"9438C5D599308EAF13F800D2D31EA7F0", + "C38EE4801BEBFFA1CD8635BE", + "0E507B7DADD8A98CDFE272D3CB6B3E8332B56AE583FB049C0874D4200BED16BD1A044182434E9DA0E841F182DFD5B3016B34641CED0784F1745F63AB3D0DA22D3351C9EF9A658B8081E24498EBF61FCE40DA6D8E184536", + "962D227786FB8913A8BAD5DC3250", + "EEDEF5FFA5986D1E3BF86DDD33EF9ADC79DCA06E215FA772CCBA814F63AD"}, + {"BA7DE631C7D6712167C6724F5B9A2B1D", + "35263EBDA05765DC0E71F1F5", + "0103257B4224507C0242FEFE821EA7FA42E0A82863E5F8B68F7D881B4B44FA428A2B6B21D2F591260802D8AB6D83", + "9D6D1FC93AE8A64E7889B7B2E3521EFA9B920A8DDB692E6F833DDC4A38AFA535E5E2A3ED82CB7E26404AB86C54D01C4668F28398C2DF33D5D561CBA1C8DCFA7A912F5048E545B59483C0E3221F54B14DAA2E4EB657B3BEF9554F34CAD69B2724AE962D3D8A", + "E93852D1985C5E775655E937FA79CE5BF28A585F2AF53A5018853B9634BE3C84499AC0081918FDCE0624494D60E25F76ACD6853AC7576E3C350F332249BFCABD4E73CEABC36BE4EDDA40914E598AE74174A0D7442149B26990899491BDDFE8FC54D6C18E83AE9E9A6FFBF5D376565633862EEAD88D"}, + {"2E74B25289F6FD3E578C24866E9C72A5", + "FD912F15025AF8414642BA1D1D", + "FB5FB8C26F365EEDAB5FE260C6E3CCD27806729C8335F146063A7F9EA93290E56CF84576EB446350D22AD730547C267B1F0BBB97EB34E1E2C41A", + "6C092EBF78F76EE8C1C6E592277D9545BA16EDB67BC7D8480B9827702DC2F8A129E2B08A2CE710CA7E1DA45CE162BB6CD4B512E632116E2211D3C90871EFB06B8D4B902681C7FB", + "6AC0A77F26531BF4F354A1737F99E49BE32ECD909A7A71AD69352906F54B08A9CE9B8CA5D724CBFFC5673437F23F630697F3B84117A1431D6FA8CC13A974FB4AD360300522E09511B99E71065D5AC4BBCB1D791E864EF4"}, + {"E7EC507C802528F790AFF5303A017B17", + "4B97A7A568940A9E3CE7A99E93031E", + "28349BDC5A09390C480F9B8AA3EDEA3DDB8B9D64BCA322C570B8225DF0E31190DAB25A4014BA39519E02ABFB12B89AA28BBFD29E486E7FB28734258C817B63CED9912DBAFEBB93E2798AB2890DE3B0ACFCFF906AB15563EF7823CE83D27CDB251195E22BD1337BCBDE65E7C2C427321C463C2777BFE5AEAA", + "9455B3EA706B74", + "7F33BA3EA848D48A96B9530E26888F43EBD4463C9399B6"}, + {"6C928AA3224736F28EE7378DE0090191", + "8936138E2E4C6A13280017A1622D", + "6202717F2631565BDCDC57C6584543E72A7C8BD444D0D108ED35069819633C", + "DA0691439E5F035F3E455269D14FE5C201C8C9B0A3FE2D3F86BCC59387C868FE65733D388360B31E3CE28B4BF6A8BE636706B536D5720DB66B47CF1C7A5AFD6F61E0EF90F1726D6B0E169F9A768B2B7AE4EE00A17F630AC905FCAAA1B707FFF25B3A1AAE83B504837C64A5639B2A34002B300EC035C9B43654DA55", + "B8804D182AB0F0EEB464FA7BD1329AD6154F982013F3765FEDFE09E26DAC078C9C1439BFC1159D6C02A25E3FF83EF852570117B315852AD5EE20E0FA3AA0A626B0E43BC0CEA38B44579DD36803455FB46989B90E6D229F513FD727AF8372517E9488384C515D6067704119C931299A0982EDDFB9C2E86A90C450C077EB222511EC9CCABC9FCFDB19F70088"}, + {"ECEA315CA4B3F425B0C9957A17805EA4", + "664CDAE18403F4F9BA13015A44FC", + "642AFB090D6C6DB46783F08B01A3EF2A8FEB5736B531EAC226E7888FCC8505F396818F83105065FACB3267485B9E5E4A0261F621041C08FCCB2A809A49AB5252A91D0971BCC620B9D614BD77E57A0EED2FA5", + "6852C31F8083E20E364CEA21BB7854D67CEE812FE1C9ED2425C0932A90D3780728D1BB", + "2ECEF962A9695A463ADABB275BDA9FF8B2BA57AEC2F52EFFB700CD9271A74D2A011C24AEA946051BD6291776429B7E681BA33E"}, + {"4EE616C4A58AAA380878F71A373461F6", + "91B8C9C176D9C385E9C47E52", + "CDA440B7F9762C572A718AC754EDEECC119E5EE0CCB9FEA4FFB22EEE75087C032EBF3DA9CDD8A28CC010B99ED45143B41A4BA50EA2A005473F89639237838867A57F23B0F0ED3BF22490E4501DAC9C658A9B9F", + "D6E645FA9AE410D15B8123FD757FA356A8DBE9258DDB5BE88832E615910993F497EC", + "B70ED7BF959FB2AAED4F36174A2A99BFB16992C8CDF369C782C4DB9C73DE78C5DB8E0615F647243B97ACDB24503BC9CADC48"}, + {"DCD475773136C830D5E3D0C5FE05B7FF", 
+ "BB8E1FBB483BE7616A922C4A", + "36FEF2E1CB29E76A6EA663FC3AF66ECD7404F466382F7B040AABED62293302B56E8783EF7EBC21B4A16C3E78A7483A0A403F253A2CDC5BBF79DC3DAE6C73F39A961D8FBBE8D41B", + "441E886EA38322B2437ECA7DEB5282518865A66780A454E510878E61BFEC3106A3CD93D2A02052E6F9E1832F9791053E3B76BF4C07EFDD6D4106E3027FABB752E60C1AA425416A87D53938163817A1051EBA1D1DEEB4B9B25C7E97368B52E5911A31810B0EC5AF547559B6142D9F4C4A6EF24A4CF75271BF9D48F62B", + "1BE4DD2F4E25A6512C2CC71D24BBB07368589A94C2714962CD0ACE5605688F06342587521E75F0ACAFFD86212FB5C34327D238DB36CF2B787794B9A4412E7CD1410EA5DDD2450C265F29CF96013CD213FD2880657694D718558964BC189B4A84AFCF47EB012935483052399DBA5B088B0A0477F20DFE0E85DCB735E21F22A439FB837DD365A93116D063E607"}, + {"3FBA2B3D30177FFE15C1C59ED2148BB2C091F5615FBA7C07", + "FACF804A4BEBF998505FF9DE", + "8213B9263B2971A5BDA18DBD02208EE1", + "15B323926993B326EA19F892D704439FC478828322AF72118748284A1FD8A6D814E641F70512FD706980337379F31DC63355974738D7FEA87AD2858C0C2EBBFBE74371C21450072373C7B651B334D7C4D43260B9D7CCD3AF9EDB", + "6D35DC1469B26E6AAB26272A41B46916397C24C485B61162E640A062D9275BC33DDCFD3D9E1A53B6C8F51AC89B66A41D59B3574197A40D9B6DCF8A4E2A001409C8112F16B9C389E0096179DB914E05D6D11ED0005AD17E1CE105A2F0BAB8F6B1540DEB968B7A5428FF44"}, + {"53B52B8D4D748BCDF1DDE68857832FA46227FA6E2F32EFA1", + "0B0EF53D4606B28D1398355F", + "F23882436349094AF98BCACA8218E81581A043B19009E28EFBF2DE37883E04864148CC01D240552CA8844EC1456F42034653067DA67E80F87105FD06E14FF771246C9612867BE4D215F6D761", + "F15030679BD4088D42CAC9BF2E9606EAD4798782FA3ED8C57EBE7F84A53236F51B25967C6489D0CD20C9EEA752F9BC", + "67B96E2D67C3729C96DAEAEDF821D61C17E648643A2134C5621FEC621186915AD80864BFD1EB5B238BF526A679385E012A457F583AFA78134242E9D9C1B4E4"}, + {"0272DD80F23399F49BFC320381A5CD8225867245A49A7D41", + "5C83F4896D0738E1366B1836", + "69B0337289B19F73A12BAEEA857CCAF396C11113715D9500CCCF48BA08CFF12BC8B4BADB3084E63B85719DB5058FA7C2C11DEB096D7943CFA7CAF5", + "C01AD10FC8B562CD17C7BC2FAB3E26CBDFF8D7F4DEA816794BBCC12336991712972F52816AABAB244EB43B0137E2BAC1DD413CE79531E78BEF782E6B439612BB3AEF154DE3502784F287958EBC159419F9EBA27916A28D6307324129F506B1DE80C1755A929F87", + "FEFE52DD7159C8DD6E8EC2D3D3C0F37AB6CB471A75A071D17EC4ACDD8F3AA4D7D4F7BB559F3C09099E3D9003E5E8AA1F556B79CECDE66F85B08FA5955E6976BF2695EA076388A62D2AD5BAB7CBF1A7F3F4C8D5CDF37CDE99BD3E30B685D9E5EEE48C7C89118EF4878EB89747F28271FA2CC45F8E9E7601"}, + {"3EEAED04A455D6E5E5AB53CFD5AFD2F2BC625C7BF4BE49A5", + "36B88F63ADBB5668588181D774", + "D367E3CB3703E762D23C6533188EF7028EFF9D935A3977150361997EC9DEAF1E4794BDE26AA8B53C124980B1362EC86FCDDFC7A90073171C1BAEE351A53234B86C66E8AB92FAE99EC6967A6D3428892D80", + "573454C719A9A55E04437BF7CBAAF27563CCCD92ADD5E515CD63305DFF0687E5EEF790C5DCA5C0033E9AB129505E2775438D92B38F08F3B0356BA142C6F694", + "E9F79A5B432D9E682C9AAA5661CFC2E49A0FCB81A431E54B42EB73DD3BED3F377FEC556ABA81624BA64A5D739AD41467460088F8D4F442180A9382CA635745473794C382FCDDC49BA4EB6D8A44AE3C"}, + {"B695C691538F8CBD60F039D0E28894E3693CC7C36D92D79D", + "BC099AEB637361BAC536B57618", + "BFFF1A65AE38D1DC142C71637319F5F6508E2CB33C9DCB94202B359ED5A5ED8042E7F4F09231D32A7242976677E6F4C549BF65FADC99E5AF43F7A46FD95E16C2", + "081DF3FD85B415D803F0BE5AC58CFF0023FDDED99788296C3731D8", + "E50C64E3614D94FE69C47092E46ACC9957C6FEA2CCBF96BC62FBABE7424753C75F9C147C42AE26FE171531"}, + {"C9ACBD2718F0689A1BE9802A551B6B8D9CF5614DAF5E65ED", + "B1B0AAF373B8B026EB80422051D8", + "6648C0E61AC733C76119D23FB24548D637751387AA2EAE9D80E912B7BD486CAAD9EAF4D7A5FE2B54AAD481E8EC94BB4D558000896E2010462B70C9FED1E7273080D1", + 
"189F591F6CB6D59AFEDD14C341741A8F1037DC0DF00FC57CE65C30F49E860255CEA5DC6019380CC0FE8880BC1A9E685F41C239C38F36E3F2A1388865C5C311059C0A", + "922A5E949B61D03BE34AB5F4E58607D4504EA14017BB363DAE3C873059EA7A1C77A746FB78981671D26C2CF6D9F24952D510044CE02A10177E9DB42D0145211DFE6E84369C5E3BC2669EAB4147B2822895F9"}, + {"7A832BD2CF5BF4919F353CE2A8C86A5E406DA2D52BE16A72", + "2F2F17CECF7E5A756D10785A3CB9DB", + "61DA05E3788CC2D8405DBA70C7A28E5AF699863C9F72E6C6770126929F5D6FA267F005EBCF49495CB46400958A3AE80D1289D1C671", + "44E91121195A41AF14E8CFDBD39A4B517BE0DF1A72977ED8A3EEF8EEDA1166B2EB6DB2C4AE2E74FA0F0C74537F659BFBD141E5DDEC67E64EDA85AABD3F52C85A785B9FB3CECD70E7DF", + "BEDF596EA21288D2B84901E188F6EE1468B14D5161D3802DBFE00D60203A24E2AB62714BF272A45551489838C3A7FEAADC177B591836E73684867CCF4E12901DCF2064058726BBA554E84ADC5136F507E961188D4AF06943D3"}, + {"1508E8AE9079AA15F1CEC4F776B4D11BCCB061B58AA56C18", + "BCA625674F41D1E3AB47672DC0C3", + "8B12CF84F16360F0EAD2A41BC021530FFCEC7F3579CAE658E10E2D3D81870F65AFCED0C77C6C4C6E6BA424FF23088C796BA6195ABA35094BF1829E089662E7A95FC90750AE16D0C8AFA55DAC789D7735B970B58D4BE7CEC7341DA82A0179A01929C27A59C5063215B859EA43", + "E525422519ECE070E82C", + "B47BC07C3ED1C0A43BA52C43CBACBCDBB29CAF1001E09FDF7107"}, + {"7550C2761644E911FE9ADD119BAC07376BEA442845FEAD876D7E7AC1B713E464", + "36D2EC25ADD33CDEDF495205BBC923", + "7FCFE81A3790DE97FFC3DE160C470847EA7E841177C2F759571CBD837EA004A6CA8C6F4AEBFF2E9FD552D73EB8A30705D58D70C0B67AEEA280CBBF0A477358ACEF1E7508F2735CD9A0E4F9AC92B8C008F575D3B6278F1C18BD01227E3502E5255F3AB1893632AD00C717C588EF652A51A43209E7EE90", + "2B1A62F8FDFAA3C16470A21AD307C9A7D03ADE8EF72C69B06F8D738CDE578D7AEFD0D40BD9C022FB9F580DF5394C998ACCCEFC5471A3996FB8F1045A81FDC6F32D13502EA65A211390C8D882B8E0BEFD8DD8CBEF51D1597B124E9F7F", + "C873E02A22DB89EB0787DB6A60B99F7E4A0A085D5C4232A81ADCE2D60AA36F92DDC33F93DD8640AC0E08416B187FB382B3EC3EE85A64B0E6EE41C1366A5AD2A282F66605E87031CCBA2FA7B2DA201D975994AADE3DD1EE122AE09604AD489B84BF0C1AB7129EE16C6934850E"}, + {"A51300285E554FDBDE7F771A9A9A80955639DD87129FAEF74987C91FB9687C71", + "81691D5D20EC818FCFF24B33DECC", + "C948093218AA9EB2A8E44A87EEA73FC8B6B75A196819A14BD83709EA323E8DF8B491045220E1D88729A38DBCFFB60D3056DAD4564498FD6574F74512945DEB34B69329ACED9FFC05D5D59DFCD5B973E2ACAFE6AD1EF8BBBC49351A2DD12508ED89ED", + "EB861165DAF7625F827C6B574ED703F03215", + "C6CD1CE76D2B3679C1B5AA1CFD67CCB55444B6BFD3E22C81CBC9BB738796B83E54E3"}, + {"8CE0156D26FAEB7E0B9B800BBB2E9D4075B5EAC5C62358B0E7F6FCE610223282", + "D2A7B94DD12CDACA909D3AD7", + "E021A78F374FC271389AB9A3E97077D755", + "7C26000B58929F5095E1CEE154F76C2A299248E299F9B5ADE6C403AA1FD4A67FD4E0232F214CE7B919EE7A1027D2B76C57475715CD078461", + "C556FB38DF069B56F337B5FF5775CE6EAA16824DFA754F20B78819028EA635C3BB7AA731DE8776B2DCB67DCA2D33EEDF3C7E52EA450013722A41755A0752433ED17BDD5991AAE77A"}, + {"1E8000A2CE00A561C9920A30BF0D7B983FEF8A1014C8F04C35CA6970E6BA02BD", + "65ED3D63F79F90BBFD19775E", + "336A8C0B7243582A46B221AA677647FCAE91", + "134A8B34824A290E7B", + "914FBEF80D0E6E17F8BDBB6097EBF5FBB0554952DC2B9E5151"}, + {"53D5607BBE690B6E8D8F6D97F3DF2BA853B682597A214B8AA0EA6E598650AF15", + "C391A856B9FE234E14BA1AC7BB40FF", + "479682BC21349C4BE1641D5E78FE2C79EC1B9CF5470936DCAD9967A4DCD7C4EFADA593BC9EDE71E6A08829B8580901B61E274227E9D918502DE3", + "EAD154DC09C5E26C5D26FF33ED148B27120C7F2C23225CC0D0631B03E1F6C6D96FEB88C1A4052ACB4CE746B884B6502931F407021126C6AAB8C514C077A5A38438AE88EE", + 
"938821286EBB671D999B87C032E1D6055392EB564E57970D55E545FC5E8BAB90E6E3E3C0913F6320995FC636D72CD9919657CC38BD51552F4A502D8D1FE56DB33EBAC5092630E69EBB986F0E15CEE9FC8C052501"}, + {"294362FCC984F440CEA3E9F7D2C06AF20C53AAC1B3738CA2186C914A6E193ABB", + "B15B61C8BB39261A8F55AB178EC3", + "D0729B6B75BB", + "2BD089ADCE9F334BAE3B065996C7D616DD0C27DF4218DCEEA0FBCA0F968837CE26B0876083327E25681FDDD620A32EC0DA12F73FAE826CC94BFF2B90A54D2651", + "AC94B25E4E21DE2437B806966CCD5D9385EF0CD4A51AB9FA6DE675C7B8952D67802E9FEC1FDE9F5D1EAB06057498BC0EEA454804FC9D2068982A3E24182D9AC2E7AB9994DDC899A604264583F63D066B"}, + {"959DBFEB039B1A5B8CE6A44649B602AAA5F98A906DB96143D202CD2024F749D9", + "01D7BDB1133E9C347486C1EFA6", + "F3843955BD741F379DD750585EDC55E2CDA05CCBA8C1F4622AC2FE35214BC3A019B8BD12C4CC42D9213D1E1556941E8D8450830287FFB3B763A13722DD4140ED9846FB5FFF745D7B0B967D810A068222E10B259AF1D392035B0D83DC1498A6830B11B2418A840212599171E0258A1C203B05362978", + "A21811232C950FA8B12237C2EBD6A7CD2C3A155905E9E0C7C120", + "63C1CE397B22F1A03F1FA549B43178BC405B152D3C95E977426D519B3DFCA28498823240592B6EEE7A14"}, + {"096AE499F5294173F34FF2B375F0E5D5AB79D0D03B33B1A74D7D576826345DF4", + "0C52B3D11D636E5910A4DD76D32C", + "229E9ECA3053789E937447BC719467075B6138A142DA528DA8F0CF8DDF022FD9AF8E74779BA3AC306609", + "8B7A00038783E8BAF6EDEAE0C4EAB48FC8FD501A588C7E4A4DB71E3604F2155A97687D3D2FFF8569261375A513CF4398CE0F87CA1658A1050F6EF6C4EA3E25", + "C20B6CF8D3C8241825FD90B2EDAC7593600646E579A8D8DAAE9E2E40C3835FE801B2BE4379131452BC5182C90307B176DFBE2049544222FE7783147B690774F6D9D7CEF52A91E61E298E9AA15464AC"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go new file mode 100644 index 0000000000..843085589c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_a.go @@ -0,0 +1,78 @@ +package ocb + +import ( + "encoding/hex" +) + +// Test vectors from https://tools.ietf.org/html/rfc7253. Note that key is +// shared accross tests. 
+var testKey, _ = hex.DecodeString("000102030405060708090A0B0C0D0E0F") + +var rfc7253testVectors = []struct { + nonce, header, plaintext, ciphertext string +}{ + {"BBAA99887766554433221100", + "", + "", + "785407BFFFC8AD9EDCC5520AC9111EE6"}, + {"BBAA99887766554433221101", + "0001020304050607", + "0001020304050607", + "6820B3657B6F615A5725BDA0D3B4EB3A257C9AF1F8F03009"}, + {"BBAA99887766554433221102", + "0001020304050607", + "", + "81017F8203F081277152FADE694A0A00"}, + {"BBAA99887766554433221103", + "", + "0001020304050607", + "45DD69F8F5AAE72414054CD1F35D82760B2CD00D2F99BFA9"}, + {"BBAA99887766554433221104", + "000102030405060708090A0B0C0D0E0F", + "000102030405060708090A0B0C0D0E0F", + "571D535B60B277188BE5147170A9A22C3AD7A4FF3835B8C5701C1CCEC8FC3358"}, + {"BBAA99887766554433221105", + "000102030405060708090A0B0C0D0E0F", + "", + "8CF761B6902EF764462AD86498CA6B97"}, + {"BBAA99887766554433221106", + "", + "000102030405060708090A0B0C0D0E0F", + "5CE88EC2E0692706A915C00AEB8B2396F40E1C743F52436BDF06D8FA1ECA343D"}, + {"BBAA99887766554433221107", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "1CA2207308C87C010756104D8840CE1952F09673A448A122C92C62241051F57356D7F3C90BB0E07F"}, + {"BBAA99887766554433221108", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "", + "6DC225A071FC1B9F7C69F93B0F1E10DE"}, + {"BBAA99887766554433221109", + "", + "000102030405060708090A0B0C0D0E0F1011121314151617", + "221BD0DE7FA6FE993ECCD769460A0AF2D6CDED0C395B1C3CE725F32494B9F914D85C0B1EB38357FF"}, + {"BBAA9988776655443322110A", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "BD6F6C496201C69296C11EFD138A467ABD3C707924B964DEAFFC40319AF5A48540FBBA186C5553C68AD9F592A79A4240"}, + {"BBAA9988776655443322110B", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "", + "FE80690BEE8A485D11F32965BC9D2A32"}, + {"BBAA9988776655443322110C", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F", + "2942BFC773BDA23CABC6ACFD9BFD5835BD300F0973792EF46040C53F1432BCDFB5E1DDE3BC18A5F840B52E653444D5DF"}, + {"BBAA9988776655443322110D", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "D5CA91748410C1751FF8A2F618255B68A0A12E093FF454606E59F9C1D0DDC54B65E8628E568BAD7AED07BA06A4A69483A7035490C5769E60"}, + {"BBAA9988776655443322110E", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "", + "C5CD9D1850C141E358649994EE701B68"}, + {"BBAA9988776655443322110F", + "", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "4412923493C57D5DE0D700F753CCE0D1D2D95060122E9F15A5DDBFC5787E50B5CC55EE507BCB084E479AD363AC366B95A98CA5F3000B1479"}, +} diff --git a/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go new file mode 100644 index 0000000000..5dc158f012 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/ocb/rfc7253_test_vectors_suite_b.go @@ -0,0 +1,24 @@ +package ocb + +// Second set of test vectors from https://tools.ietf.org/html/rfc7253 +var rfc7253TestVectorTaglen96 = struct { + key, nonce, header, plaintext, ciphertext string +}{"0F0E0D0C0B0A09080706050403020100", + "BBAA9988776655443322110D", + 
"000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F2021222324252627", + "1792A4E31E0755FB03E31B22116E6C2DDF9EFD6E33D536F1A0124B0A55BAE884ED93481529C76B6AD0C515F4D1CDD4FDAC4F02AA"} + +var rfc7253AlgorithmTest = []struct { + KEYLEN, TAGLEN int + OUTPUT string }{ + {128, 128, "67E944D23256C5E0B6C61FA22FDF1EA2"}, + {192, 128, "F673F2C3E7174AAE7BAE986CA9F29E17"}, + {256, 128, "D90EB8E9C977C88B79DD793D7FFA161C"}, + {128, 96, "77A3D8E73589158D25D01209"}, + {192, 96, "05D56EAD2752C86BE6932C5E"}, + {256, 96, "5458359AC23B0CBA9E6330DD"}, + {128, 64, "192C9B7BD90BA06A"}, + {192, 64, "0066BC6E0EF34E24"}, + {256, 64, "7D4EA5D445501CBE"}, + } diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go new file mode 100644 index 0000000000..3c6251d1ce --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/aes/keywrap/keywrap.go @@ -0,0 +1,153 @@ +// Copyright 2014 Matthew Endsley +// All rights reserved +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted providing that the following conditions +// are met: +// 1. Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY +// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +// OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +// IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. + +// Package keywrap is an implementation of the RFC 3394 AES key wrapping +// algorithm. This is used in OpenPGP with elliptic curve keys. +package keywrap + +import ( + "crypto/aes" + "encoding/binary" + "errors" +) + +var ( + // ErrWrapPlaintext is returned if the plaintext is not a multiple + // of 64 bits. + ErrWrapPlaintext = errors.New("keywrap: plainText must be a multiple of 64 bits") + + // ErrUnwrapCiphertext is returned if the ciphertext is not a + // multiple of 64 bits. + ErrUnwrapCiphertext = errors.New("keywrap: cipherText must by a multiple of 64 bits") + + // ErrUnwrapFailed is returned if unwrapping a key fails. + ErrUnwrapFailed = errors.New("keywrap: failed to unwrap key") + + // NB: the AES NewCipher call only fails if the key is an invalid length. + + // ErrInvalidKey is returned when the AES key is invalid. + ErrInvalidKey = errors.New("keywrap: invalid AES key") +) + +// Wrap a key using the RFC 3394 AES Key Wrap Algorithm. 
+func Wrap(key, plainText []byte) ([]byte, error) { + if len(plainText)%8 != 0 { + return nil, ErrWrapPlaintext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(plainText) / 8 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = IV, an initial value (see 2.2.3) + for ii := 0; ii < 8; ii++ { + block[ii] = 0xA6 + } + + // - For i = 1 to n + // - Set R[i] = P[i] + intermediate := make([]byte, len(plainText)) + copy(intermediate, plainText) + + // 2) Calculate intermediate values. + for ii := 0; ii < 6; ii++ { + for jj := 0; jj < nblocks; jj++ { + // - B = AES(K, A | R[i]) + copy(block[8:], intermediate[jj*8:jj*8+8]) + c.Encrypt(block[:], block[:]) + + // - A = MSB(64, B) ^ t where t = (n*j)+1 + t := uint64(ii*nblocks + jj + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + // - R[i] = LSB(64, B) + copy(intermediate[jj*8:jj*8+8], block[8:]) + } + } + + // 3) Output results. + // - Set C[0] = A + // - For i = 1 to n + // - C[i] = R[i] + return append(block[:8], intermediate...), nil +} + +// Unwrap a key using the RFC 3394 AES Key Wrap Algorithm. +func Unwrap(key, cipherText []byte) ([]byte, error) { + if len(cipherText)%8 != 0 { + return nil, ErrUnwrapCiphertext + } + + c, err := aes.NewCipher(key) + if err != nil { + return nil, ErrInvalidKey + } + + nblocks := len(cipherText)/8 - 1 + + // 1) Initialize variables. + var block [aes.BlockSize]byte + // - Set A = C[0] + copy(block[:8], cipherText[:8]) + + // - For i = 1 to n + // - Set R[i] = C[i] + intermediate := make([]byte, len(cipherText)-8) + copy(intermediate, cipherText[8:]) + + // 2) Compute intermediate values. + for jj := 5; jj >= 0; jj-- { + for ii := nblocks - 1; ii >= 0; ii-- { + // - B = AES-1(K, (A ^ t) | R[i]) where t = n*j+1 + // - A = MSB(64, B) + t := uint64(jj*nblocks + ii + 1) + val := binary.BigEndian.Uint64(block[:8]) ^ t + binary.BigEndian.PutUint64(block[:8], val) + + copy(block[8:], intermediate[ii*8:ii*8+8]) + c.Decrypt(block[:], block[:]) + + // - R[i] = LSB(B, 64) + copy(intermediate[ii*8:ii*8+8], block[8:]) + } + } + + // 3) Output results. + // - If A is an appropriate initial value (see 2.2.3), + for ii := 0; ii < 8; ii++ { + if block[ii] != 0xA6 { + return nil, ErrUnwrapFailed + } + } + + // - For i = 1 to n + // - P[i] = R[i] + return intermediate, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go new file mode 100644 index 0000000000..3b357e5851 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/armor.go @@ -0,0 +1,224 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package armor implements OpenPGP ASCII Armor, see RFC 4880. OpenPGP Armor is +// very similar to PEM except that it has an additional CRC checksum. +package armor // import "github.com/ProtonMail/go-crypto/openpgp/armor" + +import ( + "bufio" + "bytes" + "encoding/base64" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "io" +) + +// A Block represents an OpenPGP armored structure. +// +// The encoded form is: +// -----BEGIN Type----- +// Headers +// +// base64-encoded Bytes +// '=' base64 encoded checksum +// -----END Type----- +// where Headers is a possibly empty sequence of Key: Value lines. 
+// +// Since the armored data can be very large, this package presents a streaming +// interface. +type Block struct { + Type string // The type, taken from the preamble (i.e. "PGP SIGNATURE"). + Header map[string]string // Optional headers. + Body io.Reader // A Reader from which the contents can be read + lReader lineReader + oReader openpgpReader +} + +var ArmorCorrupt error = errors.StructuralError("armor invalid") + +const crc24Init = 0xb704ce +const crc24Poly = 0x1864cfb +const crc24Mask = 0xffffff + +// crc24 calculates the OpenPGP checksum as specified in RFC 4880, section 6.1 +func crc24(crc uint32, d []byte) uint32 { + for _, b := range d { + crc ^= uint32(b) << 16 + for i := 0; i < 8; i++ { + crc <<= 1 + if crc&0x1000000 != 0 { + crc ^= crc24Poly + } + } + } + return crc +} + +var armorStart = []byte("-----BEGIN ") +var armorEnd = []byte("-----END ") +var armorEndOfLine = []byte("-----") + +// lineReader wraps a line based reader. It watches for the end of an armor +// block and records the expected CRC value. +type lineReader struct { + in *bufio.Reader + buf []byte + eof bool + crc uint32 + crcSet bool +} + +func (l *lineReader) Read(p []byte) (n int, err error) { + if l.eof { + return 0, io.EOF + } + + if len(l.buf) > 0 { + n = copy(p, l.buf) + l.buf = l.buf[n:] + return + } + + line, isPrefix, err := l.in.ReadLine() + if err != nil { + return + } + if isPrefix { + return 0, ArmorCorrupt + } + + if bytes.HasPrefix(line, armorEnd) { + l.eof = true + return 0, io.EOF + } + + if len(line) == 5 && line[0] == '=' { + // This is the checksum line + var expectedBytes [3]byte + var m int + m, err = base64.StdEncoding.Decode(expectedBytes[0:], line[1:]) + if m != 3 || err != nil { + return + } + l.crc = uint32(expectedBytes[0])<<16 | + uint32(expectedBytes[1])<<8 | + uint32(expectedBytes[2]) + + line, _, err = l.in.ReadLine() + if err != nil && err != io.EOF { + return + } + if !bytes.HasPrefix(line, armorEnd) { + return 0, ArmorCorrupt + } + + l.eof = true + l.crcSet = true + return 0, io.EOF + } + + if len(line) > 96 { + return 0, ArmorCorrupt + } + + n = copy(p, line) + bytesToSave := len(line) - n + if bytesToSave > 0 { + if cap(l.buf) < bytesToSave { + l.buf = make([]byte, 0, bytesToSave) + } + l.buf = l.buf[0:bytesToSave] + copy(l.buf, line[n:]) + } + + return +} + +// openpgpReader passes Read calls to the underlying base64 decoder, but keeps +// a running CRC of the resulting data and checks the CRC against the value +// found by the lineReader at EOF. +type openpgpReader struct { + lReader *lineReader + b64Reader io.Reader + currentCRC uint32 +} + +func (r *openpgpReader) Read(p []byte) (n int, err error) { + n, err = r.b64Reader.Read(p) + r.currentCRC = crc24(r.currentCRC, p[:n]) + + if err == io.EOF && r.lReader.crcSet && r.lReader.crc != uint32(r.currentCRC&crc24Mask) { + return 0, ArmorCorrupt + } + + return +} + +// Decode reads a PGP armored block from the given Reader. It will ignore +// leading garbage. If it doesn't find a block, it will return nil, io.EOF. The +// given Reader is not usable after calling this function: an arbitrary amount +// of data may have been read past the end of the block. 
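+//
+// A minimal sketch of reading one armored block (imports and the input text
+// are assumed by the caller):
+//
+//	block, err := Decode(strings.NewReader(armoredText))
+//	if err == nil {
+//		fmt.Println(block.Type)           // e.g. "PGP MESSAGE"
+//		body, _ := io.ReadAll(block.Body) // the CRC is verified at EOF
+//		_ = body
+//	}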
+func Decode(in io.Reader) (p *Block, err error) { + r := bufio.NewReaderSize(in, 100) + var line []byte + ignoreNext := false + +TryNextBlock: + p = nil + + // Skip leading garbage + for { + ignoreThis := ignoreNext + line, ignoreNext, err = r.ReadLine() + if err != nil { + return + } + if ignoreNext || ignoreThis { + continue + } + line = bytes.TrimSpace(line) + if len(line) > len(armorStart)+len(armorEndOfLine) && bytes.HasPrefix(line, armorStart) { + break + } + } + + p = new(Block) + p.Type = string(line[len(armorStart) : len(line)-len(armorEndOfLine)]) + p.Header = make(map[string]string) + nextIsContinuation := false + var lastKey string + + // Read headers + for { + isContinuation := nextIsContinuation + line, nextIsContinuation, err = r.ReadLine() + if err != nil { + p = nil + return + } + if isContinuation { + p.Header[lastKey] += string(line) + continue + } + line = bytes.TrimSpace(line) + if len(line) == 0 { + break + } + + i := bytes.Index(line, []byte(": ")) + if i == -1 { + goto TryNextBlock + } + lastKey = string(line[:i]) + p.Header[lastKey] = string(line[i+2:]) + } + + p.lReader.in = r + p.oReader.currentCRC = crc24Init + p.oReader.lReader = &p.lReader + p.oReader.b64Reader = base64.NewDecoder(base64.StdEncoding, &p.lReader) + p.Body = &p.oReader + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go new file mode 100644 index 0000000000..6f07582c37 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/armor/encode.go @@ -0,0 +1,160 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package armor + +import ( + "encoding/base64" + "io" +) + +var armorHeaderSep = []byte(": ") +var blockEnd = []byte("\n=") +var newline = []byte("\n") +var armorEndOfLineOut = []byte("-----\n") + +// writeSlices writes its arguments to the given Writer. +func writeSlices(out io.Writer, slices ...[]byte) (err error) { + for _, s := range slices { + _, err = out.Write(s) + if err != nil { + return err + } + } + return +} + +// lineBreaker breaks data across several lines, all of the same byte length +// (except possibly the last). Lines are broken with a single '\n'. +type lineBreaker struct { + lineLength int + line []byte + used int + out io.Writer + haveWritten bool +} + +func newLineBreaker(out io.Writer, lineLength int) *lineBreaker { + return &lineBreaker{ + lineLength: lineLength, + line: make([]byte, lineLength), + used: 0, + out: out, + } +} + +func (l *lineBreaker) Write(b []byte) (n int, err error) { + n = len(b) + + if n == 0 { + return + } + + if l.used == 0 && l.haveWritten { + _, err = l.out.Write([]byte{'\n'}) + if err != nil { + return + } + } + + if l.used+len(b) < l.lineLength { + l.used += copy(l.line[l.used:], b) + return + } + + l.haveWritten = true + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + excess := l.lineLength - l.used + l.used = 0 + + _, err = l.out.Write(b[0:excess]) + if err != nil { + return + } + + _, err = l.Write(b[excess:]) + return +} + +func (l *lineBreaker) Close() (err error) { + if l.used > 0 { + _, err = l.out.Write(l.line[0:l.used]) + if err != nil { + return + } + } + + return +} + +// encoding keeps track of a running CRC24 over the data which has been written +// to it and outputs a OpenPGP checksum when closed, followed by an armor +// trailer. 
+// +// It's built into a stack of io.Writers: +// encoding -> base64 encoder -> lineBreaker -> out +type encoding struct { + out io.Writer + breaker *lineBreaker + b64 io.WriteCloser + crc uint32 + blockType []byte +} + +func (e *encoding) Write(data []byte) (n int, err error) { + e.crc = crc24(e.crc, data) + return e.b64.Write(data) +} + +func (e *encoding) Close() (err error) { + err = e.b64.Close() + if err != nil { + return + } + e.breaker.Close() + + var checksumBytes [3]byte + checksumBytes[0] = byte(e.crc >> 16) + checksumBytes[1] = byte(e.crc >> 8) + checksumBytes[2] = byte(e.crc) + + var b64ChecksumBytes [4]byte + base64.StdEncoding.Encode(b64ChecksumBytes[:], checksumBytes[:]) + + return writeSlices(e.out, blockEnd, b64ChecksumBytes[:], newline, armorEnd, e.blockType, armorEndOfLine) +} + +// Encode returns a WriteCloser which will encode the data written to it in +// OpenPGP armor. +func Encode(out io.Writer, blockType string, headers map[string]string) (w io.WriteCloser, err error) { + bType := []byte(blockType) + err = writeSlices(out, armorStart, bType, armorEndOfLineOut) + if err != nil { + return + } + + for k, v := range headers { + err = writeSlices(out, []byte(k), armorHeaderSep, []byte(v), newline) + if err != nil { + return + } + } + + _, err = out.Write(newline) + if err != nil { + return + } + + e := &encoding{ + out: out, + breaker: newLineBreaker(out, 64), + crc: crc24Init, + blockType: bType, + } + e.b64 = base64.NewEncoder(base64.StdEncoding, e.breaker) + return e, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go new file mode 100644 index 0000000000..a94f6150c4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/canonical_text.go @@ -0,0 +1,65 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "hash" + "io" +) + +// NewCanonicalTextHash reformats text written to it into the canonical +// form and then applies the hash h. See RFC 4880, section 5.2.1. +func NewCanonicalTextHash(h hash.Hash) hash.Hash { + return &canonicalTextHash{h, 0} +} + +type canonicalTextHash struct { + h hash.Hash + s int +} + +var newline = []byte{'\r', '\n'} + +func writeCanonical(cw io.Writer, buf []byte, s *int) (int, error) { + start := 0 + for i, c := range buf { + switch *s { + case 0: + if c == '\r' { + *s = 1 + } else if c == '\n' { + cw.Write(buf[start:i]) + cw.Write(newline) + start = i + 1 + } + case 1: + *s = 0 + } + } + + cw.Write(buf[start:]) + return len(buf), nil +} + +func (cth *canonicalTextHash) Write(buf []byte) (int, error) { + return writeCanonical(cth.h, buf, &cth.s) +} + +func (cth *canonicalTextHash) Sum(in []byte) []byte { + return cth.h.Sum(in) +} + +func (cth *canonicalTextHash) Reset() { + cth.h.Reset() + cth.s = 0 +} + +func (cth *canonicalTextHash) Size() int { + return cth.h.Size() +} + +func (cth *canonicalTextHash) BlockSize() int { + return cth.h.BlockSize() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go new file mode 100644 index 0000000000..0b49be4bf3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/ecdh.go @@ -0,0 +1,165 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ecdh implements ECDH encryption, suitable for OpenPGP, +// as specified in RFC 6637, section 8. +package ecdh + +import ( + "bytes" + "crypto/elliptic" + "errors" + "io" + "math/big" + + "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" +) + +type KDF struct { + Hash algorithm.Hash + Cipher algorithm.Cipher +} + +type PublicKey struct { + ecc.CurveType + elliptic.Curve + X, Y *big.Int + KDF +} + +type PrivateKey struct { + PublicKey + D []byte +} + +func GenerateKey(c elliptic.Curve, kdf KDF, rand io.Reader) (priv *PrivateKey, err error) { + priv = new(PrivateKey) + priv.PublicKey.Curve = c + priv.PublicKey.KDF = kdf + priv.D, priv.PublicKey.X, priv.PublicKey.Y, err = elliptic.GenerateKey(c, rand) + return +} + +func Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + if len(msg) > 40 { + return nil, nil, errors.New("ecdh: message too long") + } + // the sender MAY use 21, 13, and 5 bytes of padding for AES-128, + // AES-192, and AES-256, respectively, to provide the same number of + // octets, 40 total, as an input to the key wrapping method. + padding := make([]byte, 40-len(msg)) + for i := range padding { + padding[i] = byte(40 - len(msg)) + } + m := append(msg, padding...) + + if pub.CurveType == ecc.Curve25519 { + return X25519Encrypt(random, pub, m, curveOID, fingerprint) + } + + d, x, y, err := elliptic.GenerateKey(pub.Curve, random) + if err != nil { + return nil, nil, err + } + + vsG = elliptic.Marshal(pub.Curve, x, y) + zbBig, _ := pub.Curve.ScalarMult(pub.X, pub.Y, d) + + byteLen := (pub.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + z, err := buildKey(pub, zb, curveOID, fingerprint, false, false) + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, m); err != nil { + return nil, nil, err + } + + return vsG, c, nil + +} + +func Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { + if priv.PublicKey.CurveType == ecc.Curve25519 { + return X25519Decrypt(priv, vsG, m, curveOID, fingerprint) + } + x, y := elliptic.Unmarshal(priv.Curve, vsG) + zbBig, _ := priv.Curve.ScalarMult(x, y, priv.D) + + byteLen := (priv.Curve.Params().BitSize + 7) >> 3 + zb := make([]byte, byteLen) + zbBytes := zbBig.Bytes() + copy(zb[byteLen-len(zbBytes):], zbBytes) + + z, err := buildKey(&priv.PublicKey, zb, curveOID, fingerprint, false, false) + if err != nil { + return nil, err + } + + c, err := keywrap.Unwrap(z, m) + if err != nil { + return nil, err + } + + return c[:len(c)-int(c[len(c)-1])], nil +} + +func buildKey(pub *PublicKey, zb []byte, curveOID, fingerprint []byte, stripLeading, stripTrailing bool) ([]byte, error) { + // Param = curve_OID_len || curve_OID || public_key_alg_ID || 03 + // || 01 || KDF_hash_ID || KEK_alg_ID for AESKeyWrap + // || "Anonymous Sender " || recipient_fingerprint; + param := new(bytes.Buffer) + if _, err := param.Write(curveOID); err != nil { + return nil, err + } + algKDF := []byte{18, 3, 1, pub.KDF.Hash.Id(), pub.KDF.Cipher.Id()} + if _, err := param.Write(algKDF); err != nil { + return nil, err + } + if _, err := param.Write([]byte("Anonymous Sender ")); err != nil { + return nil, err + } + // For v5 keys, the 20 leftmost 
octets of the fingerprint are used.
+ if _, err := param.Write(fingerprint[:20]); err != nil {
+ return nil, err
+ }
+ if param.Len() - len(curveOID) != 45 {
+ return nil, errors.New("ecdh: malformed KDF Param")
+ }
+
+ // MB = Hash ( 00 || 00 || 00 || 01 || ZB || Param );
+ h := pub.KDF.Hash.New()
+ if _, err := h.Write([]byte{0x0, 0x0, 0x0, 0x1}); err != nil {
+ return nil, err
+ }
+ zbLen := len(zb)
+ i := 0
+ j := zbLen - 1
+ if stripLeading {
+ // Work around old go crypto bug where the leading zeros are missing.
+ for ; i < zbLen && zb[i] == 0; i++ {}
+ }
+ if stripTrailing {
+ // Work around old OpenPGP.js bug where insignificant trailing zeros in
+ // this little-endian number are missing.
+ // (See https://github.com/openpgpjs/openpgpjs/pull/853.)
+ for ; j >= 0 && zb[j] == 0; j-- {}
+ }
+ if _, err := h.Write(zb[i:j+1]); err != nil {
+ return nil, err
+ }
+ if _, err := h.Write(param.Bytes()); err != nil {
+ return nil, err
+ }
+ mb := h.Sum(nil)
+
+ return mb[:pub.KDF.Cipher.KeySize()], nil // return oBits leftmost bits of MB.
+
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go
new file mode 100644
index 0000000000..15b41a31df
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/ecdh/x25519.go
@@ -0,0 +1,157 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ecdh implements ECDH encryption, suitable for OpenPGP,
+// as specified in RFC 6637, section 8.
+package ecdh
+
+import (
+ "errors"
+ "io"
+ "math/big"
+
+ "github.com/ProtonMail/go-crypto/openpgp/aes/keywrap"
+ "github.com/ProtonMail/go-crypto/openpgp/internal/ecc"
+ "golang.org/x/crypto/curve25519"
+)
+
+// Generates a private-public key-pair.
+// 'priv' is a private key; a scalar belonging to the set
+// 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup of the
+// curve. 'pub' is simply 'priv' * G where G is the base point.
+// See https://cr.yp.to/ecdh.html and RFC7748, sec 5.
+func x25519GenerateKeyPairBytes(rand io.Reader) (priv [32]byte, pub [32]byte, err error) {
+ var n, helper = new(big.Int), new(big.Int)
+ n.SetUint64(1)
+ n.Lsh(n, 252)
+ helper.SetString("27742317777372353535851937790883648493", 10)
+ n.Add(n, helper)
+
+ for true {
+ _, err = io.ReadFull(rand, priv[:])
+ if err != nil {
+ return
+ }
+ // The following ensures that the private key is a number of the form
+ // 2^{254} + 8 * [0, 2^{251}), in order to avoid the small subgroup
+ // of the curve.
+ priv[0] &= 248
+ priv[31] &= 127
+ priv[31] |= 64
+
+ // If the scalar is out of range, sample another random number.
+ if new(big.Int).SetBytes(priv[:]).Cmp(n) >= 0 {
+ continue
+ }
+
+ curve25519.ScalarBaseMult(&pub, &priv)
+ return
+ }
+ return
+}
+
+// X25519GenerateKey samples the key pair according to the correct distribution.
+// It also sets the given key-derivation function and returns the *PrivateKey
+// object along with an error.
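+//
+// Illustrative call; this Hash/Cipher pairing mirrors the one used by
+// newDecrypter in this patch's key_generation.go, and rand.Reader here is
+// crypto/rand:
+//
+//	kdf := KDF{Hash: algorithm.SHA512, Cipher: algorithm.AES256}
+//	priv, err := X25519GenerateKey(rand.Reader, kdf)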
+func X25519GenerateKey(rand io.Reader, kdf KDF) (priv *PrivateKey, err error) { + ci := ecc.FindByName("Curve25519") + priv = new(PrivateKey) + priv.PublicKey.Curve = ci.Curve + d, pubKey, err := x25519GenerateKeyPairBytes(rand) + if err != nil { + return nil, err + } + priv.PublicKey.KDF = kdf + priv.D = make([]byte, 32) + copyReversed(priv.D, d[:]) + priv.PublicKey.CurveType = ci.CurveType + priv.PublicKey.Curve = ci.Curve + /* + * Note that ECPoint.point differs from the definition of public keys in + * [Curve25519] in two ways: (1) the byte-ordering is big-endian, which is + * more uniform with how big integers are represented in TLS, and (2) there + * is an additional length byte (so ECpoint.point is actually 33 bytes), + * again for uniformity (and extensibility). + */ + var encodedKey = make([]byte, 33) + encodedKey[0] = 0x40 + copy(encodedKey[1:], pubKey[:]) + priv.PublicKey.X = new(big.Int).SetBytes(encodedKey[:]) + priv.PublicKey.Y = new(big.Int) + return priv, nil +} + +func X25519Encrypt(random io.Reader, pub *PublicKey, msg, curveOID, fingerprint []byte) (vsG, c []byte, err error) { + d, ephemeralKey, err := x25519GenerateKeyPairBytes(random) + if err != nil { + return nil, nil, err + } + var pubKey [32]byte + + if pub.X.BitLen() > 33*264 { + return nil, nil, errors.New("ecdh: invalid key") + } + copy(pubKey[:], pub.X.Bytes()[1:]) + + var zb [32]byte + curve25519.ScalarBaseMult(&zb, &d) + curve25519.ScalarMult(&zb, &d, &pubKey) + z, err := buildKey(pub, zb[:], curveOID, fingerprint, false, false) + + if err != nil { + return nil, nil, err + } + + if c, err = keywrap.Wrap(z, msg); err != nil { + return nil, nil, err + } + + var vsg [33]byte + vsg[0] = 0x40 + copy(vsg[1:], ephemeralKey[:]) + + return vsg[:], c, nil +} + +func X25519Decrypt(priv *PrivateKey, vsG, m, curveOID, fingerprint []byte) (msg []byte, err error) { + var zb, d, ephemeralKey [32]byte + if len(vsG) != 33 || vsG[0] != 0x40 { + return nil, errors.New("ecdh: invalid key") + } + copy(ephemeralKey[:], vsG[1:33]) + + copyReversed(d[:], priv.D) + curve25519.ScalarBaseMult(&zb, &d) + curve25519.ScalarMult(&zb, &d, &ephemeralKey) + + var c []byte + + for i := 0; i < 3; i++ { + // Try buildKey three times for compat, see comments in buildKey. + z, err := buildKey(&priv.PublicKey, zb[:], curveOID, fingerprint, i == 1, i == 2) + if err != nil { + return nil, err + } + + res, err := keywrap.Unwrap(z, m) + if i == 2 && err != nil { + // Only return an error after we've tried all variants of buildKey. + return nil, err + } + + c = res + if err == nil { + break + } + } + + return c[:len(c)-int(c[len(c)-1])], nil +} + +func copyReversed(out []byte, in []byte) { + l := len(in) + for i := 0; i < l; i++ { + out[i] = in[l-i-1] + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go new file mode 100644 index 0000000000..6a07d8ff27 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/elgamal/elgamal.go @@ -0,0 +1,124 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package elgamal implements ElGamal encryption, suitable for OpenPGP, +// as specified in "A Public-Key Cryptosystem and a Signature Scheme Based on +// Discrete Logarithms," IEEE Transactions on Information Theory, v. IT-31, +// n. 4, 1985, pp. 469-472. 
+// +// This form of ElGamal embeds PKCS#1 v1.5 padding, which may make it +// unsuitable for other protocols. RSA should be used in preference in any +// case. +package elgamal // import "github.com/ProtonMail/go-crypto/openpgp/elgamal" + +import ( + "crypto/rand" + "crypto/subtle" + "errors" + "io" + "math/big" +) + +// PublicKey represents an ElGamal public key. +type PublicKey struct { + G, P, Y *big.Int +} + +// PrivateKey represents an ElGamal private key. +type PrivateKey struct { + PublicKey + X *big.Int +} + +// Encrypt encrypts the given message to the given public key. The result is a +// pair of integers. Errors can result from reading random, or because msg is +// too large to be encrypted to the public key. +func Encrypt(random io.Reader, pub *PublicKey, msg []byte) (c1, c2 *big.Int, err error) { + pLen := (pub.P.BitLen() + 7) / 8 + if len(msg) > pLen-11 { + err = errors.New("elgamal: message too long") + return + } + + // EM = 0x02 || PS || 0x00 || M + em := make([]byte, pLen-1) + em[0] = 2 + ps, mm := em[1:len(em)-len(msg)-1], em[len(em)-len(msg):] + err = nonZeroRandomBytes(ps, random) + if err != nil { + return + } + em[len(em)-len(msg)-1] = 0 + copy(mm, msg) + + m := new(big.Int).SetBytes(em) + + k, err := rand.Int(random, pub.P) + if err != nil { + return + } + + c1 = new(big.Int).Exp(pub.G, k, pub.P) + s := new(big.Int).Exp(pub.Y, k, pub.P) + c2 = s.Mul(s, m) + c2.Mod(c2, pub.P) + + return +} + +// Decrypt takes two integers, resulting from an ElGamal encryption, and +// returns the plaintext of the message. An error can result only if the +// ciphertext is invalid. Users should keep in mind that this is a padding +// oracle and thus, if exposed to an adaptive chosen ciphertext attack, can +// be used to break the cryptosystem. See ``Chosen Ciphertext Attacks +// Against Protocols Based on the RSA Encryption Standard PKCS #1'', Daniel +// Bleichenbacher, Advances in Cryptology (Crypto '98), +func Decrypt(priv *PrivateKey, c1, c2 *big.Int) (msg []byte, err error) { + s := new(big.Int).Exp(c1, priv.X, priv.P) + if s.ModInverse(s, priv.P) == nil { + return nil, errors.New("elgamal: invalid private key") + } + s.Mul(s, c2) + s.Mod(s, priv.P) + em := s.Bytes() + + firstByteIsTwo := subtle.ConstantTimeByteEq(em[0], 2) + + // The remainder of the plaintext must be a string of non-zero random + // octets, followed by a 0, followed by the message. + // lookingForIndex: 1 iff we are still looking for the zero. + // index: the offset of the first zero byte. + var lookingForIndex, index int + lookingForIndex = 1 + + for i := 1; i < len(em); i++ { + equals0 := subtle.ConstantTimeByteEq(em[i], 0) + index = subtle.ConstantTimeSelect(lookingForIndex&equals0, i, index) + lookingForIndex = subtle.ConstantTimeSelect(equals0, 0, lookingForIndex) + } + + if firstByteIsTwo != 1 || lookingForIndex != 0 || index < 9 { + return nil, errors.New("elgamal: decryption error") + } + return em[index+1:], nil +} + +// nonZeroRandomBytes fills the given slice with non-zero random octets. 
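+// In Encrypt above, this fills the PS portion of the
+// EM = 0x02 || PS || 0x00 || M encoding, which must contain no zero octets.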
+func nonZeroRandomBytes(s []byte, rand io.Reader) (err error) { + _, err = io.ReadFull(rand, s) + if err != nil { + return + } + + for i := 0; i < len(s); i++ { + for s[i] == 0 { + _, err = io.ReadFull(rand, s[i:i+1]) + if err != nil { + return + } + } + } + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go new file mode 100644 index 0000000000..17e2bcfed2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/errors/errors.go @@ -0,0 +1,116 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors contains common error types for the OpenPGP packages. +package errors // import "github.com/ProtonMail/go-crypto/openpgp/errors" + +import ( + "strconv" +) + +// A StructuralError is returned when OpenPGP data is found to be syntactically +// invalid. +type StructuralError string + +func (s StructuralError) Error() string { + return "openpgp: invalid data: " + string(s) +} + +// UnsupportedError indicates that, although the OpenPGP data is valid, it +// makes use of currently unimplemented features. +type UnsupportedError string + +func (s UnsupportedError) Error() string { + return "openpgp: unsupported feature: " + string(s) +} + +// InvalidArgumentError indicates that the caller is in error and passed an +// incorrect value. +type InvalidArgumentError string + +func (i InvalidArgumentError) Error() string { + return "openpgp: invalid argument: " + string(i) +} + +// SignatureError indicates that a syntactically valid signature failed to +// validate. +type SignatureError string + +func (b SignatureError) Error() string { + return "openpgp: invalid signature: " + string(b) +} + +var ErrMDCHashMismatch error = SignatureError("MDC hash mismatch") +var ErrMDCMissing error = SignatureError("MDC packet not found") + +type signatureExpiredError int + +func (se signatureExpiredError) Error() string { + return "openpgp: signature expired" +} + +var ErrSignatureExpired error = signatureExpiredError(0) + +type keyExpiredError int + +func (ke keyExpiredError) Error() string { + return "openpgp: key expired" +} + +var ErrKeyExpired error = keyExpiredError(0) + +type keyIncorrectError int + +func (ki keyIncorrectError) Error() string { + return "openpgp: incorrect key" +} + +var ErrKeyIncorrect error = keyIncorrectError(0) + +// KeyInvalidError indicates that the public key parameters are invalid +// as they do not match the private ones +type KeyInvalidError string + +func (e KeyInvalidError) Error() string { + return "openpgp: invalid key: " + string(e) +} + +type unknownIssuerError int + +func (unknownIssuerError) Error() string { + return "openpgp: signature made by unknown entity" +} + +var ErrUnknownIssuer error = unknownIssuerError(0) + +type keyRevokedError int + +func (keyRevokedError) Error() string { + return "openpgp: signature made by revoked key" +} + +var ErrKeyRevoked error = keyRevokedError(0) + +type UnknownPacketTypeError uint8 + +func (upte UnknownPacketTypeError) Error() string { + return "openpgp: unknown packet type: " + strconv.Itoa(int(upte)) +} + +// AEADError indicates that there is a problem when initializing or using a +// AEAD instance, configuration struct, nonces or index values. 
+type AEADError string + +func (ae AEADError) Error() string { + return "openpgp: aead error: " + string(ae) +} + +// ErrDummyPrivateKey results when operations are attempted on a private key +// that is just a dummy key. See +// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109 +type ErrDummyPrivateKey string + +func (dke ErrDummyPrivateKey) Error() string { + return "openpgp: s2k GNU dummy key: " + string(dke) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go new file mode 100644 index 0000000000..17a1bfe9cb --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/aead.go @@ -0,0 +1,65 @@ +// Copyright (C) 2019 ProtonTech AG + +package algorithm + +import ( + "crypto/cipher" + "github.com/ProtonMail/go-crypto/eax" + "github.com/ProtonMail/go-crypto/ocb" +) + +// AEADMode defines the Authenticated Encryption with Associated Data mode of +// operation. +type AEADMode uint8 + +// Supported modes of operation (see RFC4880bis [EAX] and RFC7253) +const ( + AEADModeEAX = AEADMode(1) + AEADModeOCB = AEADMode(2) + AEADModeGCM = AEADMode(100) +) + +// TagLength returns the length in bytes of authentication tags. +func (mode AEADMode) TagLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 16 + case AEADModeGCM: + return 16 + default: + return 0 + } +} + +// NonceLength returns the length in bytes of nonces. +func (mode AEADMode) NonceLength() int { + switch mode { + case AEADModeEAX: + return 16 + case AEADModeOCB: + return 15 + case AEADModeGCM: + return 12 + default: + return 0 + } +} + +// New returns a fresh instance of the given mode +func (mode AEADMode) New(block cipher.Block) (alg cipher.AEAD) { + var err error + switch mode { + case AEADModeEAX: + alg, err = eax.NewEAX(block) + case AEADModeOCB: + alg, err = ocb.NewOCB(block) + case AEADModeGCM: + alg, err = cipher.NewGCM(block) + } + if err != nil { + panic(err.Error()) + } + return alg +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go new file mode 100644 index 0000000000..5760cff80e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/cipher.go @@ -0,0 +1,107 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + + "golang.org/x/crypto/cast5" +) + +// Cipher is an official symmetric key cipher algorithm. See RFC 4880, +// section 9.2. +type Cipher interface { + // Id returns the algorithm ID, as a byte, of the cipher. + Id() uint8 + // KeySize returns the key size, in bytes, of the cipher. + KeySize() int + // BlockSize returns the block size, in bytes, of the cipher. + BlockSize() int + // New returns a fresh instance of the given cipher. + New(key []byte) cipher.Block +} + +// The following constants mirror the OpenPGP standard (RFC 4880). +const ( + TripleDES = CipherFunction(2) + CAST5 = CipherFunction(3) + AES128 = CipherFunction(7) + AES192 = CipherFunction(8) + AES256 = CipherFunction(9) +) + +// CipherById represents the different block ciphers specified for OpenPGP. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13 +var CipherById = map[uint8]Cipher{ + TripleDES.Id(): TripleDES, + CAST5.Id(): CAST5, + AES128.Id(): AES128, + AES192.Id(): AES192, + AES256.Id(): AES256, +} + +type CipherFunction uint8 + +// ID returns the algorithm Id, as a byte, of cipher. +func (sk CipherFunction) Id() uint8 { + return uint8(sk) +} + +var keySizeByID = map[uint8]int{ + TripleDES.Id(): 24, + CAST5.Id(): cast5.KeySize, + AES128.Id(): 16, + AES192.Id(): 24, + AES256.Id(): 32, +} + +// KeySize returns the key size, in bytes, of cipher. +func (cipher CipherFunction) KeySize() int { + switch cipher { + case TripleDES: + return 24 + case CAST5: + return cast5.KeySize + case AES128: + return 16 + case AES192: + return 24 + case AES256: + return 32 + } + return 0 +} + +// BlockSize returns the block size, in bytes, of cipher. +func (cipher CipherFunction) BlockSize() int { + switch cipher { + case TripleDES: + return des.BlockSize + case CAST5: + return 8 + case AES128, AES192, AES256: + return 16 + } + return 0 +} + +// New returns a fresh instance of the given cipher. +func (cipher CipherFunction) New(key []byte) (block cipher.Block) { + var err error + switch cipher { + case TripleDES: + block, err = des.NewTripleDESCipher(key) + case CAST5: + block, err = cast5.NewCipher(key) + case AES128, AES192, AES256: + block, err = aes.NewCipher(key) + } + if err != nil { + panic(err.Error()) + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go new file mode 100644 index 0000000000..3f1b61b88e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/algorithm/hash.go @@ -0,0 +1,86 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package algorithm + +import ( + "crypto" + "fmt" + "hash" +) + +// Hash is an official hash function algorithm. See RFC 4880, section 9.4. +type Hash interface { + // Id returns the algorithm ID, as a byte, of Hash. + Id() uint8 + // Available reports whether the given hash function is linked into the binary. + Available() bool + // HashFunc simply returns the value of h so that Hash implements SignerOpts. + HashFunc() crypto.Hash + // New returns a new hash.Hash calculating the given hash function. New + // panics if the hash function is not linked into the binary. + New() hash.Hash + // Size returns the length, in bytes, of a digest resulting from the given + // hash function. It doesn't require that the hash function in question be + // linked into the program. + Size() int + // String is the name of the hash function corresponding to the given + // OpenPGP hash id. + String() string +} + +// The following vars mirror the crypto/Hash supported hash functions. +var ( + MD5 Hash = cryptoHash{1, crypto.MD5} + SHA1 Hash = cryptoHash{2, crypto.SHA1} + RIPEMD160 Hash = cryptoHash{3, crypto.RIPEMD160} + SHA256 Hash = cryptoHash{8, crypto.SHA256} + SHA384 Hash = cryptoHash{9, crypto.SHA384} + SHA512 Hash = cryptoHash{10, crypto.SHA512} + SHA224 Hash = cryptoHash{11, crypto.SHA224} +) + +// HashById represents the different hash functions specified for OpenPGP. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-14 +var ( + HashById = map[uint8]Hash{ + MD5.Id(): MD5, + SHA1.Id(): SHA1, + RIPEMD160.Id(): RIPEMD160, + SHA256.Id(): SHA256, + SHA384.Id(): SHA384, + SHA512.Id(): SHA512, + SHA224.Id(): SHA224, + } +) + +// cryptoHash contains pairs relating OpenPGP's hash identifier with +// Go's crypto.Hash type. See RFC 4880, section 9.4. +type cryptoHash struct { + id uint8 + crypto.Hash +} + +// Id returns the algorithm ID, as a byte, of cryptoHash. +func (h cryptoHash) Id() uint8 { + return h.id +} + +var hashNames = map[uint8]string{ + MD5.Id(): "MD5", + SHA1.Id(): "SHA1", + RIPEMD160.Id(): "RIPEMD160", + SHA256.Id(): "SHA256", + SHA384.Id(): "SHA384", + SHA512.Id(): "SHA512", + SHA224.Id(): "SHA224", +} + +func (h cryptoHash) String() string { + s, ok := hashNames[h.id] + if !ok { + panic(fmt.Sprintf("Unsupported hash function %d", h.id)) + } + return s +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go new file mode 100644 index 0000000000..f91042fd85 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveInfo.go @@ -0,0 +1,118 @@ +package ecc + +import ( + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "crypto/elliptic" + "bytes" + "github.com/ProtonMail/go-crypto/bitcurves" + "github.com/ProtonMail/go-crypto/brainpool" +) + +type SignatureAlgorithm uint8 + +const ( + ECDSA SignatureAlgorithm = 1 + EdDSA SignatureAlgorithm = 2 +) + +type CurveInfo struct { + Name string + Oid *encoding.OID + Curve elliptic.Curve + SigAlgorithm SignatureAlgorithm + CurveType CurveType +} + +var curves = []CurveInfo{ + { + Name: "NIST curve P-256", + Oid: encoding.NewOID([]byte{0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07}), + Curve: elliptic.P256(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "NIST curve P-384", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x22}), + Curve: elliptic.P384(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "NIST curve P-521", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x23}), + Curve: elliptic.P521(), + CurveType: NISTCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "SecP256k1", + Oid: encoding.NewOID([]byte{0x2B, 0x81, 0x04, 0x00, 0x0A}), + Curve: bitcurves.S256(), + CurveType: BitCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "Curve25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0x97, 0x55, 0x01, 0x05, 0x01}), + Curve: elliptic.P256(),// filler + CurveType: Curve25519, + SigAlgorithm: ECDSA, + }, + { + Name: "Ed25519", + Oid: encoding.NewOID([]byte{0x2B, 0x06, 0x01, 0x04, 0x01, 0xDA, 0x47, 0x0F, 0x01}), + Curve: elliptic.P256(), // filler + CurveType: NISTCurve, + SigAlgorithm: EdDSA, + }, + { + Name: "Brainpool P256r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x07}), + Curve: brainpool.P256r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "BrainpoolP384r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0B}), + Curve: brainpool.P384r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, + { + Name: "BrainpoolP512r1", + Oid: encoding.NewOID([]byte{0x2B, 0x24, 0x03, 0x03, 0x02, 0x08, 0x01, 0x01, 0x0D}), + Curve: brainpool.P512r1(), + CurveType: BrainpoolCurve, + SigAlgorithm: ECDSA, + }, +} + +func FindByCurve(curve elliptic.Curve) *CurveInfo { + for _, 
curveInfo := range curves { + if curveInfo.Curve == curve { + return &curveInfo + } + } + return nil +} + +func FindByOid(oid encoding.Field) *CurveInfo { + var rawBytes = oid.Bytes() + for _, curveInfo := range curves { + if bytes.Equal(curveInfo.Oid.Bytes(), rawBytes) { + return &curveInfo + } + } + return nil +} + +func FindByName(name string) *CurveInfo { + for _, curveInfo := range curves { + if curveInfo.Name == name { + return &curveInfo + } + } + return nil +} \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go new file mode 100644 index 0000000000..de8bca0ac2 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/ecc/curveType.go @@ -0,0 +1,10 @@ +package ecc + +type CurveType uint8 + +const ( + NISTCurve CurveType = 1 + Curve25519 CurveType = 2 + BitCurve CurveType = 3 + BrainpoolCurve CurveType = 4 +) \ No newline at end of file diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go new file mode 100644 index 0000000000..6c921481b7 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/encoding.go @@ -0,0 +1,27 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package encoding implements openpgp packet field encodings as specified in +// RFC 4880 and 6637. +package encoding + +import "io" + +// Field is an encoded field of an openpgp packet. +type Field interface { + // Bytes returns the decoded data. + Bytes() []byte + + // BitLength is the size in bits of the decoded data. + BitLength() uint16 + + // EncodedBytes returns the encoded data. + EncodedBytes() []byte + + // EncodedLength is the size in bytes of the encoded data. + EncodedLength() uint16 + + // ReadFrom reads the next Field from r. + ReadFrom(r io.Reader) (int64, error) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go new file mode 100644 index 0000000000..02e5e695c3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/mpi.go @@ -0,0 +1,91 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package encoding + +import ( + "io" + "math/big" + "math/bits" +) + +// An MPI is used to store the contents of a big integer, along with the bit +// length that was specified in the original input. This allows the MPI to be +// reserialized exactly. +type MPI struct { + bytes []byte + bitLength uint16 +} + +// NewMPI returns a MPI initialized with bytes. +func NewMPI(bytes []byte) *MPI { + for len(bytes) != 0 && bytes[0] == 0 { + bytes = bytes[1:] + } + if len(bytes) == 0 { + bitLength := uint16(0) + return &MPI{bytes, bitLength} + } + bitLength := 8*uint16(len(bytes)-1) + uint16(bits.Len8(bytes[0])) + return &MPI{bytes, bitLength} +} + +// Bytes returns the decoded data. +func (m *MPI) Bytes() []byte { + return m.bytes +} + +// BitLength is the size in bits of the decoded data. +func (m *MPI) BitLength() uint16 { + return m.bitLength +} + +// EncodedBytes returns the encoded data. 
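+//
+// For example (illustrative): an MPI holding the bytes {0x01, 0xFF} has a bit
+// length of 9, so EncodedBytes returns {0x00, 0x09, 0x01, 0xFF}.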
+func (m *MPI) EncodedBytes() []byte {
+ return append([]byte{byte(m.bitLength >> 8), byte(m.bitLength)}, m.bytes...)
+}
+
+// EncodedLength is the size in bytes of the encoded data.
+func (m *MPI) EncodedLength() uint16 {
+ return uint16(2 + len(m.bytes))
+}
+
+// ReadFrom reads into m the next MPI from r.
+func (m *MPI) ReadFrom(r io.Reader) (int64, error) {
+ var buf [2]byte
+ n, err := io.ReadFull(r, buf[0:])
+ if err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return int64(n), err
+ }
+
+ m.bitLength = uint16(buf[0])<<8 | uint16(buf[1])
+ m.bytes = make([]byte, (int(m.bitLength)+7)/8)
+
+ nn, err := io.ReadFull(r, m.bytes)
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+
+ // remove leading zero bytes from malformed GnuPG encoded MPIs:
+ // https://bugs.gnupg.org/gnupg/issue1853
+ // for _, b := range m.bytes {
+ // if b != 0 {
+ // break
+ // }
+ // m.bytes = m.bytes[1:]
+ // m.bitLength -= 8
+ // }
+
+ return int64(n) + int64(nn), err
+}
+
+// SetBig initializes m with the bits from n.
+func (m *MPI) SetBig(n *big.Int) *MPI {
+ m.bytes = n.Bytes()
+ m.bitLength = uint16(n.BitLen())
+ return m
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
new file mode 100644
index 0000000000..ee39fd6bbb
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/internal/encoding/oid.go
@@ -0,0 +1,88 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package encoding
+
+import (
+ "io"
+
+ "github.com/ProtonMail/go-crypto/openpgp/errors"
+)
+
+// OID is used to store a variable-length field with a one-octet size
+// prefix. See https://tools.ietf.org/html/rfc6637#section-9.
+type OID struct {
+ bytes []byte
+}
+
+const (
+ // maxOID is the maximum number of bytes in an OID.
+ maxOID = 254
+ // reservedOIDLength1 and reservedOIDLength2 are OID lengths that the RFC
+ // specifies are reserved.
+ reservedOIDLength1 = 0
+ reservedOIDLength2 = 0xff
+)
+
+// NewOID returns an OID initialized with bytes.
+func NewOID(bytes []byte) *OID {
+ switch len(bytes) {
+ case reservedOIDLength1, reservedOIDLength2:
+ panic("encoding: NewOID argument length is reserved")
+ default:
+ if len(bytes) > maxOID {
+ panic("encoding: NewOID argument too large")
+ }
+ }
+
+ return &OID{
+ bytes: bytes,
+ }
+}
+
+// Bytes returns the decoded data.
+func (o *OID) Bytes() []byte {
+ return o.bytes
+}
+
+// BitLength is the size in bits of the decoded data.
+func (o *OID) BitLength() uint16 {
+ return uint16(len(o.bytes) * 8)
+}
+
+// EncodedBytes returns the encoded data.
+func (o *OID) EncodedBytes() []byte {
+ return append([]byte{byte(len(o.bytes))}, o.bytes...)
+}
+
+// EncodedLength is the size in bytes of the encoded data.
+func (o *OID) EncodedLength() uint16 {
+ return uint16(1 + len(o.bytes))
+}
+
+// ReadFrom reads into o the next OID from r.
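+//
+// For example (illustrative), a stream beginning with the length octet 0x0A
+// followed by ten OID bytes yields an eleven-byte read in total: the length
+// octet plus the body.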
+func (o *OID) ReadFrom(r io.Reader) (int64, error) { + var buf [1]byte + n, err := io.ReadFull(r, buf[:]) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return int64(n), err + } + + switch buf[0] { + case reservedOIDLength1, reservedOIDLength2: + return int64(n), errors.UnsupportedError("reserved for future extensions") + } + + o.bytes = make([]byte, buf[0]) + + nn, err := io.ReadFull(r, o.bytes) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + + return int64(n) + int64(nn), err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go new file mode 100644 index 0000000000..0d2eb45b8e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/key_generation.go @@ -0,0 +1,375 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + goerrors "errors" + "io" + "math/big" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "golang.org/x/crypto/ed25519" +) + +// NewEntity returns an Entity that contains a fresh RSA/RSA keypair with a +// single identity composed of the given full name, comment and email, any of +// which may be empty but must not contain any of "()<>\x00". +// If config is nil, sensible defaults will be used. +func NewEntity(name, comment, email string, config *packet.Config) (*Entity, error) { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + uid := packet.NewUserId(name, comment, email) + if uid == nil { + return nil, errors.InvalidArgumentError("user id field contained invalid characters") + } + + // Generate a primary signing key + primaryPrivRaw, err := newSigner(config) + if err != nil { + return nil, err + } + primary := packet.NewSignerPrivateKey(creationTime, primaryPrivRaw) + if config != nil && config.V5Keys { + primary.UpgradeToV5() + } + + isPrimaryId := true + selfSignature := &packet.Signature{ + Version: primary.PublicKey.Version, + SigType: packet.SigTypePositiveCert, + PubKeyAlgo: primary.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + IssuerKeyId: &primary.PublicKey.KeyId, + IssuerFingerprint: primary.PublicKey.Fingerprint, + IsPrimaryId: &isPrimaryId, + FlagsValid: true, + FlagSign: true, + FlagCertify: true, + MDC: true, // true by default, see 5.8 vs. 5.14 + AEAD: config.AEAD() != nil, + V5Keys: config != nil && config.V5Keys, + } + + // Set the PreferredHash for the SelfSignature from the packet.Config. + // If it is not the must-implement algorithm from rfc4880bis, append that. + selfSignature.PreferredHash = []uint8{hashToHashId(config.Hash())} + if config.Hash() != crypto.SHA256 { + selfSignature.PreferredHash = append(selfSignature.PreferredHash, hashToHashId(crypto.SHA256)) + } + + // Likewise for DefaultCipher. + selfSignature.PreferredSymmetric = []uint8{uint8(config.Cipher())} + if config.Cipher() != packet.CipherAES128 { + selfSignature.PreferredSymmetric = append(selfSignature.PreferredSymmetric, uint8(packet.CipherAES128)) + } + + // And for DefaultMode. 
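+ // As with the hash and cipher preferences above, the configured AEAD mode
+ // is listed first and EAX is appended as a fallback when it differs.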
+ selfSignature.PreferredAEAD = []uint8{uint8(config.AEAD().Mode())} + if config.AEAD().Mode() != packet.AEADModeEAX { + selfSignature.PreferredAEAD = append(selfSignature.PreferredAEAD, uint8(packet.AEADModeEAX)) + } + + // User ID binding signature + err = selfSignature.SignUserId(uid.Id, &primary.PublicKey, primary, config) + if err != nil { + return nil, err + } + + // Generate an encryption subkey + subPrivRaw, err := newDecrypter(config) + if err != nil { + return nil, err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + sub.IsSubkey = true + sub.PublicKey.IsSubkey = true + if config != nil && config.V5Keys { + sub.UpgradeToV5() + } + + // NOTE: No KeyLifetimeSecs here, but we will not return this subkey in EncryptionKey() + // if the primary/master key has expired. + subKey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: primary.PublicKey.Version, + CreationTime: creationTime, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: primary.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &primary.PublicKey.KeyId, + }, + } + + // Subkey binding signature + err = subKey.Sig.SignKey(subKey.PublicKey, primary, config) + if err != nil { + return nil, err + } + + return &Entity{ + PrimaryKey: &primary.PublicKey, + PrivateKey: primary, + Identities: map[string]*Identity{ + uid.Id: &Identity{ + Name: uid.Id, + UserId: uid, + SelfSignature: selfSignature, + Signatures: []*packet.Signature{selfSignature}, + }, + }, + Subkeys: []Subkey{subKey}, + }, nil +} + +// AddSigningSubkey adds a signing keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. +func (e *Entity) AddSigningSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newSigner(config) + if err != nil { + return err + } + sub := packet.NewSignerPrivateKey(creationTime, subPrivRaw) + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagSign: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + EmbeddedSignature: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + SigType: packet.SigTypePrimaryKeyBinding, + PubKeyAlgo: sub.PublicKey.PubKeyAlgo, + Hash: config.Hash(), + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + }, + } + if config != nil && config.V5Keys { + subkey.PublicKey.UpgradeToV5() + } + + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, subkey.PrivateKey, config) + if err != nil { + return err + } + + subkey.PublicKey.IsSubkey = true + subkey.PrivateKey.IsSubkey = true + if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// AddEncryptionSubkey adds an encryption keypair as a subkey to the Entity. +// If config is nil, sensible defaults will be used. 
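+//
+// A minimal sketch, relying on the nil-config defaults noted above (entity is
+// assumed to be an existing *Entity with a usable private key):
+//
+//	if err := entity.AddEncryptionSubkey(nil); err != nil {
+//		// handle the error
+//	}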
+func (e *Entity) AddEncryptionSubkey(config *packet.Config) error { + creationTime := config.Now() + keyLifetimeSecs := config.KeyLifetime() + + subPrivRaw, err := newDecrypter(config) + if err != nil { + return err + } + sub := packet.NewDecrypterPrivateKey(creationTime, subPrivRaw) + + subkey := Subkey{ + PublicKey: &sub.PublicKey, + PrivateKey: sub, + Sig: &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: creationTime, + KeyLifetimeSecs: &keyLifetimeSecs, + SigType: packet.SigTypeSubkeyBinding, + PubKeyAlgo: e.PrimaryKey.PubKeyAlgo, + Hash: config.Hash(), + FlagsValid: true, + FlagEncryptStorage: true, + FlagEncryptCommunications: true, + IssuerKeyId: &e.PrimaryKey.KeyId, + }, + } + if config != nil && config.V5Keys { + subkey.PublicKey.UpgradeToV5() + } + + subkey.PublicKey.IsSubkey = true + subkey.PrivateKey.IsSubkey = true + if err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + e.Subkeys = append(e.Subkeys, subkey) + return nil +} + +// Generates a signing key +func newSigner(config *packet.Config) (signer crypto.Signer, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + _, priv, err := ed25519.GenerateKey(config.Random()) + if err != nil { + return nil, err + } + return &priv, nil + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +// Generates an encryption/decryption key +func newDecrypter(config *packet.Config) (decrypter interface{}, err error) { + switch config.PublicKeyAlgorithm() { + case packet.PubKeyAlgoRSA: + bits := config.RSAModulusBits() + if bits < 1024 { + return nil, errors.InvalidArgumentError("bits must be >= 1024") + } + if config != nil && len(config.RSAPrimes) >= 2 { + primes := config.RSAPrimes[0:2] + config.RSAPrimes = config.RSAPrimes[2:] + return generateRSAKeyWithPrimes(config.Random(), 2, bits, primes) + } + return rsa.GenerateKey(config.Random(), bits) + case packet.PubKeyAlgoEdDSA: + fallthrough // When passing EdDSA, we generate an ECDH subkey + case packet.PubKeyAlgoECDH: + var kdf = ecdh.KDF{ + Hash: algorithm.SHA512, + Cipher: algorithm.AES256, + } + return ecdh.X25519GenerateKey(config.Random(), kdf) + default: + return nil, errors.InvalidArgumentError("unsupported public key algorithm") + } +} + +var bigOne = big.NewInt(1) + +// generateRSAKeyWithPrimes generates a multi-prime RSA keypair of the +// given bit size, using the given random source and prepopulated primes. +func generateRSAKeyWithPrimes(random io.Reader, nprimes int, bits int, prepopulatedPrimes []*big.Int) (*rsa.PrivateKey, error) { + priv := new(rsa.PrivateKey) + priv.E = 65537 + + if nprimes < 2 { + return nil, goerrors.New("generateRSAKeyWithPrimes: nprimes must be >= 2") + } + + if bits < 1024 { + return nil, goerrors.New("generateRSAKeyWithPrimes: bits must be >= 1024") + } + + primes := make([]*big.Int, nprimes) + +NextSetOfPrimes: + for { + todo := bits + // crypto/rand should set the top two bits in each prime. + // Thus each prime has the form + // p_i = 2^bitlen(p_i) × 0.11... (in base 2). 
+ // And the product is: + // P = 2^todo × α + // where α is the product of nprimes numbers of the form 0.11... + // + // If α < 1/2 (which can happen for nprimes > 2), we need to + // shift todo to compensate for lost bits: the mean value of 0.11... + // is 7/8, so todo + shift - nprimes * log2(7/8) ~= bits - 1/2 + // will give good results. + if nprimes >= 7 { + todo += (nprimes - 2) / 5 + } + for i := 0; i < nprimes; i++ { + var err error + if len(prepopulatedPrimes) == 0 { + primes[i], err = rand.Prime(random, todo/(nprimes-i)) + if err != nil { + return nil, err + } + } else { + primes[i] = prepopulatedPrimes[0] + prepopulatedPrimes = prepopulatedPrimes[1:] + } + + todo -= primes[i].BitLen() + } + + // Make sure that primes is pairwise unequal. + for i, prime := range primes { + for j := 0; j < i; j++ { + if prime.Cmp(primes[j]) == 0 { + continue NextSetOfPrimes + } + } + } + + n := new(big.Int).Set(bigOne) + totient := new(big.Int).Set(bigOne) + pminus1 := new(big.Int) + for _, prime := range primes { + n.Mul(n, prime) + pminus1.Sub(prime, bigOne) + totient.Mul(totient, pminus1) + } + if n.BitLen() != bits { + // This should never happen for nprimes == 2 because + // crypto/rand should set the top two bits in each prime. + // For nprimes > 2 we hope it does not happen often. + continue NextSetOfPrimes + } + + priv.D = new(big.Int) + e := big.NewInt(int64(priv.E)) + ok := priv.D.ModInverse(e, totient) + + if ok != nil { + priv.Primes = primes + priv.N = n + break + } + } + + priv.Precompute() + return priv, nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go new file mode 100644 index 0000000000..c3fedf7a25 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys.go @@ -0,0 +1,707 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + goerrors "errors" + "io" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// PublicKeyType is the armor type for a PGP public key. +var PublicKeyType = "PGP PUBLIC KEY BLOCK" + +// PrivateKeyType is the armor type for a PGP private key. +var PrivateKeyType = "PGP PRIVATE KEY BLOCK" + +// An Entity represents the components of an OpenPGP key: a primary public key +// (which must be a signing key), one or more identities claimed by that key, +// and zero or more subkeys, which may be encryption keys. +type Entity struct { + PrimaryKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Identities map[string]*Identity // indexed by Identity.Name + Revocations []*packet.Signature + Subkeys []Subkey +} + +// An Identity represents an identity claimed by an Entity and zero or more +// assertions by other entities about that claim. +type Identity struct { + Name string // by convention, has the form "Full Name (comment) " + UserId *packet.UserId + SelfSignature *packet.Signature + Signatures []*packet.Signature +} + +// A Subkey is an additional public key in an Entity. Subkeys can be used for +// encryption. +type Subkey struct { + PublicKey *packet.PublicKey + PrivateKey *packet.PrivateKey + Sig *packet.Signature +} + +// A Key identifies a specific public key in an Entity. This is either the +// Entity's primary key or a subkey. 
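+// SelfSignature is the self-signature of the primary identity when the Key is
+// a primary key, and the subkey binding signature when it is a subkey; in both
+// cases it carries the usage flags and expiration time that the key-selection
+// methods below consult.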
+type Key struct {
+	Entity        *Entity
+	PublicKey     *packet.PublicKey
+	PrivateKey    *packet.PrivateKey
+	SelfSignature *packet.Signature
+}
+
+// A KeyRing provides access to public and private keys.
+type KeyRing interface {
+	// KeysById returns the set of keys that have the given key id.
+	KeysById(id uint64) []Key
+	// KeysByIdUsage returns the set of keys with the given id
+	// that also meet the key usage given by requiredUsage.
+	// The requiredUsage is expressed as the bitwise-OR of
+	// packet.KeyFlag* values.
+	KeysByIdUsage(id uint64, requiredUsage byte) []Key
+	// DecryptionKeys returns all private keys that are valid for
+	// decryption.
+	DecryptionKeys() []Key
+}
+
+// PrimaryIdentity returns the Identity marked as primary or the first identity
+// if none are so marked.
+func (e *Entity) PrimaryIdentity() *Identity {
+	var firstIdentity *Identity
+	for _, ident := range e.Identities {
+		if firstIdentity == nil {
+			firstIdentity = ident
+		}
+		if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+			return ident
+		}
+	}
+	return firstIdentity
+}
+
+// EncryptionKey returns the best candidate Key for encrypting a message to the
+// given Entity.
+func (e *Entity) EncryptionKey(now time.Time) (Key, bool) {
+	// Fail to find any encryption key if the primary key has expired.
+	i := e.PrimaryIdentity()
+	primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now)
+	if primaryKeyExpired {
+		return Key{}, false
+	}
+
+	// Iterate the keys to find the newest, unexpired one
+	candidateSubkey := -1
+	var maxTime time.Time
+	for i, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagEncryptCommunications &&
+			subkey.PublicKey.PubKeyAlgo.CanEncrypt() &&
+			!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) {
+			candidateSubkey = i
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we don't have any candidate subkeys for encryption and
+	// the primary key doesn't have any usage metadata then we
+	// assume that the primary key is ok. Or, if the primary key is
+	// marked as ok to encrypt with, then we can obviously use it.
+	// Also, check expiry again just to be safe.
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications &&
+		e.PrimaryKey.PubKeyAlgo.CanEncrypt() && !primaryKeyExpired {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	return Key{}, false
+}
+
+// SigningKey returns the best candidate Key for signing a message with this
+// Entity.
+func (e *Entity) SigningKey(now time.Time) (Key, bool) {
+	return e.SigningKeyById(now, 0)
+}
+
+// SigningKeyById returns the Key for signing a message with this
+// Entity and keyID.
+func (e *Entity) SigningKeyById(now time.Time, id uint64) (Key, bool) {
+	// Fail to find any signing key if the primary key has expired.
+	i := e.PrimaryIdentity()
+	primaryKeyExpired := e.PrimaryKey.KeyExpired(i.SelfSignature, now)
+	if primaryKeyExpired {
+		return Key{}, false
+	}
+
+	// Iterate the keys to find the newest, unexpired one
+	candidateSubkey := -1
+	var maxTime time.Time
+	for idx, subkey := range e.Subkeys {
+		if subkey.Sig.FlagsValid &&
+			subkey.Sig.FlagSign &&
+			subkey.PublicKey.PubKeyAlgo.CanSign() &&
+			!subkey.PublicKey.KeyExpired(subkey.Sig, now) &&
+			(maxTime.IsZero() || subkey.Sig.CreationTime.After(maxTime)) &&
+			(id == 0 || subkey.PrivateKey.KeyId == id) {
+			candidateSubkey = idx
+			maxTime = subkey.Sig.CreationTime
+		}
+	}
+
+	if candidateSubkey != -1 {
+		subkey := e.Subkeys[candidateSubkey]
+		return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
+	}
+
+	// If we have no candidate subkey then we assume that it's ok to sign
+	// with the primary key. Or, if the primary key is marked as ok to
+	// sign with, then we can use it. Also, check expiry again just to be safe.
+	if !i.SelfSignature.FlagsValid || i.SelfSignature.FlagSign &&
+		e.PrimaryKey.PubKeyAlgo.CanSign() && !primaryKeyExpired &&
+		(id == 0 || e.PrivateKey.KeyId == id) {
+		return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
+	}
+
+	// No keys with a valid Signing Flag or no keys matched the id passed in
+	return Key{}, false
+}
+
+// An EntityList contains one or more Entities.
+type EntityList []*Entity
+
+// KeysById returns the set of keys that have the given key id.
+func (el EntityList) KeysById(id uint64) (keys []Key) {
+	for _, e := range el {
+		if e.PrimaryKey.KeyId == id {
+			var selfSig *packet.Signature
+			for _, ident := range e.Identities {
+				if selfSig == nil {
+					selfSig = ident.SelfSignature
+				} else if ident.SelfSignature.IsPrimaryId != nil && *ident.SelfSignature.IsPrimaryId {
+					selfSig = ident.SelfSignature
+					break
+				}
+			}
+			keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
+		}
+
+		for _, subKey := range e.Subkeys {
+			if subKey.PublicKey.KeyId == id {
+				keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
+			}
+		}
+	}
+	return
+}
+
+// KeysByIdUsage returns the set of keys with the given id that also meet
+// the key usage given by requiredUsage. The requiredUsage is expressed as
+// the bitwise-OR of packet.KeyFlag* values.
+func (el EntityList) KeysByIdUsage(id uint64, requiredUsage byte) (keys []Key) {
+	for _, key := range el.KeysById(id) {
+		if len(key.Entity.Revocations) > 0 {
+			continue
+		}
+
+		if key.SelfSignature.RevocationReason != nil {
+			continue
+		}
+
+		if key.SelfSignature.FlagsValid && requiredUsage != 0 {
+			var usage byte
+			if key.SelfSignature.FlagCertify {
+				usage |= packet.KeyFlagCertify
+			}
+			if key.SelfSignature.FlagSign {
+				usage |= packet.KeyFlagSign
+			}
+			if key.SelfSignature.FlagEncryptCommunications {
+				usage |= packet.KeyFlagEncryptCommunications
+			}
+			if key.SelfSignature.FlagEncryptStorage {
+				usage |= packet.KeyFlagEncryptStorage
+			}
+			if usage&requiredUsage != requiredUsage {
+				continue
+			}
+		}
+
+		keys = append(keys, key)
+	}
+	return
+}
+
+// DecryptionKeys returns all private keys that are valid for decryption.
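+// Only subkeys are considered: a private subkey qualifies when its binding
+// signature either carries no usage flags at all or explicitly allows
+// encryption of storage or communications.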
+func (el EntityList) DecryptionKeys() (keys []Key) { + for _, e := range el { + for _, subKey := range e.Subkeys { + if subKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) { + keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig}) + } + } + } + return +} + +// ReadArmoredKeyRing reads one or more public/private keys from an armor keyring file. +func ReadArmoredKeyRing(r io.Reader) (EntityList, error) { + block, err := armor.Decode(r) + if err == io.EOF { + return nil, errors.InvalidArgumentError("no armored data found") + } + if err != nil { + return nil, err + } + if block.Type != PublicKeyType && block.Type != PrivateKeyType { + return nil, errors.InvalidArgumentError("expected public or private key block, got: " + block.Type) + } + + return ReadKeyRing(block.Body) +} + +// ReadKeyRing reads one or more public/private keys. Unsupported keys are +// ignored as long as at least a single valid key is found. +func ReadKeyRing(r io.Reader) (el EntityList, err error) { + packets := packet.NewReader(r) + var lastUnsupportedError error + + for { + var e *Entity + e, err = ReadEntity(packets) + if err != nil { + // TODO: warn about skipped unsupported/unreadable keys + if _, ok := err.(errors.UnsupportedError); ok { + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } else if _, ok := err.(errors.StructuralError); ok { + // Skip unreadable, badly-formatted keys + lastUnsupportedError = err + err = readToNextPublicKey(packets) + } + if err == io.EOF { + err = nil + break + } + if err != nil { + el = nil + break + } + } else { + el = append(el, e) + } + } + + if len(el) == 0 && err == nil { + err = lastUnsupportedError + } + return +} + +// readToNextPublicKey reads packets until the start of the entity and leaves +// the first packet of the new entity in the Reader. +func readToNextPublicKey(packets *packet.Reader) (err error) { + var p packet.Packet + for { + p, err = packets.Next() + if err == io.EOF { + return + } else if err != nil { + if _, ok := err.(errors.UnsupportedError); ok { + err = nil + continue + } + return + } + + if pk, ok := p.(*packet.PublicKey); ok && !pk.IsSubkey { + packets.Unread(p) + return + } + } +} + +// ReadEntity reads an entity (public key, identities, subkeys etc) from the +// given Reader. +func ReadEntity(packets *packet.Reader) (*Entity, error) { + e := new(Entity) + e.Identities = make(map[string]*Identity) + + p, err := packets.Next() + if err != nil { + return nil, err + } + + var ok bool + if e.PrimaryKey, ok = p.(*packet.PublicKey); !ok { + if e.PrivateKey, ok = p.(*packet.PrivateKey); !ok { + packets.Unread(p) + return nil, errors.StructuralError("first packet was not a public/private key") + } + e.PrimaryKey = &e.PrivateKey.PublicKey + } + + if !e.PrimaryKey.PubKeyAlgo.CanSign() { + return nil, errors.StructuralError("primary key cannot be used for signatures") + } + + var revocations []*packet.Signature +EachPacket: + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + + switch pkt := p.(type) { + case *packet.UserId: + if err := addUserID(e, packets, pkt); err != nil { + return nil, err + } + case *packet.Signature: + if pkt.SigType == packet.SigTypeKeyRevocation { + revocations = append(revocations, pkt) + } else if pkt.SigType == packet.SigTypeDirectSignature { + // TODO: RFC4880 5.2.1 permits signatures + // directly on keys (eg. to bind additional + // revocation keys). 
+ } + // Else, ignoring the signature as it does not follow anything + // we would know to attach it to. + case *packet.PrivateKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, &pkt.PublicKey, pkt) + if err != nil { + return nil, err + } + case *packet.PublicKey: + if pkt.IsSubkey == false { + packets.Unread(p) + break EachPacket + } + err = addSubkey(e, packets, pkt, nil) + if err != nil { + return nil, err + } + default: + // we ignore unknown packets + } + } + + if len(e.Identities) == 0 { + return nil, errors.StructuralError("entity without any identities") + } + + for _, revocation := range revocations { + err = e.PrimaryKey.VerifyRevocationSignature(revocation) + if err == nil { + e.Revocations = append(e.Revocations, revocation) + } else { + // TODO: RFC 4880 5.2.3.15 defines revocation keys. + return nil, errors.StructuralError("revocation signature signed by alternate key") + } + } + + return e, nil +} + +func addUserID(e *Entity, packets *packet.Reader, pkt *packet.UserId) error { + // Make a new Identity object, that we might wind up throwing away. + // We'll only add it if we get a valid self-signature over this + // userID. + identity := new(Identity) + identity.Name = pkt.Id + identity.UserId = pkt + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return err + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if (sig.SigType == packet.SigTypePositiveCert || sig.SigType == packet.SigTypeGenericCert) && sig.CheckKeyIdOrFingerprint(e.PrimaryKey) { + if err = e.PrimaryKey.VerifyUserIdSignature(pkt.Id, e.PrimaryKey, sig); err != nil { + return errors.StructuralError("user ID self-signature invalid: " + err.Error()) + } + if identity.SelfSignature == nil || sig.CreationTime.After(identity.SelfSignature.CreationTime) { + identity.SelfSignature = sig + } + identity.Signatures = append(identity.Signatures, sig) + e.Identities[pkt.Id] = identity + } else { + identity.Signatures = append(identity.Signatures, sig) + } + } + + return nil +} + +func addSubkey(e *Entity, packets *packet.Reader, pub *packet.PublicKey, priv *packet.PrivateKey) error { + var subKey Subkey + subKey.PublicKey = pub + subKey.PrivateKey = priv + + for { + p, err := packets.Next() + if err == io.EOF { + break + } else if err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + sig, ok := p.(*packet.Signature) + if !ok { + packets.Unread(p) + break + } + + if sig.SigType != packet.SigTypeSubkeyBinding && sig.SigType != packet.SigTypeSubkeyRevocation { + return errors.StructuralError("subkey signature with wrong type") + } + + if err := e.PrimaryKey.VerifyKeySignature(subKey.PublicKey, sig); err != nil { + return errors.StructuralError("subkey signature invalid: " + err.Error()) + } + + switch sig.SigType { + case packet.SigTypeSubkeyRevocation: + subKey.Sig = sig + case packet.SigTypeSubkeyBinding: + if shouldReplaceSubkeySig(subKey.Sig, sig) { + subKey.Sig = sig + } + } + } + + if subKey.Sig == nil { + return errors.StructuralError("subkey packet not followed by signature") + } + + e.Subkeys = append(e.Subkeys, subKey) + + return nil +} + +func shouldReplaceSubkeySig(existingSig, potentialNewSig *packet.Signature) bool { + if potentialNewSig == nil { + return false + } + + if existingSig == nil { + return true + } + + if existingSig.SigType == packet.SigTypeSubkeyRevocation { + return false // never override a revocation signature + } + + 
return potentialNewSig.CreationTime.After(existingSig.CreationTime) +} + +// SerializePrivate serializes an Entity, including private key material, but +// excluding signatures from other entities, to the given Writer. +// Identities and subkeys are re-signed in case they changed since NewEntry. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { + if e.PrivateKey.Dummy() { + return errors.ErrDummyPrivateKey("dummy private key cannot re-sign identities") + } + return e.serializePrivate(w, config, true) +} + +// SerializePrivateWithoutSigning serializes an Entity, including private key +// material, but excluding signatures from other entities, to the given Writer. +// Self-signatures of identities and subkeys are not re-signed. This is useful +// when serializing GNU dummy keys, among other things. +// If config is nil, sensible defaults will be used. +func (e *Entity) SerializePrivateWithoutSigning(w io.Writer, config *packet.Config) (err error) { + return e.serializePrivate(w, config, false) +} + +func (e *Entity) serializePrivate(w io.Writer, config *packet.Config, reSign bool) (err error) { + if e.PrivateKey == nil { + return goerrors.New("openpgp: private key is missing") + } + err = e.PrivateKey.Serialize(w) + if err != nil { + return + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return + } + if reSign { + err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return + } + } + err = ident.SelfSignature.Serialize(w) + if err != nil { + return + } + } + for _, subkey := range e.Subkeys { + err = subkey.PrivateKey.Serialize(w) + if err != nil { + return + } + if reSign { + err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) + if err != nil { + return + } + if subkey.Sig.EmbeddedSignature != nil { + err = subkey.Sig.EmbeddedSignature.CrossSignKey(subkey.PublicKey, e.PrimaryKey, + subkey.PrivateKey, config) + if err != nil { + return + } + } + } + err = subkey.Sig.Serialize(w) + if err != nil { + return + } + } + return nil +} + +// Serialize writes the public part of the given Entity to w, including +// signatures from other entities. No private key material will be output. +func (e *Entity) Serialize(w io.Writer) error { + err := e.PrimaryKey.Serialize(w) + if err != nil { + return err + } + for _, ident := range e.Identities { + err = ident.UserId.Serialize(w) + if err != nil { + return err + } + for _, sig := range ident.Signatures { + err = sig.Serialize(w) + if err != nil { + return err + } + } + } + for _, subkey := range e.Subkeys { + err = subkey.PublicKey.Serialize(w) + if err != nil { + return err + } + err = subkey.Sig.Serialize(w) + if err != nil { + return err + } + } + return nil +} + +// SignIdentity adds a signature to e, from signer, attesting that identity is +// associated with e. The provided identity must already be an element of +// e.Identities and the private key of signer must have been decrypted if +// necessary. +// If config is nil, sensible defaults will be used. 
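+//
+// A minimal usage sketch (the entities and the identity string are
+// illustrative):
+//
+//	// certifier vouches that the given user ID belongs to signee's key.
+//	err := signee.SignIdentity("Alice (work) <alice@example.com>", certifier, nil)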
+func (e *Entity) SignIdentity(identity string, signer *Entity, config *packet.Config) error { + if signer.PrivateKey == nil { + return errors.InvalidArgumentError("signing Entity must have a private key") + } + if signer.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing Entity's private key must be decrypted") + } + ident, ok := e.Identities[identity] + if !ok { + return errors.InvalidArgumentError("given identity string not found in Entity") + } + + sig := &packet.Signature{ + Version: signer.PrivateKey.Version, + SigType: packet.SigTypeGenericCert, + PubKeyAlgo: signer.PrivateKey.PubKeyAlgo, + Hash: config.Hash(), + CreationTime: config.Now(), + IssuerKeyId: &signer.PrivateKey.KeyId, + } + if err := sig.SignUserId(identity, e.PrimaryKey, signer.PrivateKey, config); err != nil { + return err + } + ident.Signatures = append(ident.Signatures, sig) + return nil +} + +// RevokeKey generates a key revocation signature (packet.SigTypeKeyRevocation) with the +// specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. +func (e *Entity) RevokeKey(reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + reasonCode := uint8(reason) + revSig := &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: config.Now(), + SigType: packet.SigTypeKeyRevocation, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + RevocationReason: &reasonCode, + RevocationReasonText: reasonText, + IssuerKeyId: &e.PrimaryKey.KeyId, + } + + if err := revSig.RevokeKey(e.PrimaryKey, e.PrivateKey, config); err != nil { + return err + } + e.Revocations = append(e.Revocations, revSig) + return nil +} + +// RevokeSubkey generates a subkey revocation signature (packet.SigTypeSubkeyRevocation) for +// a subkey with the specified reason code and text (RFC4880 section-5.2.3.23). +// If config is nil, sensible defaults will be used. 
+func (e *Entity) RevokeSubkey(sk *Subkey, reason packet.ReasonForRevocation, reasonText string, config *packet.Config) error { + if err := e.PrimaryKey.VerifyKeySignature(sk.PublicKey, sk.Sig); err != nil { + return errors.InvalidArgumentError("given subkey is not associated with this key") + } + + reasonCode := uint8(reason) + revSig := &packet.Signature{ + Version: e.PrimaryKey.Version, + CreationTime: config.Now(), + SigType: packet.SigTypeSubkeyRevocation, + PubKeyAlgo: packet.PubKeyAlgoRSA, + Hash: config.Hash(), + RevocationReason: &reasonCode, + RevocationReasonText: reasonText, + IssuerKeyId: &e.PrimaryKey.KeyId, + } + + if err := revSig.RevokeKey(sk.PublicKey, e.PrivateKey, config); err != nil { + return err + } + + sk.Sig = revSig + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go new file mode 100644 index 0000000000..21d17c2f8b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/keys_test_data.go @@ -0,0 +1,336 @@ +package openpgp + +const expiringKeyHex = "c6c04d0451d0c680010800abbb021fd03ffc4e96618901180c3fdcb060ee69eeead97b91256d11420d80b5f1b51930248044130bd300605cf8a05b7a40d3d8cfb0a910be2e3db50dcd50a9c54064c2a5550801daa834ff4480b33d3d3ca495ff8a4e84a886977d17d998f881241a874083d8b995beab555b6d22b8a4817ab17ac3e7304f7d4d2c05c495fb2218348d3bc13651db1d92732e368a9dd7dcefa6eddff30b94706a9aaee47e9d39321460b740c59c6fc3c2fd8ab6c0fb868cb87c0051f0321301fe0f0e1820b15e7fb7063395769b525005c7e30a7ce85984f5cac00504e7b4fdc45d74958de8388436fd5c7ba9ea121f1c851b5911dd1b47a14d81a09e92ef37721e2325b6790011010001cd00c2c07b041001080025050251d0c680050900278d00060b09070803020415080a0203160201021901021b03021e01000a0910e7b484133a890a35ae4b0800a1beb82e7f28eaf5273d6af9d3391314f6280b2b624eaca2851f89a9ebcaf80ac589ebd509f168bc4322106ca2e2ce77a76e071a3c7444787d65216b5f05e82c77928860b92aace3b7d0327db59492f422eb9dfab7249266d37429870b091a98aba8724c2259ebf8f85093f21255eafa75aa841e31d94f2ac891b9755fed455e539044ee69fc47950b80e003fc9f298d695660f28329eaa38037c367efde1727458e514faf990d439a21461b719edaddf9296d3d0647b43ca56cb8dbf63b4fcf8b9968e7928c463470fab3b98e44d0d95645062f94b2d04fe56bd52822b71934db8ce845622c40b92fcbe765a142e7f38b61a6aa9606c8e8858dcd3b6eb1894acec04d0451d1f06b01080088bea67444e1789390e7c0335c86775502d58ec783d99c8ef4e06de235ed3dd4b0467f6f358d818c7d8989d43ec6d69fcbc8c32632d5a1b605e3fa8e41d695fcdcaa535936cd0157f9040dce362519803b908eafe838bb13216c885c6f93e9e8d5745607f0d062322085d6bdc760969149a8ff8dd9f5c18d9bfe2e6f63a06e17694cf1f67587c6fb70e9aebf90ffc528ca3b615ac7c9d4a21ea4f7c06f2e98fbbd90a859b8608bf9ea638e3a54289ce44c283110d0c45fa458de6251cd6e7baf71f80f12c8978340490fd90c92b81736ae902ed958e478dceae2835953d189c45d182aff02ea2be61b81d8e94430f041d638647b43e2fcb45fd512fbf5068b810011010001c2c06504180108000f050251d1f06b050900081095021b0c000a0910e7b484133a890a35e63407fe2ec88d6d1e6c9ce7553ece0cb2524747217bad29f251d33df84599ffcc900141a355abd62126800744068a5e05dc167056aa9205273dc7765a2ed49db15c2a83b8d6e6429c902136f1e12229086c1c10c0053242c2a4ae1930db58163387a48cad64607ff2153c320e42843dec28e3fce90e7399d63ac0affa2fee1f0adc0953c89eb3f46ef1d6c04328ed13b491669d5120a3782e3ffb7c69575fb77eebd108794f4dda9d34be2bae57e8e59ec8ebfda2f6f06104b2321be408ea146e2db482b00c5055c8618de36ac9716f80da2617e225556d0fce61b01c8cea2d1e0ea982c31711060ca370f2739366e1e708f38405d784b49d16a26cf62d152eae734327cec04d0451d1f07b010800d5af91c5e7c2fd8951c8d254eab0c97cdcb66822f868b79b78c366255059a68fd74ebca9adb9b970cd9e586690e6e0756705432
306878c897b10a4b4ca0005966f99ac8fa4e6f9caf54bf8e53844544beee9872a7ac64c119cf1393d96e674254b661f61ee975633d0e8a8672531edb6bb8e211204e7754a9efa802342118eee850beea742bac95a3f706cc2024cf6037a308bb68162b2f53b9a6346a96e6d31871a2456186e24a1c7a82b82ac04afdfd57cd7fb9ba77a9c760d40b76a170f7be525e5fb6a9848cc726e806187710d9b190387df28700f321f988a392899f93815cc937f309129eb94d5299c5547cb2c085898e6639496e70d746c9d3fb9881d0011010001c2c06504180108000f050251d1f07b050900266305021b0c000a0910e7b484133a890a35bff207fd10dfe8c4a6ea1dd30568012b6fd6891a763c87ad0f7a1d112aad9e8e3239378a3b85588c235865bac2e614348cb4f216d7217f53b3ef48c192e0a4d31d64d7bfa5faccf21155965fa156e887056db644a05ad08a85cc6152d1377d9e37b46f4ff462bbe68ace2dc586ef90070314576c985d8037c2ba63f0a7dc17a62e15bd77e88bc61d9d00858979709f12304264a4cf4225c5cf86f12c8e19486cb9cdcc69f18f027e5f16f4ca8b50e28b3115eaff3a345acd21f624aef81f6ede515c1b55b26b84c1e32264754eab672d5489b287e7277ea855e0a5ff2aa9e8b8c76d579a964ec225255f4d57bf66639ccb34b64798846943e162a41096a7002ca21c7f56" +const subkeyUsageHex = "988d04533a52bc010400d26af43085558f65b9e7dbc90cb9238015259aed5e954637adcfa2181548b2d0b60c65f1f42ec5081cbf1bc0a8aa4900acfb77070837c58f26012fbce297d70afe96e759ad63531f0037538e70dbf8e384569b9720d99d8eb39d8d0a2947233ed242436cb6ac7dfe74123354b3d0119b5c235d3dd9c9d6c004f8ffaf67ad8583001101000188b7041f010200210502533b8552170c8001ce094aa433f7040bb2ddf0be3893cb843d0fe70c020700000a0910a42704b92866382aa98404009d63d916a27543da4221c60087c33f1c44bec9998c5438018ed370cca4962876c748e94b73eb39c58eb698063f3fd6346d58dd2a11c0247934c4a9d71f24754f7468f96fb24c3e791dd2392b62f626148ad724189498cbf993db2df7c0cdc2d677c35da0f16cb16c9ce7c33b4de65a4a91b1d21a130ae9cc26067718910ef8e2b417556d627261203c756d627261407379642e65642e61753e88b80413010200220502533a52bc021b03060b090807030206150802090a0b0416020301021e01021780000a0910a42704b92866382a47840400c0c2bd04f5fca586de408b395b3c280a278259c93eaaa8b79a53b97003f8ed502a8a00446dd9947fb462677e4fcac0dac2f0701847d15130aadb6cd9e0705ea0cf5f92f129136c7be21a718d46c8e641eb7f044f2adae573e11ae423a0a9ca51324f03a8a2f34b91fa40c3cc764bee4dccadedb54c768ba0469b683ea53f1c29b88d04533a52bc01040099c92a5d6f8b744224da27bc2369127c35269b58bec179de6bbc038f749344222f85a31933224f26b70243c4e4b2d242f0c4777eaef7b5502f9dad6d8bf3aaeb471210674b74de2d7078af497d55f5cdad97c7bedfbc1b41e8065a97c9c3d344b21fc81d27723af8e374bc595da26ea242dccb6ae497be26eea57e563ed517e90011010001889f0418010200090502533a52bc021b0c000a0910a42704b92866382afa1403ff70284c2de8a043ff51d8d29772602fa98009b7861c540535f874f2c230af8caf5638151a636b21f8255003997ccd29747fdd06777bb24f9593bd7d98a3e887689bf902f999915fcc94625ae487e5d13e6616f89090ebc4fdc7eb5cad8943e4056995bb61c6af37f8043016876a958ec7ebf39c43d20d53b7f546cfa83e8d2604b88d04533b8283010400c0b529316dbdf58b4c54461e7e669dc11c09eb7f73819f178ccd4177b9182b91d138605fcf1e463262fabefa73f94a52b5e15d1904635541c7ea540f07050ce0fb51b73e6f88644cec86e91107c957a114f69554548a85295d2b70bd0b203992f76eb5d493d86d9eabcaa7ef3fc7db7e458438db3fcdb0ca1cc97c638439a9170011010001889f0418010200090502533b8283021b0c000a0910a42704b92866382adc6d0400cfff6258485a21675adb7a811c3e19ebca18851533f75a7ba317950b9997fda8d1a4c8c76505c08c04b6c2cc31dc704d33da36a21273f2b388a1a706f7c3378b66d887197a525936ed9a69acb57fe7f718133da85ec742001c5d1864e9c6c8ea1b94f1c3759cebfd93b18606066c063a63be86085b7e37bdbc65f9a915bf084bb901a204533b85cd110400aed3d2c52af2b38b5b67904b0ef73d6dd7aef86adb770e2b153cd22489654dcc91730892087bb9856ae2d9f7ed1eb48f214243fe86bfe87b349ebd7c30e630e49c07b21fdabf78b7a95c8b7f969e97e3d33f2e074c63552ba64a2ded7badc05ce0ea2be6d
53485f6900c7860c7aa76560376ce963d7271b9b54638a4028b573f00a0d8854bfcdb04986141568046202192263b9b67350400aaa1049dbc7943141ef590a70dcb028d730371d92ea4863de715f7f0f16d168bd3dc266c2450457d46dcbbf0b071547e5fbee7700a820c3750b236335d8d5848adb3c0da010e998908dfd93d961480084f3aea20b247034f8988eccb5546efaa35a92d0451df3aaf1aee5aa36a4c4d462c760ecd9cebcabfbe1412b1f21450f203fd126687cd486496e971a87fd9e1a8a765fe654baa219a6871ab97768596ab05c26c1aeea8f1a2c72395a58dbc12ef9640d2b95784e974a4d2d5a9b17c25fedacfe551bda52602de8f6d2e48443f5dd1a2a2a8e6a5e70ecdb88cd6e766ad9745c7ee91d78cc55c3d06536b49c3fee6c3d0b6ff0fb2bf13a314f57c953b8f4d93bf88e70418010200090502533b85cd021b0200520910a42704b92866382a47200419110200060502533b85cd000a091042ce2c64bc0ba99214b2009e26b26852c8b13b10c35768e40e78fbbb48bd084100a0c79d9ea0844fa5853dd3c85ff3ecae6f2c9dd6c557aa04008bbbc964cd65b9b8299d4ebf31f41cc7264b8cf33a00e82c5af022331fac79efc9563a822497ba012953cefe2629f1242fcdcb911dbb2315985bab060bfd58261ace3c654bdbbe2e8ed27a46e836490145c86dc7bae15c011f7e1ffc33730109b9338cd9f483e7cef3d2f396aab5bd80efb6646d7e778270ee99d934d187dd98" +const revokedKeyHex = "988d045331ce82010400c4fdf7b40a5477f206e6ee278eaef888ca73bf9128a9eef9f2f1ddb8b7b71a4c07cfa241f028a04edb405e4d916c61d6beabc333813dc7b484d2b3c52ee233c6a79b1eea4e9cc51596ba9cd5ac5aeb9df62d86ea051055b79d03f8a4fa9f38386f5bd17529138f3325d46801514ea9047977e0829ed728e68636802796801be10011010001889f04200102000905025331d0e3021d03000a0910a401d9f09a34f7c042aa040086631196405b7e6af71026b88e98012eab44aa9849f6ef3fa930c7c9f23deaedba9db1538830f8652fb7648ec3fcade8dbcbf9eaf428e83c6cbcc272201bfe2fbb90d41963397a7c0637a1a9d9448ce695d9790db2dc95433ad7be19eb3de72dacf1d6db82c3644c13eae2a3d072b99bb341debba012c5ce4006a7d34a1f4b94b444526567205265766f6b657220283c52656727732022424d204261726973746122204b657920262530305c303e5c29203c72656740626d626172697374612e636f2e61753e88b704130102002205025331ce82021b03060b090807030206150802090a0b0416020301021e01021780000a0910a401d9f09a34f7c0019c03f75edfbeb6a73e7225ad3cc52724e2872e04260d7daf0d693c170d8c4b243b8767bc7785763533febc62ec2600c30603c433c095453ede59ff2fcabeb84ce32e0ed9d5cf15ffcbc816202b64370d4d77c1e9077d74e94a16fb4fa2e5bec23a56d7a73cf275f91691ae1801a976fcde09e981a2f6327ac27ea1fecf3185df0d56889c04100102000605025331cfb5000a0910fe9645554e8266b64b4303fc084075396674fb6f778d302ac07cef6bc0b5d07b66b2004c44aef711cbac79617ef06d836b4957522d8772dd94bf41a2f4ac8b1ee6d70c57503f837445a74765a076d07b829b8111fc2a918423ddb817ead7ca2a613ef0bfb9c6b3562aec6c3cf3c75ef3031d81d95f6563e4cdcc9960bcb386c5d757b104fcca5fe11fc709df884604101102000605025331cfe7000a09107b15a67f0b3ddc0317f6009e360beea58f29c1d963a22b962b80788c3fa6c84e009d148cfde6b351469b8eae91187eff07ad9d08fcaab88d045331ce820104009f25e20a42b904f3fa555530fe5c46737cf7bd076c35a2a0d22b11f7e0b61a69320b768f4a80fe13980ce380d1cfc4a0cd8fbe2d2e2ef85416668b77208baa65bf973fe8e500e78cc310d7c8705cdb34328bf80e24f0385fce5845c33bc7943cf6b11b02348a23da0bf6428e57c05135f2dc6bd7c1ce325d666d5a5fd2fd5e410011010001889f04180102000905025331ce82021b0c000a0910a401d9f09a34f7c0418003fe34feafcbeaef348a800a0d908a7a6809cc7304017d820f70f0474d5e23cb17e38b67dc6dca282c6ca00961f4ec9edf2738d0f087b1d81e4871ef08e1798010863afb4eac4c44a376cb343be929c5be66a78cfd4456ae9ec6a99d97f4e1c3ff3583351db2147a65c0acef5c003fb544ab3a2e2dc4d43646f58b811a6c3a369d1f" +const revokedSubkeyHex = 
"988d04533121f6010400aefc803a3e4bb1a61c86e8a86d2726c6a43e0079e9f2713f1fa017e9854c83877f4aced8e331d675c67ea83ddab80aacbfa0b9040bb12d96f5a3d6be09455e2a76546cbd21677537db941cab710216b6d24ec277ee0bd65b910f416737ed120f6b93a9d3b306245c8cfd8394606fdb462e5cf43c551438d2864506c63367fc890011010001b41d416c696365203c616c69636540626d626172697374612e636f2e61753e88bb041301020025021b03060b090807030206150802090a0b0416020301021e01021780050253312798021901000a09104ef7e4beccde97f015a803ff5448437780f63263b0df8442a995e7f76c221351a51edd06f2063d8166cf3157aada4923dfc44aa0f2a6a4da5cf83b7fe722ba8ab416c976e77c6b5682e7f1069026673bd0de56ba06fd5d7a9f177607f277d9b55ff940a638c3e68525c67517e2b3d976899b93ca267f705b3e5efad7d61220e96b618a4497eab8d04403d23f8846041011020006050253312910000a09107b15a67f0b3ddc03d96e009f50b6365d86c4be5d5e9d0ea42d5e56f5794c617700a0ab274e19c2827780016d23417ce89e0a2c0d987d889c04100102000605025331cf7a000a0910a401d9f09a34f7c0ee970400aca292f213041c9f3b3fc49148cbda9d84afee6183c8dd6c5ff2600b29482db5fecd4303797be1ee6d544a20a858080fec43412061c9a71fae4039fd58013b4ae341273e6c66ad4c7cdd9e68245bedb260562e7b166f2461a1032f2b38c0e0e5715fb3d1656979e052b55ca827a76f872b78a9fdae64bc298170bfcebedc1271b41a416c696365203c616c696365407379646973702e6f722e61753e88b804130102002205025331278b021b03060b090807030206150802090a0b0416020301021e01021780000a09104ef7e4beccde97f06a7003fa03c3af68d272ebc1fa08aa72a03b02189c26496a2833d90450801c4e42c5b5f51ad96ce2d2c9cef4b7c02a6a2fcf1412d6a2d486098eb762f5010a201819c17fd2888aec8eda20c65a3b75744de7ee5cc8ac7bfc470cbe3cb982720405a27a3c6a8c229cfe36905f881b02ed5680f6a8f05866efb9d6c5844897e631deb949ca8846041011020006050253312910000a09107b15a67f0b3ddc0347bc009f7fa35db59147469eb6f2c5aaf6428accb138b22800a0caa2f5f0874bacc5909c652a57a31beda65eddd5889c04100102000605025331cf7a000a0910a401d9f09a34f7c0316403ff46f2a5c101256627f16384d34a38fb47a6c88ba60506843e532d91614339fccae5f884a5741e7582ffaf292ba38ee10a270a05f139bde3814b6a077e8cd2db0f105ebea2a83af70d385f13b507fac2ad93ff79d84950328bb86f3074745a8b7f9b64990fb142e2a12976e27e8d09a28dc5621f957ac49091116da410ac3cbde1b88d04533121f6010400cbd785b56905e4192e2fb62a720727d43c4fa487821203cf72138b884b78b701093243e1d8c92a0248a6c0203a5a88693da34af357499abacaf4b3309c640797d03093870a323b4b6f37865f6eaa2838148a67df4735d43a90ca87942554cdf1c4a751b1e75f9fd4ce4e97e278d6c1c7ed59d33441df7d084f3f02beb68896c70011010001889f0418010200090502533121f6021b0c000a09104ef7e4beccde97f0b98b03fc0a5ccf6a372995835a2f5da33b282a7d612c0ab2a97f59cf9fff73e9110981aac2858c41399afa29624a7fd8a0add11654e3d882c0fd199e161bdad65e5e2548f7b68a437ea64293db1246e3011cbb94dc1bcdeaf0f2539bd88ff16d95547144d97cead6a8c5927660a91e6db0d16eb36b7b49a3525b54d1644e65599b032b7eb901a204533127a0110400bd3edaa09eff9809c4edc2c2a0ebe52e53c50a19c1e49ab78e6167bf61473bb08f2050d78a5cbbc6ed66aff7b42cd503f16b4a0b99fa1609681fca9b7ce2bbb1a5b3864d6cdda4d7ef7849d156d534dea30fb0efb9e4cf8959a2b2ce623905882d5430b995a15c3b9fe92906086788b891002924f94abe139b42cbbfaaabe42f00a0b65dc1a1ad27d798adbcb5b5ad02d2688c89477b03ff4eebb6f7b15a73b96a96bed201c0e5e4ea27e4c6e2dd1005b94d4b90137a5b1cf5e01c6226c070c4cc999938101578877ee76d296b9aab8246d57049caacf489e80a3f40589cade790a020b1ac146d6f7a6241184b8c7fcde680eae3188f5dcbe846d7f7bdad34f6fcfca08413e19c1d5df83fc7c7c627d493492e009c2f52a80400a2fe82de87136fd2e8845888c4431b032ba29d9a29a804277e31002a8201fb8591a3e55c7a0d0881496caf8b9fb07544a5a4879291d0dc026a0ea9e5bd88eb4aa4947bbd694b25012e208a250d65ddc6f1eea59d3aed3b4ec15fcab85e2afaa23a40ab1ef9ce3e11e1bc1c34a0e758e7aa64deb8739276df0af7d4121f834a9b88e70418010200090502533127a002
1b02005209104ef7e4beccde97f047200419110200060502533127a0000a0910dbce4ee19529437fe045009c0b32f5ead48ee8a7e98fac0dea3d3e6c0e2c552500a0ad71fadc5007cfaf842d9b7db3335a8cdad15d3d1a6404009b08e2c68fe8f3b45c1bb72a4b3278cdf3012aa0f229883ad74aa1f6000bb90b18301b2f85372ca5d6b9bf478d235b733b1b197d19ccca48e9daf8e890cb64546b4ce1b178faccfff07003c172a2d4f5ebaba9f57153955f3f61a9b80a4f5cb959908f8b211b03b7026a8a82fc612bfedd3794969bcf458c4ce92be215a1176ab88d045331d144010400a5063000c5aaf34953c1aa3bfc95045b3aab9882b9a8027fecfe2142dc6b47ba8aca667399990244d513dd0504716908c17d92c65e74219e004f7b83fc125e575dd58efec3ab6dd22e3580106998523dea42ec75bf9aa111734c82df54630bebdff20fe981cfc36c76f865eb1c2fb62c9e85bc3a6e5015a361a2eb1c8431578d0011010001889f04280102000905025331d433021d03000a09104ef7e4beccde97f02e5503ff5e0630d1b65291f4882b6d40a29da4616bb5088717d469fbcc3648b8276de04a04988b1f1b9f3e18f52265c1f8b6c85861691c1a6b8a3a25a1809a0b32ad330aec5667cb4262f4450649184e8113849b05e5ad06a316ea80c001e8e71838190339a6e48bbde30647bcf245134b9a97fa875c1d83a9862cae87ffd7e2c4ce3a1b89013d04180102000905025331d144021b0200a809104ef7e4beccde97f09d2004190102000605025331d144000a0910677815e371c2fd23522203fe22ab62b8e7a151383cea3edd3a12995693911426f8ccf125e1f6426388c0010f88d9ca7da2224aee8d1c12135998640c5e1813d55a93df472faae75bef858457248db41b4505827590aeccf6f9eb646da7f980655dd3050c6897feddddaca90676dee856d66db8923477d251712bb9b3186b4d0114daf7d6b59272b53218dd1da94a03ff64006fcbe71211e5daecd9961fba66cdb6de3f914882c58ba5beddeba7dcb950c1156d7fba18c19ea880dccc800eae335deec34e3b84ac75ffa24864f782f87815cda1c0f634b3dd2fa67cea30811d21723d21d9551fa12ccbcfa62b6d3a15d01307b99925707992556d50065505b090aadb8579083a20fe65bd2a270da9b011" + +const missingCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAJcXQeP+NmuciE99YcJoffxv +2gVLU4ZXBNHEaP0mgaJ1+tmMD089vUQAcyGRvw8jfsNsVZQIOAuRxY94aHQhIRHR +bUzBN28ofo/AJJtfx62C15xt6fDKRV6HXYqAiygrHIpEoRLyiN69iScUsjIJeyFL +C8wa72e8pSL6dkHoaV1N9ZH/xmrJ+k0vsgkQaAh9CzYufncDxcwkoP+aOlGtX1gP +WwWoIbz0JwLEMPHBWvDDXQcQPQTYQyj+LGC9U6f9VZHN25E94subM1MjuT9OhN9Y +MLfWaaIc5WyhLFyQKW2Upofn9wSFi8ubyBnv640Dfd0rVmaWv7LNTZpoZ/GbJAMA +EQEAAYkBHwQYAQIACQUCU5ygeQIbAgAKCRDt1A0FCB6SP0zCB/sEzaVR38vpx+OQ +MMynCBJrakiqDmUZv9xtplY7zsHSQjpd6xGflbU2n+iX99Q+nav0ETQZifNUEd4N +1ljDGQejcTyKD6Pkg6wBL3x9/RJye7Zszazm4+toJXZ8xJ3800+BtaPoI39akYJm ++ijzbskvN0v/j5GOFJwQO0pPRAFtdHqRs9Kf4YanxhedB4dIUblzlIJuKsxFit6N +lgGRblagG3Vv2eBszbxzPbJjHCgVLR3RmrVezKOsZjr/2i7X+xLWIR0uD3IN1qOW +CXQxLBizEEmSNVNxsp7KPGTLnqO3bPtqFirxS9PJLIMPTPLNBY7ZYuPNTMqVIUWF +4artDmrG +=7FfJ +-----END PGP PUBLIC KEY BLOCK-----` + +const invalidCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + 
+mQENBFMYynYBCACVOZ3/e8Bm2b9KH9QyIlHGo/i1bnkpqsgXj8tpJ2MIUOnXMMAY +ztW7kKFLCmgVdLIC0vSoLA4yhaLcMojznh/2CcUglZeb6Ao8Gtelr//Rd5DRfPpG +zqcfUo+m+eO1co2Orabw0tZDfGpg5p3AYl0hmxhUyYSc/xUq93xL1UJzBFgYXY54 +QsM8dgeQgFseSk/YvdP5SMx1ev+eraUyiiUtWzWrWC1TdyRa5p4UZg6Rkoppf+WJ +QrW6BWrhAtqATHc8ozV7uJjeONjUEq24roRc/OFZdmQQGK6yrzKnnbA6MdHhqpdo +9kWDcXYb7pSE63Lc+OBa5X2GUVvXJLS/3nrtABEBAAG0F2ludmFsaWQtc2lnbmlu +Zy1zdWJrZXlziQEoBBMBAgASBQJTnKB5AhsBAgsHAhUIAh4BAAoJEO3UDQUIHpI/ +dN4H/idX4FQ1LIZCnpHS/oxoWQWfpRgdKAEM0qCqjMgiipJeEwSQbqjTCynuh5/R +JlODDz85ABR06aoF4l5ebGLQWFCYifPnJZ/Yf5OYcMGtb7dIbqxWVFL9iLMO/oDL +ioI3dotjPui5e+2hI9pVH1UHB/bZ/GvMGo6Zg0XxLPolKQODMVjpjLAQ0YJ3spew +RAmOGre6tIvbDsMBnm8qREt7a07cBJ6XK7xjxYaZHQBiHVxyEWDa6gyANONx8duW +/fhQ/zDTnyVM/ik6VO0Ty9BhPpcEYLFwh5c1ilFari1ta3e6qKo6ZGa9YMk/REhu +yBHd9nTkI+0CiQUmbckUiVjDKKe5AQ0EUxjKdgEIAIINDqlj7X6jYKc6DjwrOkjQ +UIRWbQQar0LwmNilehmt70g5DCL1SYm9q4LcgJJ2Nhxj0/5qqsYib50OSWMcKeEe +iRXpXzv1ObpcQtI5ithp0gR53YPXBib80t3bUzomQ5UyZqAAHzMp3BKC54/vUrSK +FeRaxDzNLrCeyI00+LHNUtwghAqHvdNcsIf8VRumK8oTm3RmDh0TyjASWYbrt9c8 +R1Um3zuoACOVy+mEIgIzsfHq0u7dwYwJB5+KeM7ZLx+HGIYdUYzHuUE1sLwVoELh ++SHIGHI1HDicOjzqgajShuIjj5hZTyQySVprrsLKiXS6NEwHAP20+XjayJ/R3tEA +EQEAAYkCPgQYAQIBKAUCU5ygeQIbAsBdIAQZAQIABgUCU5ygeQAKCRCpVlnFZmhO +52RJB/9uD1MSa0wjY6tHOIgquZcP3bHBvHmrHNMw9HR2wRCMO91ZkhrpdS3ZHtgb +u3/55etj0FdvDo1tb8P8FGSVtO5Vcwf5APM8sbbqoi8L951Q3i7qt847lfhu6sMl +w0LWFvPTOLHrliZHItPRjOltS1WAWfr2jUYhsU9ytaDAJmvf9DujxEOsN5G1YJep +54JCKVCkM/y585Zcnn+yxk/XwqoNQ0/iJUT9qRrZWvoeasxhl1PQcwihCwss44A+ +YXaAt3hbk+6LEQuZoYS73yR3WHj+42tfm7YxRGeubXfgCEz/brETEWXMh4pe0vCL +bfWrmfSPq2rDegYcAybxRQz0lF8PAAoJEO3UDQUIHpI/exkH/0vQfdHA8g/N4T6E +i6b1CUVBAkvtdJpCATZjWPhXmShOw62gkDw306vHPilL4SCvEEi4KzG72zkp6VsB +DSRcpxCwT4mHue+duiy53/aRMtSJ+vDfiV1Vhq+3sWAck/yUtfDU9/u4eFaiNok1 +8/Gd7reyuZt5CiJnpdPpjCwelK21l2w7sHAnJF55ITXdOxI8oG3BRKufz0z5lyDY +s2tXYmhhQIggdgelN8LbcMhWs/PBbtUr6uZlNJG2lW1yscD4aI529VjwJlCeo745 +U7pO4eF05VViUJ2mmfoivL3tkhoTUWhx8xs8xCUcCg8DoEoSIhxtOmoTPR22Z9BL +6LCg2mg= +=Dhm4 +-----END PGP PUBLIC KEY BLOCK-----` + +const goodCrossSignatureKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Version: GnuPG v1 + +mI0EVUqeVwEEAMufHRrMPWK3gyvi0O0tABCs/oON9zV9KDZlr1a1M91ShCSFwCPo +7r80PxdWVWcj0V5h50/CJYtpN3eE/mUIgW2z1uDYQF1OzrQ8ubrksfsJvpAhENom +lTQEppv9mV8qhcM278teb7TX0pgrUHLYF5CfPdp1L957JLLXoQR/lwLVABEBAAG0 +E2dvb2Qtc2lnbmluZy1zdWJrZXmIuAQTAQIAIgUCVUqeVwIbAwYLCQgHAwIGFQgC +CQoLBBYCAwECHgECF4AACgkQNRjL95IRWP69XQQAlH6+eyXJN4DZTLX78KGjHrsw +6FCvxxClEPtPUjcJy/1KCRQmtLAt9PbbA78dvgzjDeZMZqRAwdjyJhjyg/fkU2OH +7wq4ktjUu+dLcOBb+BFMEY+YjKZhf6EJuVfxoTVr5f82XNPbYHfTho9/OABKH6kv +X70PaKZhbwnwij8Nts65AaIEVUqftREEAJ3WxZfqAX0bTDbQPf2CMT2IVMGDfhK7 +GyubOZgDFFjwUJQvHNvsrbeGLZ0xOBumLINyPO1amIfTgJNm1iiWFWfmnHReGcDl +y5mpYG60Mb79Whdcer7CMm3AqYh/dW4g6IB02NwZMKoUHo3PXmFLxMKXnWyJ0clw +R0LI/Qn509yXAKDh1SO20rqrBM+EAP2c5bfI98kyNwQAi3buu94qo3RR1ZbvfxgW +CKXDVm6N99jdZGNK7FbRifXqzJJDLcXZKLnstnC4Sd3uyfyf1uFhmDLIQRryn5m+ +LBYHfDBPN3kdm7bsZDDq9GbTHiFZUfm/tChVKXWxkhpAmHhU/tH6GGzNSMXuIWSO +aOz3Rqq0ED4NXyNKjdF9MiwD/i83S0ZBc0LmJYt4Z10jtH2B6tYdqnAK29uQaadx +yZCX2scE09UIm32/w7pV77CKr1Cp/4OzAXS1tmFzQ+bX7DR+Gl8t4wxr57VeEMvl +BGw4Vjh3X8//m3xynxycQU18Q1zJ6PkiMyPw2owZ/nss3hpSRKFJsxMLhW3fKmKr +Ey2KiOcEGAECAAkFAlVKn7UCGwIAUgkQNRjL95IRWP5HIAQZEQIABgUCVUqftQAK +CRD98VjDN10SqkWrAKDTpEY8D8HC02E/KVC5YUI01B30wgCgurpILm20kXEDCeHp +C5pygfXw1DJrhAP+NyPJ4um/bU1I+rXaHHJYroYJs8YSweiNcwiHDQn0Engh/mVZ +SqLHvbKh2dL/RXymC3+rjPvQf5cup9bPxNMa6WagdYBNAfzWGtkVISeaQW+cTEp/ +MtgVijRGXR/lGLGETPg2X3Afwn9N9bLMBkBprKgbBqU7lpaoPupxT61bL70= +=vtbN +-----END PGP PUBLIC KEY BLOCK-----` + 
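+// revokedUserIDKey contains a primary key with two user IDs; the second one,
+// "Golang Gopher <revoked@golang.com>", carries a revocation signature.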
+const revokedUserIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQENBFsgO5EBCADhREPmcjsPkXe1z7ctvyWL0S7oa9JaoGZ9oPDHFDlQxd0qlX2e +DZJZDg0qYvVixmaULIulApq1puEsaJCn3lHUbHlb4PYKwLEywYXM28JN91KtLsz/ +uaEX2KC5WqeP40utmzkNLq+oRX/xnRMgwbO7yUNVG2UlEa6eI+xOXO3YtLdmJMBW +ClQ066ZnOIzEo1JxnIwha1CDBMWLLfOLrg6l8InUqaXbtEBbnaIYO6fXVXELUjkx +nmk7t/QOk0tXCy8muH9UDqJkwDUESY2l79XwBAcx9riX8vY7vwC34pm22fAUVLCJ +x1SJx0J8bkeNp38jKM2Zd9SUQqSbfBopQ4pPABEBAAG0I0dvbGFuZyBHb3BoZXIg +PG5vLXJlcGx5QGdvbGFuZy5jb20+iQFUBBMBCgA+FiEE5Ik5JLcNx6l6rZfw1oFy +9I6cUoMFAlsgO5ECGwMFCQPCZwAFCwkIBwMFFQoJCAsFFgIDAQACHgECF4AACgkQ +1oFy9I6cUoMIkwf8DNPeD23i4jRwd/pylbvxwZintZl1fSwTJW1xcOa1emXaEtX2 +depuqhP04fjlRQGfsYAQh7X9jOJxAHjTmhqFBi5sD7QvKU00cPFYbJ/JTx0B41bl +aXnSbGhRPh63QtEZL7ACAs+shwvvojJqysx7kyVRu0EW2wqjXdHwR/SJO6nhNBa2 +DXzSiOU/SUA42mmG+5kjF8Aabq9wPwT9wjraHShEweNerNMmOqJExBOy3yFeyDpa +XwEZFzBfOKoxFNkIaVf5GSdIUGhFECkGvBMB935khftmgR8APxdU4BE7XrXexFJU +8RCuPXonm4WQOwTWR0vQg64pb2WKAzZ8HhwTGbQiR29sYW5nIEdvcGhlciA8cmV2 +b2tlZEBnb2xhbmcuY29tPokBNgQwAQoAIBYhBOSJOSS3Dcepeq2X8NaBcvSOnFKD +BQJbIDv3Ah0AAAoJENaBcvSOnFKDfWMIAKhI/Tvu3h8fSUxp/gSAcduT6bC1JttG +0lYQ5ilKB/58lBUA5CO3ZrKDKlzW3M8VEcvohVaqeTMKeoQd5rCZq8KxHn/KvN6N +s85REfXfniCKfAbnGgVXX3kDmZ1g63pkxrFu0fDZjVDXC6vy+I0sGyI/Inro0Pzb +tvn0QCsxjapKK15BtmSrpgHgzVqVg0cUp8vqZeKFxarYbYB2idtGRci4b9tObOK0 +BSTVFy26+I/mrFGaPrySYiy2Kz5NMEcRhjmTxJ8jSwEr2O2sUR0yjbgUAXbTxDVE +/jg5fQZ1ACvBRQnB7LvMHcInbzjyeTM3FazkkSYQD6b97+dkWwb1iWG5AQ0EWyA7 +kQEIALkg04REDZo1JgdYV4x8HJKFS4xAYWbIva1ZPqvDNmZRUbQZR2+gpJGEwn7z +VofGvnOYiGW56AS5j31SFf5kro1+1bZQ5iOONBng08OOo58/l1hRseIIVGB5TGSa +PCdChKKHreJI6hS3mShxH6hdfFtiZuB45rwoaArMMsYcjaezLwKeLc396cpUwwcZ +snLUNd1Xu5EWEF2OdFkZ2a1qYdxBvAYdQf4+1Nr+NRIx1u1NS9c8jp3PuMOkrQEi +bNtc1v6v0Jy52mKLG4y7mC/erIkvkQBYJdxPaP7LZVaPYc3/xskcyijrJ/5ufoD8 +K71/ShtsZUXSQn9jlRaYR0EbojMAEQEAAYkBPAQYAQoAJhYhBOSJOSS3Dcepeq2X +8NaBcvSOnFKDBQJbIDuRAhsMBQkDwmcAAAoJENaBcvSOnFKDkFMIAIt64bVZ8x7+ +TitH1bR4pgcNkaKmgKoZz6FXu80+SnbuEt2NnDyf1cLOSimSTILpwLIuv9Uft5Pb +OraQbYt3xi9yrqdKqGLv80bxqK0NuryNkvh9yyx5WoG1iKqMj9/FjGghuPrRaT4l +QinNAghGVkEy1+aXGFrG2DsOC1FFI51CC2WVTzZ5RwR2GpiNRfESsU1rZAUqf/2V +yJl9bD5R4SUNy8oQmhOxi+gbhD4Ao34e4W0ilibslI/uawvCiOwlu5NGd8zv5n+U +heiQvzkApQup5c+BhH5zFDFdKJ2CBByxw9+7QjMFI/wgLixKuE0Ob2kAokXf7RlB +7qTZOahrETw= +=IKnw +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyKwKQEEALwXhKBnyaaNFeK3ljfc/qn9X/QFw+28EUfgZPHjRmHubuXLE2uR +s3ZoSXY2z7Dkv+NyHYMt8p+X8q5fR7JvUjK2XbPyKoiJVnHINll83yl67DaWfKNL +EjNoO0kIfbXfCkZ7EG6DL+iKtuxniGTcnGT47e+HJSqb/STpLMnWwXjBABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQQ/ +lRafP/p9PytHbwxMvYJsOQdOOAUCWyKwKQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRBMvYJsOQdOOOsFBAC62mXww8XuqvYLcVOvHkWLT6mhxrQOJXnlfpn7 +2uBV9CMhoG/Ycd43NONsJrB95Apr9TDIqWnVszNbqPCuBhZQSGLdbiDKjxnCWBk0 +69qv4RNtkpOhYB7jK4s8F5oQZqId6JasT/PmJTH92mhBYhhTQr0GYFuPX2UJdkw9 +Sn9C67iNBFsisDUBBAC3A+Yo9lgCnxi/pfskyLrweYif6kIXWLAtLTsM6g/6jt7b +wTrknuCPyTv0QKGXsAEe/cK/Xq3HvX9WfXPGIHc/X56ZIsHQ+RLowbZV/Lhok1IW +FAuQm8axr/by80cRwFnzhfPc/ukkAq2Qyj4hLsGblu6mxeAhzcp8aqmWOO2H9QAR +AQABiLYEKAEKACAWIQQ/lRafP/p9PytHbwxMvYJsOQdOOAUCWyK16gIdAAAKCRBM +vYJsOQdOOB1vA/4u4uLONsE+2GVOyBsHyy7uTdkuxaR9b54A/cz6jT/tzUbeIzgx +22neWhgvIEghnUZd0vEyK9k1wy5vbDlEo6nKzHso32N1QExGr5upRERAxweDxGOj +7luDwNypI7QcifE64lS/JmlnunwRCdRWMKc0Fp+7jtRc5mpwyHN/Suf5RokBagQY +AQoAIBYhBD+VFp8/+n0/K0dvDEy9gmw5B044BQJbIrA1AhsCAL8JEEy9gmw5B044 +tCAEGQEKAB0WIQSNdnkaWY6t62iX336UXbGvYdhXJwUCWyKwNQAKCRCUXbGvYdhX +JxJSA/9fCPHP6sUtGF1o3G1a3yvOUDGr1JWcct9U+QpbCt1mZoNopCNDDQAJvDWl 
+mvDgHfuogmgNJRjOMznvahbF+wpTXmB7LS0SK412gJzl1fFIpK4bgnhu0TwxNsO1 +8UkCZWqxRMgcNUn9z6XWONK8dgt5JNvHSHrwF4CxxwjL23AAtK+FA/UUoi3U4kbC +0XnSr1Sl+mrzQi1+H7xyMe7zjqe+gGANtskqexHzwWPUJCPZ5qpIa2l8ghiUim6b +4ymJ+N8/T8Yva1FaPEqfMzzqJr8McYFm0URioXJPvOAlRxdHPteZ0qUopt/Jawxl +Xt6B9h1YpeLoJwjwsvbi98UTRs0jXwoY +=3fWu +-----END PGP PUBLIC KEY BLOCK-----` + +const keyWithSubKeyAndBadSelfSigOrder = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +mI0EWyLLDQEEAOqIOpJ/ha1OYAGduu9tS3rBz5vyjbNgJO4sFveEM0mgsHQ0X9/L +plonW+d0gRoO1dhJ8QICjDAc6+cna1DE3tEb5m6JtQ30teLZuqrR398Cf6w7NNVz +r3lrlmnH9JaKRuXl7tZciwyovneBfZVCdtsRZjaLI1uMQCz/BToiYe3DABEBAAG0 +I0dvbGFuZyBHb3BoZXIgPG5vLXJlcGx5QGdvbGFuZy5jb20+iM4EEwEKADgWIQRZ +sixZOfQcZdW0wUqmgmdsv1O9xgUCWyLLDQIbAwULCQgHAwUVCgkICwUWAgMBAAIe +AQIXgAAKCRCmgmdsv1O9xql2A/4pix98NxjhdsXtazA9agpAKeADf9tG4Za27Gj+ +3DCww/E4iP2X35jZimSm/30QRB6j08uGCqd9vXkkJxtOt63y/IpVOtWX6vMWSTUm +k8xKkaYMP0/IzKNJ1qC/qYEUYpwERBKg9Z+k99E2Ql4kRHdxXUHq6OzY79H18Y+s +GdeM/riNBFsiyxsBBAC54Pxg/8ZWaZX1phGdwfe5mek27SOYpC0AxIDCSOdMeQ6G +HPk38pywl1d+S+KmF/F4Tdi+kWro62O4eG2uc/T8JQuRDUhSjX0Qa51gPzJrUOVT +CFyUkiZ/3ZDhtXkgfuso8ua2ChBgR9Ngr4v43tSqa9y6AK7v0qjxD1x+xMrjXQAR +AQABiQFxBBgBCgAmAhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsizTIFCQAN +MRcAv7QgBBkBCgAdFiEEJcoVUVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62j +UpRPICQq5gQApoWIigZxXFoM0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBS +YnjyA4+n1D+zB2VqliD2QrsX12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZs +nRJmXV+bsvD4sidLZLjdwOVa3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/ +U73GGi0D/i20VW8AWYAPACm2zMlzExKTOAV01YTQH/3vW0WLrOse53WcIVZga6es +HuO4So0SOEAvxKMe5HpRIu2dJxTvd99Bo9xk9xJU0AoFrO0vNCRnL+5y68xMlODK +lEw5/kl0jeaTBp6xX0HDQOEVOpPGUwWV4Ij2EnvfNDXaE1vK1kffiQFrBBgBCgAg +AhsCFiEEWbIsWTn0HGXVtMFKpoJnbL9TvcYFAlsi0AYAv7QgBBkBCgAdFiEEJcoV +UVJIk5RWj1c/o62jUpRPICQFAlsiyxsACgkQo62jUpRPICQq5gQApoWIigZxXFoM +0uw4uJBS5JFZtirTANvirZV5RhndwHeMN6JttaBSYnjyA4+n1D+zB2VqliD2QrsX +12KJN6rGOehCtEIClQ1Hodo9nC6kMzzAwW1O8bZsnRJmXV+bsvD4sidLZLjdwOVa +3Cxh6pvq4Uur6a7/UYx121hEY0Qx0s8JEKaCZ2y/U73GRl0EAJokkXmy4zKDHWWi +wvK9gi2gQgRkVnu2AiONxJb5vjeLhM/07BRmH6K1o+w3fOeEQp4FjXj1eQ5fPSM6 +Hhwx2CTl9SDnPSBMiKXsEFRkmwQ2AAsQZLmQZvKBkLZYeBiwf+IY621eYDhZfo+G +1dh1WoUCyREZsJQg2YoIpWIcvw+a +=bNRo +-----END PGP PUBLIC KEY BLOCK----- +` + +const onlySubkeyNoPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1 + +lQCVBFggvocBBAC7vBsHn7MKmS6IiiZNTXdciplVgS9cqVd+RTdIAoyNTcsiV1H0 +GQ3QtodOPeDlQDNoqinqaobd7R9g3m3hS53Nor7yBZkCWQ5x9v9JxRtoAq0sklh1 +I1X2zEqZk2l6YrfBF/64zWrhjnW3j23szkrAIVu0faQXbQ4z56tmZrw11wARAQAB +/gdlAkdOVQG0CUdOVSBEdW1teYi4BBMBAgAiBQJYIL6HAhsDBgsJCAcDAgYVCAIJ +CgsEFgIDAQIeAQIXgAAKCRCd1xxWp1CYAnjGA/9synn6ZXJUKAXQzySgmCZvCIbl +rqBfEpxwLG4Q/lONhm5vthAE0z49I8hj5Gc5e2tLYUtq0o0OCRdCrYHa/efOYWpJ +6RsK99bePOisVzmOABLIgZkcr022kHoMCmkPgv9CUGKP1yqbGl+zzAwQfUjRUmvD +ZIcWLHi2ge4GzPMPi50B2ARYIL6cAQQAxWHnicKejAFcFcF1/3gUSgSH7eiwuBPX +M7vDdgGzlve1o1jbV4tzrjN9jsCl6r0nJPDMfBSzgLr1auNTRG6HpJ4abcOx86ED +Ad+avDcQPZb7z3dPhH/gb2lQejZsHh7bbeOS8WMSzHV3RqCLd8J/xwWPNR5zKn1f +yp4IGfopidMAEQEAAQAD+wQOelnR82+dxyM2IFmZdOB9wSXQeCVOvxSaNMh6Y3lk +UOOkO8Nlic4x0ungQRvjoRs4wBmCuwFK/MII6jKui0B7dn/NDf51i7rGdNGuJXDH +e676By1sEY/NGkc74jr74T+5GWNU64W0vkpfgVmjSAzsUtpmhJMXsc7beBhJdnVl +AgDKCb8hZqj1alcdmLoNvb7ibA3K/V8J462CPD7bMySPBa/uayoFhNxibpoXml2r +oOtHa5izF3b0/9JY97F6rqkdAgD6GdTJ+xmlCoz1Sewoif1I6krq6xoa7gOYpIXo +UL1Afr+LiJeyAnF/M34j/kjIVmPanZJjry0kkjHE5ILjH3uvAf4/6n9np+Th8ujS +YDCIzKwR7639+H+qccOaddCep8Y6KGUMVdD/vTKEx1rMtK+hK/CDkkkxnFslifMJ +kqoqv3WUqCWJAT0EGAECAAkFAlggvpwCGwIAqAkQndccVqdQmAKdIAQZAQIABgUC 
+WCC+nAAKCRDmGUholQPwvQk+A/9latnSsR5s5/1A9TFki11GzSEnfLbx46FYOdkW +n3YBxZoPQGxNA1vIn8GmouxZInw9CF4jdOJxEdzLlYQJ9YLTLtN5tQEMl/19/bR8 +/qLacAZ9IOezYRWxxZsyn6//jfl7A0Y+FV59d4YajKkEfItcIIlgVBSW6T+TNQT3 +R+EH5HJ/A/4/AN0CmBhhE2vGzTnVU0VPrE4V64pjn1rufFdclgpixNZCuuqpKpoE +VVHn6mnBf4njKjZrAGPs5kfQ+H4NsM7v3Zz4yV6deu9FZc4O6E+V1WJ38rO8eBix +7G2jko106CC6vtxsCPVIzY7aaG3H5pjRtomw+pX7SzrQ7FUg2PGumg== +=F/T0 +-----END PGP PRIVATE KEY BLOCK-----` + +const ecdsaPrivateKey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +xaUEX1KsSRMIKoZIzj0DAQcCAwTpYqJsnJiFhKKh+8TulWD+lVmerBFNS+Ii +B+nlG3T0xQQ4Sy5eIjJ0CExIQQzi3EElF/Z2l4F3WC5taFA11NgA/gkDCHSS +PThf1M2K4LN8F1MRcvR+sb7i0nH55ojkwuVB1DE6jqIT9m9i+mX1tzjSAS+6 +lPQiweCJvG7xTC7Hs3AzRapf/r1At4TB+v+5G2/CKynNFEJpbGwgPGJpbGxA +aG9tZS5jb20+wncEEBMIAB8FAl9SrEkGCwkHCAMCBBUICgIDFgIBAhkBAhsD +Ah4BAAoJEMpwT3+q3+xqw5UBAMebZN9isEZ1ML+R/jWAAWMwa/knMugrEZ1v +Bl9+ZwM0AQCZdf80/wYY4Nve01qSRFv8OmKswLli3TvDv6FKc4cLz8epBF9S +rEkSCCqGSM49AwEHAgMEAjKnT9b5wY2bf9TpAV3d7OUfPOxKj9c4VzeVzSrH +AtQgo/MuI1cdYVURicV4i76DNjFhQHQFTk7BrC+C2u1yqQMBCAf+CQMIHImA +iYfzQtjgQWSFZYUkCFpbbwhNF0ch+3HNaZkaHCnZRIsWsRnc6FCb6lRQyK9+ +Dq59kHlduE5QgY40894jfmP2JdJHU6nBdYrivbEdbMJhBBgTCAAJBQJfUqxJ +AhsMAAoJEMpwT3+q3+xqUI0BAMykhV08kQ4Ip9Qlbss6Jdufv7YrU0Vd5hou +b5TmiPd0APoDBh3qIic+aLLUcAuG3+Gt1P1AbUlmqV61ozn1WfHxfw== +=KLN8 +-----END PGP PRIVATE KEY BLOCK-----` + +const dsaPrivateKeyWithElGamalSubkey = `-----BEGIN PGP PRIVATE KEY BLOCK----- + +lQOBBF9/MLsRCACeaF6BI0jTgDAs86t8/kXPfwlPvR2MCYzB0BCqAdcq1hV/GTYd +oNmJRna/ZJfsI/vf+d8Nv+EYOQkPheFS1MJVBitkAXjQPgm8i1tQWen1FCWZxqGk +/vwZYF4yo8GhZ+Wxi3w09W9Cp9QM/CTmyE1Xe7wpPBGe+oD+me8Zxjyt8JBS4Qx+ +gvWbfHxfHnggh4pz7U8QkItlLsBNQEdX4R5+zwRN66g2ZSX/shaa/EkVnihUhD7r +njP9I51ORWucTQD6OvgooaNQZCkQ/Se9TzdakwWKS2XSIFXiY/e2E5ZgKI/pfKDU +iA/KessxddPb7nP/05OIJqg9AoDrD4vmehLzAQD+zsUS3LDU1m9/cG4LMsQbT2VK +Te4HqbGIAle+eu/asQf8DDJMrbZpiJZvADum9j0TJ0oep6VdMbzo9RSDKvlLKT9m +kG63H8oDWnCZm1a+HmGq9YIX+JHWmsLXXsFLeEouLzHO+mZo0X28eji3V2T87hyR +MmUM0wFo4k7jK8uVmkDXv3XwNp2uByWxUKZd7EnWmcEZWqIiexJ7XpCS0Pg3tRaI +zxve0SRe/dxfUPnTk/9KQ9hS6DWroBKquL182zx1Fggh4LIWWE2zq+UYn8BI0E8A +rmIDFJdF8ymFQGRrEy6g79NnkPmkrZWsgMRYY65P6v4zLVmqohJKkpm3/Uxa6QAP +CCoPh/JTOvPeCP2bOJH8z4Z9Py3ouMIjofQW8sXqRgf/RIHbh0KsINHrwwZ4gVIr +MK3RofpaYxw1ztPIWb4cMWoWZHH1Pxh7ggTGSBpAhKXkiWw2Rxat8QF5aA7e962c +bLvVv8dqsPrD/RnVJHag89cbPTzjn7gY9elE8EM8ithV3oQkwHTr4avYlpDZsgNd +hUW3YgRwGo31tdzxoG04AcpV2t+07P8XMPr9hsfWs4rHohXPi38Hseu1Ji+dBoWQ +3+1w/HH3o55s+jy4Ruaz78AIrjbmAJq+6rA2mIcCgrhw3DnzuwQAKeBvSeqn9zfS +ZC812osMBVmkycwelpaIh64WZ0vWL3GvdXDctV2kXM+qVpDTLEny0LuiXxrwCKQL +Ev4HAwK9uQBcreDEEud7pfRb8EYP5lzO2ZA7RaIvje6EWAGBvJGMRT0QQE5SGqc7 +Fw5geigBdt+vVyRuNNhg3c2fdn/OBQaYu0J/8AiOogG8EaM8tCFlbGdhbWFsQGRz +YS5jb20gPGVsZ2FtYWxAZHNhLmNvbT6IkAQTEQgAOBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheAAAoJEHAQaE/r +sWC5A4EA/0GcJmyPtN+Klc7b9sVT3JgKTRnB/URxOJfYJofP0hZLAQCkqyMO+adV +JvbgDH0zaITQWZSSXPqpgMpCA6juTrDsd50CawRffzC7EAgAxFFFSAAEQzWTgKU5 +EBtpxxoPzHqcChawTHRxHxjcELXzmUBS5PzfA1HXSPnNqK/x3Ut5ycC3CsW41Fnt +Gm3706Wu9VFbFZVn55F9lPiplUo61n5pqMvOr1gmuQsdXiTa0t5FRa4TZ2VSiHFw +vdAVSPTUsT4ZxJ1rPyFYRtq1n3pQcvdZowd07r0JnzTMjLLMFYCKhwIowoOC4zqJ +iB8enjwOlpaqBATRm9xpVF7SJkroPF6/B1vdhj7E3c1aJyHlo0PYBAg756sSHWHg +UuLyUQ4TA0hcCVenn/L/aSY2LnbdZB1EBhlYjA7dTCgwIqsQhfQmPkjz6g64A7+Y +HbbrLwADBQgAk14QIEQ+J/VHetpQV/jt2pNsFK1kVK7mXK0spTExaC2yj2sXlHjL +Ie3bO5T/KqmIaBEB5db5fA5xK9cZt79qrQHDKsEqUetUeMUWLBx77zBsus3grIgy +bwDZKseRzQ715pwxquxQlScGoDIBKEh08HpwHkq140eIj3w+MAIfndaZaSCNaxaP +Snky7BQmJ7Wc7qrIwoQP6yrnUqyW2yNi81nJYUhxjChqaFSlwzLs/iNGryBKo0ic 
+BqVIRjikKHBlwBng6WyrltQo/Vt9GG8w+lqaAVXbJRlaBZJUR+2NKi/YhP3qQse3 +v8fi4kns0gh5LK+2C01RvdX4T49QSExuIf4HAwLJqYIGwadA2uem5v7/765ZtFWV +oL0iZ0ueTJDby4wTFDpLVzzDi/uVcB0ZRFrGOp7w6OYcNYTtV8n3xmli2Q5Trw0c +wZVzvg+ABKWiv7faBjMczIFF8y6WZKOIeAQYEQgAIBYhBI+gnfiHQxB35/Dp0XAQ +aE/rsWC5BQJffzC7AhsMAAoJEHAQaE/rsWC5ZmIA/jhS4r4lClbvjuPWt0Yqdn7R +fss2SPMYvMrrDh42aE0OAQD8xn4G6CN8UtW9xihXOY6FpxiJ/sMc2VaneeUd34oa +4g== +=XZm8 +-----END PGP PRIVATE KEY BLOCK-----` + +// https://tests.sequoia-pgp.org/#Certificate_expiration +// P _ U p +const expiringPrimaryUIDKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- + +xsDNBF2lnPIBDAC5cL9PQoQLTMuhjbYvb4Ncuuo0bfmgPRFywX53jPhoFf4Zg6mv +/seOXpgecTdOcVttfzC8ycIKrt3aQTiwOG/ctaR4Bk/t6ayNFfdUNxHWk4WCKzdz +/56fW2O0F23qIRd8UUJp5IIlN4RDdRCtdhVQIAuzvp2oVy/LaS2kxQoKvph/5pQ/ +5whqsyroEWDJoSV0yOb25B/iwk/pLUFoyhDG9bj0kIzDxrEqW+7Ba8nocQlecMF3 +X5KMN5kp2zraLv9dlBBpWW43XktjcCZgMy20SouraVma8Je/ECwUWYUiAZxLIlMv +9CurEOtxUw6N3RdOtLmYZS9uEnn5y1UkF88o8Nku890uk6BrewFzJyLAx5wRZ4F0 +qV/yq36UWQ0JB/AUGhHVPdFf6pl6eaxBwT5GXvbBUibtf8YI2og5RsgTWtXfU7eb +SGXrl5ZMpbA6mbfhd0R8aPxWfmDWiIOhBufhMCvUHh1sApMKVZnvIff9/0Dca3wb +vLIwa3T4CyshfT0AEQEAAc0hQm9iIEJhYmJhZ2UgPGJvYkBvcGVucGdwLmV4YW1w +bGU+wsEUBBMBCgBIBYJfiWKKBYkBX+/UBQsJCAcCCRD7/MgqAV5zMAYVCgkICwIE +FgIDAQIXgAIbAwIeARYhBNGmbhojsYLJmA94jPv8yCoBXnMwAAAf0wv/a++a3DkL +CttbK4LRIiSry2wb97mxYQoWYzvkKYD1IP/KiamRwzjBKuRSE0qZ2uonQDRGc+zl +1H9dwkDLL4T9uJngCCPCBgFW/hOFPvF+WYEKHtOzunqx6KwDHkpdH+hpzfFzIhDo +aXiVnDGvJ3H/bVTKq1m2KPXO2ckkXPQXJX9Fx6kPHdvcq+3ZI75IzbD/ue5lBcsy +OKLzVu+KLxzlzGui6v1V0fTvU/uqvHlvUxcDAqMnIsDUPakjK2RDeQ38qtN6+PQ5 +/dT7vx2Wtzesqn2eDbDf5uRfSgmp2hJLJniAKjMCBVAJiOgPb0LXUIcwCGxaiWOA +g5ZvNWjX5bZ/FxqpLpOE9OReI5YY7ns7zqP4thLYYWe0Qdp9a2ezVrgzgrh/SLla +d73x/S9TrmLtYGGlbVUByJW+GXjW2Tt6iaa/WDFzx8NvZ/wzIAdGSEfLcvS+JBSP +2ppdY5Ac/2dK3PzYABkHvB/rhXIwlXnrFDU9efRHZfFqQqGauA64wfdIzsDNBF2l +nPIBDADWML9cbGMrp12CtF9b2P6z9TTT74S8iyBOzaSvdGDQY/sUtZXRg21HWamX +nn9sSXvIDEINOQ6A9QxdxoqWdCHrOuW3ofneYXoG+zeKc4dC86wa1TR2q9vW+RMX +SO4uImA+Uzula/6k1DogDf28qhCxMwG/i/m9g1c/0aApuDyKdQ1PXsHHNlgd/Dn6 +rrd5y2AObaifV7wIhEJnvqgFXDN2RXGjLeCOHV4Q2WTYPg/S4k1nMXVDwZXrvIsA +0YwIMgIT86Rafp1qKlgPNbiIlC1g9RY/iFaGN2b4Ir6GDohBQSfZW2+LXoPZuVE/ +wGlQ01rh827KVZW4lXvqsge+wtnWlszcselGATyzqOK9LdHPdZGzROZYI2e8c+pa +LNDdVPL6vdRBUnkCaEkOtl1mr2JpQi5nTU+gTX4IeInC7E+1a9UDF/Y85ybUz8XV +8rUnR76UqVC7KidNepdHbZjjXCt8/Zo+Tec9JNbYNQB/e9ExmDntmlHEsSEQzFwz +j8sxH48AEQEAAcLA9gQYAQoAIBYhBNGmbhojsYLJmA94jPv8yCoBXnMwBQJdpZzy +AhsMAAoJEPv8yCoBXnMw6f8L/26C34dkjBffTzMj5Bdzm8MtF67OYneJ4TQMw7+4 +1IL4rVcSKhIhk/3Ud5knaRtP2ef1+5F66h9/RPQOJ5+tvBwhBAcUWSupKnUrdVaZ +QanYmtSxcVV2PL9+QEiNN3tzluhaWO//rACxJ+K/ZXQlIzwQVTpNhfGzAaMVV9zp +f3u0k14itcv6alKY8+rLZvO1wIIeRZLmU0tZDD5HtWDvUV7rIFI1WuoLb+KZgbYn +3OWjCPHVdTrdZ2CqnZbG3SXw6awH9bzRLV9EXkbhIMez0deCVdeo+wFFklh8/5VK +2b0vk/+wqMJxfpa1lHvJLobzOP9fvrswsr92MA2+k901WeISR7qEzcI0Fdg8AyFA +ExaEK6VyjP7SXGLwvfisw34OxuZr3qmx1Sufu4toH3XrB7QJN8XyqqbsGxUCBqWi +f9RSK4xjzRTe56iPeiSJJOIciMP9i2ldI+KgLycyeDvGoBj0HCLO3gVaBe4ubVrj +5KjhX2PVNEJd3XZRzaXZE2aAMQ== +=522n +-----END PGP PUBLIC KEY BLOCK-----` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go new file mode 100644 index 0000000000..7350974eff --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_config.go @@ -0,0 +1,56 @@ +// Copyright (C) 2019 ProtonTech AG + +package packet + +import "math/bits" + +// AEADConfig collects a number of AEAD parameters along with sensible defaults. 
+// A nil AEADConfig is valid and results in all default values.
+type AEADConfig struct {
+	// The AEAD mode of operation.
+	DefaultMode AEADMode
+	// Amount of octets in each chunk of data.
+	ChunkSize uint64
+}
+
+// Mode returns the AEAD mode of operation.
+func (conf *AEADConfig) Mode() AEADMode {
+	if conf == nil || conf.DefaultMode == 0 {
+		return AEADModeEAX
+	}
+	mode := conf.DefaultMode
+	if mode != AEADModeEAX && mode != AEADModeOCB &&
+		mode != AEADModeExperimentalGCM {
+		panic("AEAD mode unsupported")
+	}
+	return mode
+}
+
+// ChunkSizeByte returns the byte indicating the chunk size. The effective
+// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6).
+func (conf *AEADConfig) ChunkSizeByte() byte {
+	if conf == nil || conf.ChunkSize == 0 {
+		return 12 // 1 << (12 + 6) == 262144 bytes
+	}
+
+	chunkSize := conf.ChunkSize
+	exponent := bits.Len64(chunkSize) - 1
+	switch {
+	case exponent < 6:
+		exponent = 6
+	case exponent > 27:
+		exponent = 27
+	}
+
+	return byte(exponent - 6)
+}
+
+// decodeAEADChunkSize returns the effective chunk size. On 32-bit systems, the
+// maximum returned value is 1 << 30.
+func decodeAEADChunkSize(c byte) int {
+	size := uint64(1 << (c + 6))
+	if size != uint64(int(size)) {
+		return 1 << 30
+	}
+	return int(size)
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
new file mode 100644
index 0000000000..85c53f08aa
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/aead_encrypted.go
@@ -0,0 +1,364 @@
+// Copyright (C) 2019 ProtonTech AG
+
+package packet
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/rand"
+	"encoding/binary"
+	"io"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+)
+
+// AEADEncrypted represents an AEAD Encrypted Packet (tag 20, RFC4880bis-5.16).
+type AEADEncrypted struct {
+	cipher        CipherFunction
+	mode          AEADMode
+	chunkSizeByte byte
+	Contents      io.Reader // Encrypted chunks and tags
+	initialNonce  []byte    // Referred to as IV in RFC4880-bis
+}
+
+// Only currently defined version
+const aeadEncryptedVersion = 1
+
+// An AEAD opener/sealer, its configuration, and data for en/decryption.
+type aeadCrypter struct {
+	aead           cipher.AEAD
+	chunkSize      int
+	initialNonce   []byte
+	associatedData []byte       // Chunk-independent associated data
+	chunkIndex     []byte       // Chunk counter
+	bytesProcessed int          // Amount of plaintext bytes encrypted/decrypted
+	buffer         bytes.Buffer // Buffered bytes across chunks
+}
+
+// aeadEncrypter encrypts and writes bytes. It encrypts when necessary according
+// to the chunk size, and buffers the extra encrypted bytes for the next write.
+type aeadEncrypter struct {
+	aeadCrypter                // Embedded plaintext sealer
+	writer      io.WriteCloser // 'writer' is a partialLengthWriter
+}
+
+// aeadDecrypter reads and decrypts bytes. It buffers extra decrypted bytes when
+// necessary, similar to aeadEncrypter.
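+//
+// A minimal sketch of the read path (illustrative only; ae, cipherFunc and
+// key are hypothetical):
+//
+//	rc, err := ae.Decrypt(cipherFunc, key) // rc is backed by an aeadDecrypter
+//	if err == nil {
+//		plaintext, _ := ioutil.ReadAll(rc) // Read decrypts chunk by chunk
+//		rc.Close()                         // no-op: final tag checked in Read
+//	}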
+type aeadDecrypter struct {
+	aeadCrypter           // Embedded ciphertext opener
+	reader      io.Reader // 'reader' is a partialLengthReader
+	peekedBytes []byte    // Used to detect last chunk
+	eof         bool
+}
+
+func (ae *AEADEncrypted) parse(buf io.Reader) error {
+	headerData := make([]byte, 4)
+	if n, err := io.ReadFull(buf, headerData); n < 4 {
+		return errors.AEADError("could not read aead header:" + err.Error())
+	}
+	// Read initial nonce
+	mode := AEADMode(headerData[2])
+	nonceLen := mode.NonceLength()
+	if nonceLen == 0 {
+		return errors.AEADError("unknown mode")
+	}
+	initialNonce := make([]byte, nonceLen)
+	if n, err := io.ReadFull(buf, initialNonce); n < nonceLen {
+		return errors.AEADError("could not read aead nonce:" + err.Error())
+	}
+	ae.Contents = buf
+	ae.initialNonce = initialNonce
+	c := headerData[1]
+	if _, ok := algorithm.CipherById[c]; !ok {
+		return errors.UnsupportedError("unknown cipher: " + string(c))
+	}
+	ae.cipher = CipherFunction(c)
+	ae.mode = mode
+	ae.chunkSizeByte = byte(headerData[3])
+	return nil
+}
+
+// Decrypt returns an io.ReadCloser from which decrypted bytes can be read, or
+// an error.
+func (ae *AEADEncrypted) Decrypt(ciph CipherFunction, key []byte) (io.ReadCloser, error) {
+	return ae.decrypt(key)
+}
+
+// decrypt prepares an aeadCrypter and returns a ReadCloser from which
+// decrypted bytes can be read (see aeadDecrypter.Read()).
+func (ae *AEADEncrypted) decrypt(key []byte) (io.ReadCloser, error) {
+	blockCipher := ae.cipher.new(key)
+	aead := ae.mode.new(blockCipher)
+	// Carry the first tagLen bytes
+	tagLen := ae.mode.TagLength()
+	peekedBytes := make([]byte, tagLen)
+	n, err := io.ReadFull(ae.Contents, peekedBytes)
+	if n < tagLen || (err != nil && err != io.EOF) {
+		return nil, errors.AEADError("Not enough data to decrypt:" + err.Error())
+	}
+	chunkSize := decodeAEADChunkSize(ae.chunkSizeByte)
+	return &aeadDecrypter{
+		aeadCrypter: aeadCrypter{
+			aead:           aead,
+			chunkSize:      chunkSize,
+			initialNonce:   ae.initialNonce,
+			associatedData: ae.associatedData(),
+			chunkIndex:     make([]byte, 8),
+		},
+		reader:      ae.Contents,
+		peekedBytes: peekedBytes}, nil
+}
+
+// Read decrypts bytes and reads them into dst. It decrypts when necessary and
+// buffers extra decrypted bytes. It returns the number of bytes copied into dst
+// and an error.
+func (ar *aeadDecrypter) Read(dst []byte) (n int, err error) {
+	// Return buffered plaintext bytes from previous calls
+	if ar.buffer.Len() > 0 {
+		return ar.buffer.Read(dst)
+	}
+
+	// Return EOF if we've previously validated the final tag
+	if ar.eof {
+		return 0, io.EOF
+	}
+
+	// Read a chunk
+	tagLen := ar.aead.Overhead()
+	cipherChunkBuf := new(bytes.Buffer)
+	_, errRead := io.CopyN(cipherChunkBuf, ar.reader, int64(ar.chunkSize+tagLen))
+	cipherChunk := cipherChunkBuf.Bytes()
+	if errRead != nil && errRead != io.EOF {
+		return 0, errRead
+	}
+	decrypted, errChunk := ar.openChunk(cipherChunk)
+	if errChunk != nil {
+		return 0, errChunk
+	}
+
+	// Return decrypted bytes, buffering if necessary
+	if len(dst) < len(decrypted) {
+		n = copy(dst, decrypted[:len(dst)])
+		ar.buffer.Write(decrypted[len(dst):])
+	} else {
+		n = copy(dst, decrypted)
+	}
+
+	// Check final authentication tag
+	if errRead == io.EOF {
+		errChunk := ar.validateFinalTag(ar.peekedBytes)
+		if errChunk != nil {
+			return n, errChunk
+		}
+		ar.eof = true // Mark EOF for when we've returned all buffered data
+	}
+	return
+}
+
+// Close is a no-op. The final authentication tag of the stream was already
+// checked in the last Read call.
In the future, this function could be used to +// wipe the reader and peeked, decrypted bytes, if necessary. +func (ar *aeadDecrypter) Close() (err error) { + return nil +} + +// SerializeAEADEncrypted initializes the aeadCrypter and returns a writer. +// This writer encrypts and writes bytes (see aeadEncrypter.Write()). +func SerializeAEADEncrypted(w io.Writer, key []byte, cipher CipherFunction, mode AEADMode, config *Config) (io.WriteCloser, error) { + writeCloser := noOpCloser{w} + writer, err := serializeStreamHeader(writeCloser, packetTypeAEADEncrypted) + if err != nil { + return nil, err + } + + // Data for en/decryption: tag, version, cipher, aead mode, chunk size + aeadConf := config.AEAD() + prefix := []byte{ + 0xD4, + aeadEncryptedVersion, + byte(config.Cipher()), + byte(aeadConf.Mode()), + aeadConf.ChunkSizeByte(), + } + n, err := writer.Write(prefix[1:]) + if err != nil || n < 4 { + return nil, errors.AEADError("could not write AEAD headers") + } + // Sample nonce + nonceLen := aeadConf.Mode().NonceLength() + nonce := make([]byte, nonceLen) + n, err = rand.Read(nonce) + if err != nil { + panic("Could not sample random nonce") + } + _, err = writer.Write(nonce) + if err != nil { + return nil, err + } + blockCipher := CipherFunction(config.Cipher()).new(key) + alg := AEADMode(aeadConf.Mode()).new(blockCipher) + + chunkSize := decodeAEADChunkSize(aeadConf.ChunkSizeByte()) + return &aeadEncrypter{ + aeadCrypter: aeadCrypter{ + aead: alg, + chunkSize: chunkSize, + associatedData: prefix, + chunkIndex: make([]byte, 8), + initialNonce: nonce, + }, + writer: writer}, nil +} + +// Write encrypts and writes bytes. It encrypts when necessary and buffers extra +// plaintext bytes for next call. When the stream is finished, Close() MUST be +// called to append the final tag. +func (aw *aeadEncrypter) Write(plaintextBytes []byte) (n int, err error) { + // Append plaintextBytes to existing buffered bytes + n, err = aw.buffer.Write(plaintextBytes) + if err != nil { + return n, err + } + // Encrypt and write chunks + for aw.buffer.Len() >= aw.chunkSize { + plainChunk := aw.buffer.Next(aw.chunkSize) + encryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return n, err + } + _, err = aw.writer.Write(encryptedChunk) + if err != nil { + return n, err + } + } + return +} + +// Close encrypts and writes the remaining buffered plaintext if any, appends +// the final authentication tag, and closes the embedded writer. This function +// MUST be called at the end of a stream. +func (aw *aeadEncrypter) Close() (err error) { + // Encrypt and write a chunk if there's buffered data left, or if we haven't + // written any chunks yet. + if aw.buffer.Len() > 0 || aw.bytesProcessed == 0 { + plainChunk := aw.buffer.Bytes() + lastEncryptedChunk, err := aw.sealChunk(plainChunk) + if err != nil { + return err + } + _, err = aw.writer.Write(lastEncryptedChunk) + if err != nil { + return err + } + } + // Compute final tag (associated data: packet tag, version, cipher, aead, + // chunk size, index, total number of encrypted octets). + adata := append(aw.associatedData[:], aw.chunkIndex[:]...) + adata = append(adata, make([]byte, 8)...) + binary.BigEndian.PutUint64(adata[13:], uint64(aw.bytesProcessed)) + nonce := aw.computeNextNonce() + finalTag := aw.aead.Seal(nil, nonce, nil, adata) + _, err = aw.writer.Write(finalTag) + if err != nil { + return err + } + return aw.writer.Close() +} + +// sealChunk Encrypts and authenticates the given chunk. 
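+// Each sealed chunk consists of the AEAD ciphertext of at most chunkSize
+// plaintext bytes followed by the mode's authentication tag, so every chunk
+// adds aead.Overhead() bytes on the wire.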
+func (aw *aeadEncrypter) sealChunk(data []byte) ([]byte, error) {
+	if len(data) > aw.chunkSize {
+		return nil, errors.AEADError("chunk exceeds maximum length")
+	}
+	if aw.associatedData == nil {
+		return nil, errors.AEADError("can't seal without headers")
+	}
+	adata := append(aw.associatedData, aw.chunkIndex...)
+	nonce := aw.computeNextNonce()
+	encrypted := aw.aead.Seal(nil, nonce, data, adata)
+	aw.bytesProcessed += len(data)
+	if err := aw.aeadCrypter.incrementIndex(); err != nil {
+		return nil, err
+	}
+	return encrypted, nil
+}
+
+// openChunk decrypts and checks integrity of an encrypted chunk, returning
+// the underlying plaintext and an error. It accesses peeked bytes from the
+// next chunk, to identify the last chunk and decrypt/validate accordingly.
+func (ar *aeadDecrypter) openChunk(data []byte) ([]byte, error) {
+	tagLen := ar.aead.Overhead()
+	// Restore carried bytes from last call
+	chunkExtra := append(ar.peekedBytes, data...)
+	// 'chunk' contains encrypted bytes, followed by an authentication tag.
+	chunk := chunkExtra[:len(chunkExtra)-tagLen]
+	ar.peekedBytes = chunkExtra[len(chunkExtra)-tagLen:]
+	adata := append(ar.associatedData, ar.chunkIndex...)
+	nonce := ar.computeNextNonce()
+	plainChunk, err := ar.aead.Open(nil, nonce, chunk, adata)
+	if err != nil {
+		return nil, err
+	}
+	ar.bytesProcessed += len(plainChunk)
+	if err = ar.aeadCrypter.incrementIndex(); err != nil {
+		return nil, err
+	}
+	return plainChunk, nil
+}
+
+// validateFinalTag checks the final authentication tag, taking the total
+// number of decrypted bytes into account in the associated data. It returns
+// an error, or nil if the tag is valid.
+func (ar *aeadDecrypter) validateFinalTag(tag []byte) error {
+	// Associated: tag, version, cipher, aead, chunk size, index, and octets
+	amountBytes := make([]byte, 8)
+	binary.BigEndian.PutUint64(amountBytes, uint64(ar.bytesProcessed))
+	adata := append(ar.associatedData, ar.chunkIndex...)
+	adata = append(adata, amountBytes...)
+	nonce := ar.computeNextNonce()
+	_, err := ar.aead.Open(nil, nonce, tag, adata)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Associated data for chunks: tag, version, cipher, mode, chunk size byte
+func (ae *AEADEncrypted) associatedData() []byte {
+	return []byte{
+		0xD4,
+		aeadEncryptedVersion,
+		byte(ae.cipher),
+		byte(ae.mode),
+		ae.chunkSizeByte}
+}
+
+// computeNextNonce takes the incremental index and computes an eXclusive OR with
+// the least significant 8 bytes of the receiver's initial nonce (see sec.
+// 5.16.1 and 5.16.2). It returns the resulting nonce.
+func (wo *aeadCrypter) computeNextNonce() (nonce []byte) {
+	nonce = make([]byte, len(wo.initialNonce))
+	copy(nonce, wo.initialNonce)
+	offset := len(wo.initialNonce) - 8
+	for i := 0; i < 8; i++ {
+		nonce[i+offset] ^= wo.chunkIndex[i]
+	}
+	return
+}
+
+// incrementIndex performs an integer increment by 1 of the integer represented by the
+// slice, modifying it accordingly.
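+// The slice acts as a big-endian counter: for example (illustrative),
+// []byte{0, 0, 0, 0, 0, 0, 1, 255} increments to
+// []byte{0, 0, 0, 0, 0, 0, 2, 0}.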
+func (wo *aeadCrypter) incrementIndex() error {
+	index := wo.chunkIndex
+	if len(index) == 0 {
+		return errors.AEADError("Index has length 0")
+	}
+	for i := len(index) - 1; i >= 0; i-- {
+		if index[i] < 255 {
+			index[i]++
+			return nil
+		}
+		index[i] = 0
+	}
+	return errors.AEADError("cannot further increment index")
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
new file mode 100644
index 0000000000..c55cecd357
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/compressed.go
@@ -0,0 +1,123 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"compress/bzip2"
+	"compress/flate"
+	"compress/zlib"
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"io"
+	"strconv"
+)
+
+// Compressed represents a compressed OpenPGP packet. The decompressed contents
+// will contain more OpenPGP packets. See RFC 4880, section 5.6.
+type Compressed struct {
+	Body io.Reader
+}
+
+const (
+	NoCompression      = flate.NoCompression
+	BestSpeed          = flate.BestSpeed
+	BestCompression    = flate.BestCompression
+	DefaultCompression = flate.DefaultCompression
+)
+
+// CompressionConfig contains compressor configuration settings.
+type CompressionConfig struct {
+	// Level is the compression level to use. It must be set to
+	// between -1 and 9, with -1 causing the compressor to use the
+	// default compression level, 0 causing the compressor to use
+	// no compression and 1 to 9 representing increasing (better,
+	// slower) compression levels. If Level is less than -1 or
+	// more than 9, a non-nil error will be returned during
+	// encryption. See the constants above for convenient common
+	// settings for Level.
+	Level int
+}
+
+func (c *Compressed) parse(r io.Reader) error {
+	var buf [1]byte
+	_, err := readFull(r, buf[:])
+	if err != nil {
+		return err
+	}
+
+	switch buf[0] {
+	case 1:
+		c.Body = flate.NewReader(r)
+	case 2:
+		c.Body, err = zlib.NewReader(r)
+	case 3:
+		c.Body = bzip2.NewReader(r)
+	default:
+		err = errors.UnsupportedError("unknown compression algorithm: " + strconv.Itoa(int(buf[0])))
+	}
+
+	return err
+}
+
+// compressedWriteCloser represents the serialized compression stream
+// header and the compressor. Its Close() method ensures that both the
+// compressor and serialized stream header are closed. Its Write()
+// method writes to the compressor.
+type compressedWriteCloser struct {
+	sh io.Closer      // Stream Header
+	c  io.WriteCloser // Compressor
+}
+
+func (cwc compressedWriteCloser) Write(p []byte) (int, error) {
+	return cwc.c.Write(p)
+}
+
+func (cwc compressedWriteCloser) Close() (err error) {
+	err = cwc.c.Close()
+	if err != nil {
+		return err
+	}
+
+	return cwc.sh.Close()
+}
+
+// SerializeCompressed serializes a compressed data packet to w and
+// returns a WriteCloser to which the literal data packets themselves
+// can be written and which MUST be closed on completion. If cc is
+// nil, sensible defaults will be used to configure the compression
+// algorithm.
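+//
+// A minimal usage sketch (illustrative only; out is any io.WriteCloser and
+// innerPacketBytes is hypothetical):
+//
+//	wc, err := SerializeCompressed(out, CompressionZLIB, nil)
+//	if err == nil {
+//		wc.Write(innerPacketBytes) // write literal data packets here
+//		wc.Close()                 // MUST be called to flush the compressor
+//	}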
+func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *CompressionConfig) (literaldata io.WriteCloser, err error) {
+	compressed, err := serializeStreamHeader(w, packetTypeCompressed)
+	if err != nil {
+		return
+	}
+
+	_, err = compressed.Write([]byte{uint8(algo)})
+	if err != nil {
+		return
+	}
+
+	level := DefaultCompression
+	if cc != nil {
+		level = cc.Level
+	}
+
+	var compressor io.WriteCloser
+	switch algo {
+	case CompressionZIP:
+		compressor, err = flate.NewWriter(compressed, level)
+	case CompressionZLIB:
+		compressor, err = zlib.NewWriterLevel(compressed, level)
+	default:
+		s := strconv.Itoa(int(algo))
+		err = errors.UnsupportedError("Unsupported compression algorithm: " + s)
+	}
+	if err != nil {
+		return
+	}
+
+	literaldata = compressedWriteCloser{compressed, compressor}
+
+	return
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
new file mode 100644
index 0000000000..5652dd8148
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/config.go
@@ -0,0 +1,167 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package packet
+
+import (
+	"crypto"
+	"crypto/rand"
+	"io"
+	"math/big"
+	"time"
+)
+
+// Config collects a number of parameters along with sensible defaults.
+// A nil *Config is valid and results in all default values.
+type Config struct {
+	// Rand provides the source of entropy.
+	// If nil, the crypto/rand Reader is used.
+	Rand io.Reader
+	// DefaultHash is the default hash function to be used.
+	// If zero, SHA-256 is used.
+	DefaultHash crypto.Hash
+	// DefaultCipher is the cipher to be used.
+	// If zero, AES-128 is used.
+	DefaultCipher CipherFunction
+	// Time returns the current time as the number of seconds since the
+	// epoch. If Time is nil, time.Now is used.
+	Time func() time.Time
+	// DefaultCompressionAlgo is the compression algorithm to be
+	// applied to the plaintext before encryption. If zero, no
+	// compression is done.
+	DefaultCompressionAlgo CompressionAlgo
+	// CompressionConfig configures the compression settings.
+	CompressionConfig *CompressionConfig
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 1024 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 65536 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. When set, it is strongly encouraged to
+	// use a value that is at least 65536. See RFC 4880 Section
+	// 3.7.1.3.
+	S2KCount int
+	// RSABits is the number of bits in new RSA keys made with NewEntity.
+	// If zero, then 2048-bit keys are created.
+	RSABits int
+	// The public key algorithm to use - will always create a signing primary
+	// key and encryption subkey.
+	Algorithm PublicKeyAlgorithm
+	// Some known primes that are optionally prepopulated by the caller
+	RSAPrimes []*big.Int
+	// AEADConfig configures the use of the new AEAD Encrypted Data Packet,
+	// defined in the draft of the next version of the OpenPGP specification.
+	// If a non-nil AEADConfig is passed, usage of this packet is enabled. By
+	// default, it is disabled.
See the documentation of AEADConfig for more + // configuration options related to AEAD. + // **Note: using this option may break compatibility with other OpenPGP + // implementations, as well as future versions of this library.** + AEADConfig *AEADConfig + // V5Keys configures version 5 key generation. If false, this package still + // supports version 5 keys, but produces version 4 keys. + V5Keys bool + // "The validity period of the key. This is the number of seconds after + // the key creation time that the key expires. If this is not present + // or has a value of zero, the key never expires. This is found only on + // a self-signature."" + // https://tools.ietf.org/html/rfc4880#section-5.2.3.6 + KeyLifetimeSecs uint32 + // "The validity period of the signature. This is the number of seconds + // after the signature creation time that the signature expires. If + // this is not present or has a value of zero, it never expires." + // https://tools.ietf.org/html/rfc4880#section-5.2.3.10 + SigLifetimeSecs uint32 + // SigningKeyId is used to specify the signing key to use (by Key ID). + // By default, the signing key is selected automatically, preferring + // signing subkeys if available. + SigningKeyId uint64 +} + +func (c *Config) Random() io.Reader { + if c == nil || c.Rand == nil { + return rand.Reader + } + return c.Rand +} + +func (c *Config) Hash() crypto.Hash { + if c == nil || uint(c.DefaultHash) == 0 { + return crypto.SHA256 + } + return c.DefaultHash +} + +func (c *Config) Cipher() CipherFunction { + if c == nil || uint8(c.DefaultCipher) == 0 { + return CipherAES128 + } + return c.DefaultCipher +} + +func (c *Config) Now() time.Time { + if c == nil || c.Time == nil { + return time.Now() + } + return c.Time() +} + +// KeyLifetime returns the validity period of the key. +func (c *Config) KeyLifetime() uint32 { + if c == nil { + return 0 + } + return c.KeyLifetimeSecs +} + +// SigLifetime returns the validity period of the signature. +func (c *Config) SigLifetime() uint32 { + if c == nil { + return 0 + } + return c.SigLifetimeSecs +} + +func (c *Config) Compression() CompressionAlgo { + if c == nil { + return CompressionNone + } + return c.DefaultCompressionAlgo +} + +func (c *Config) PasswordHashIterations() int { + if c == nil || c.S2KCount == 0 { + return 0 + } + return c.S2KCount +} + +func (c *Config) RSAModulusBits() int { + if c == nil || c.RSABits == 0 { + return 2048 + } + return c.RSABits +} + +func (c *Config) PublicKeyAlgorithm() PublicKeyAlgorithm { + if c == nil || c.Algorithm == 0 { + return PubKeyAlgoRSA + } + return c.Algorithm +} + +func (c *Config) AEAD() *AEADConfig { + if c == nil { + return nil + } + return c.AEADConfig +} + +func (c *Config) SigningKey() uint64 { + if c == nil { + return 0 + } + return c.SigningKeyId +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go new file mode 100644 index 0000000000..801aec92b4 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/encrypted_key.go @@ -0,0 +1,282 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package packet + +import ( + "crypto" + "crypto/rsa" + "encoding/binary" + "io" + "math/big" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" +) + +const encryptedKeyVersion = 3 + +// EncryptedKey represents a public-key encrypted session key. See RFC 4880, +// section 5.1. +type EncryptedKey struct { + KeyId uint64 + Algo PublicKeyAlgorithm + CipherFunc CipherFunction // only valid after a successful Decrypt + Key []byte // only valid after a successful Decrypt + + encryptedMPI1, encryptedMPI2 encoding.Field +} + +func (e *EncryptedKey) parse(r io.Reader) (err error) { + var buf [10]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != encryptedKeyVersion { + return errors.UnsupportedError("unknown EncryptedKey version " + strconv.Itoa(int(buf[0]))) + } + e.KeyId = binary.BigEndian.Uint64(buf[1:9]) + e.Algo = PublicKeyAlgorithm(buf[9]) + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoElGamal: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.MPI) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + case PubKeyAlgoECDH: + e.encryptedMPI1 = new(encoding.MPI) + if _, err = e.encryptedMPI1.ReadFrom(r); err != nil { + return + } + + e.encryptedMPI2 = new(encoding.OID) + if _, err = e.encryptedMPI2.ReadFrom(r); err != nil { + return + } + } + _, err = consumeAll(r) + return +} + +func checksumKeyMaterial(key []byte) uint16 { + var checksum uint16 + for _, v := range key { + checksum += uint16(v) + } + return checksum +} + +// Decrypt decrypts an encrypted session key with the given private key. The +// private key must have been decrypted first. +// If config is nil, sensible defaults will be used. +func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { + if e.KeyId != 0 && e.KeyId != priv.KeyId { + return errors.InvalidArgumentError("cannot decrypt encrypted session key for key id " + strconv.FormatUint(e.KeyId, 16) + " with private key id " + strconv.FormatUint(priv.KeyId, 16)) + } + if e.Algo != priv.PubKeyAlgo { + return errors.InvalidArgumentError("cannot decrypt encrypted session key of type " + strconv.Itoa(int(e.Algo)) + " with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + + var err error + var b []byte + + // TODO(agl): use session key decryption routines here to avoid + // padding oracle attacks. 
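+	//
+	// On success, b holds one octet naming the symmetric cipher, the
+	// session key itself, and a two-octet checksum over the key (this
+	// layout is parsed right after the switch below).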
+ switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + // Supports both *rsa.PrivateKey and crypto.Decrypter + k := priv.PrivateKey.(crypto.Decrypter) + b, err = k.Decrypt(config.Random(), padToKeySize(k.Public().(*rsa.PublicKey), e.encryptedMPI1.Bytes()), nil) + case PubKeyAlgoElGamal: + c1 := new(big.Int).SetBytes(e.encryptedMPI1.Bytes()) + c2 := new(big.Int).SetBytes(e.encryptedMPI2.Bytes()) + b, err = elgamal.Decrypt(priv.PrivateKey.(*elgamal.PrivateKey), c1, c2) + case PubKeyAlgoECDH: + vsG := e.encryptedMPI1.Bytes() + m := e.encryptedMPI2.Bytes() + oid := priv.PublicKey.oid.EncodedBytes() + b, err = ecdh.Decrypt(priv.PrivateKey.(*ecdh.PrivateKey), vsG, m, oid, priv.PublicKey.Fingerprint[:]) + default: + err = errors.InvalidArgumentError("cannot decrypt encrypted session key with private key of type " + strconv.Itoa(int(priv.PubKeyAlgo))) + } + + if err != nil { + return err + } + + e.CipherFunc = CipherFunction(b[0]) + e.Key = b[1 : len(b)-2] + expectedChecksum := uint16(b[len(b)-2])<<8 | uint16(b[len(b)-1]) + checksum := checksumKeyMaterial(e.Key) + if checksum != expectedChecksum { + return errors.StructuralError("EncryptedKey checksum incorrect") + } + + return nil +} + +// Serialize writes the encrypted key packet, e, to w. +func (e *EncryptedKey) Serialize(w io.Writer) error { + var mpiLen int + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + case PubKeyAlgoElGamal: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + case PubKeyAlgoECDH: + mpiLen = int(e.encryptedMPI1.EncodedLength()) + int(e.encryptedMPI2.EncodedLength()) + default: + return errors.InvalidArgumentError("don't know how to serialize encrypted key type " + strconv.Itoa(int(e.Algo))) + } + + err := serializeHeader(w, packetTypeEncryptedKey, 1 /* version */ +8 /* key id */ +1 /* algo */ +mpiLen) + if err != nil { + return err + } + + w.Write([]byte{encryptedKeyVersion}) + binary.Write(w, binary.BigEndian, e.KeyId) + w.Write([]byte{byte(e.Algo)}) + + switch e.Algo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + _, err := w.Write(e.encryptedMPI1.EncodedBytes()) + return err + case PubKeyAlgoElGamal: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + case PubKeyAlgoECDH: + if _, err := w.Write(e.encryptedMPI1.EncodedBytes()); err != nil { + return err + } + _, err := w.Write(e.encryptedMPI2.EncodedBytes()) + return err + default: + panic("internal error") + } +} + +// SerializeEncryptedKey serializes an encrypted key packet to w that contains +// key, encrypted to pub. +// If config is nil, sensible defaults will be used. 
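+//
+// An illustrative call (w, pub and sessionKey are hypothetical):
+//
+//	err := SerializeEncryptedKey(w, pub, CipherAES256, sessionKey, nil)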
+func SerializeEncryptedKey(w io.Writer, pub *PublicKey, cipherFunc CipherFunction, key []byte, config *Config) error { + var buf [10]byte + buf[0] = encryptedKeyVersion + binary.BigEndian.PutUint64(buf[1:9], pub.KeyId) + buf[9] = byte(pub.PubKeyAlgo) + + keyBlock := make([]byte, 1 /* cipher type */ +len(key)+2 /* checksum */) + keyBlock[0] = byte(cipherFunc) + copy(keyBlock[1:], key) + checksum := checksumKeyMaterial(key) + keyBlock[1+len(key)] = byte(checksum >> 8) + keyBlock[1+len(key)+1] = byte(checksum) + + switch pub.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: + return serializeEncryptedKeyRSA(w, config.Random(), buf, pub.PublicKey.(*rsa.PublicKey), keyBlock) + case PubKeyAlgoElGamal: + return serializeEncryptedKeyElGamal(w, config.Random(), buf, pub.PublicKey.(*elgamal.PublicKey), keyBlock) + case PubKeyAlgoECDH: + return serializeEncryptedKeyECDH(w, config.Random(), buf, pub.PublicKey.(*ecdh.PublicKey), keyBlock, pub.oid, pub.Fingerprint) + case PubKeyAlgoDSA, PubKeyAlgoRSASignOnly: + return errors.InvalidArgumentError("cannot encrypt to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) + } + + return errors.UnsupportedError("encrypting a key to public key of type " + strconv.Itoa(int(pub.PubKeyAlgo))) +} + +func serializeEncryptedKeyRSA(w io.Writer, rand io.Reader, header [10]byte, pub *rsa.PublicKey, keyBlock []byte) error { + cipherText, err := rsa.EncryptPKCS1v15(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("RSA encryption failed: " + err.Error()) + } + + cipherMPI := encoding.NewMPI(cipherText) + packetLen := 10 /* header length */ + int(cipherMPI.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + _, err = w.Write(cipherMPI.EncodedBytes()) + return err +} + +func serializeEncryptedKeyElGamal(w io.Writer, rand io.Reader, header [10]byte, pub *elgamal.PublicKey, keyBlock []byte) error { + c1, c2, err := elgamal.Encrypt(rand, pub, keyBlock) + if err != nil { + return errors.InvalidArgumentError("ElGamal encryption failed: " + err.Error()) + } + + packetLen := 10 /* header length */ + packetLen += 2 /* mpi size */ + (c1.BitLen()+7)/8 + packetLen += 2 /* mpi size */ + (c2.BitLen()+7)/8 + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(new(encoding.MPI).SetBig(c1).EncodedBytes()); err != nil { + return err + } + _, err = w.Write(new(encoding.MPI).SetBig(c2).EncodedBytes()) + return err +} + +func serializeEncryptedKeyECDH(w io.Writer, rand io.Reader, header [10]byte, pub *ecdh.PublicKey, keyBlock []byte, oid encoding.Field, fingerprint []byte) error { + vsG, c, err := ecdh.Encrypt(rand, pub, keyBlock, oid.EncodedBytes(), fingerprint) + if err != nil { + return errors.InvalidArgumentError("ECDH encryption failed: " + err.Error()) + } + + g := encoding.NewMPI(vsG) + m := encoding.NewOID(c) + + packetLen := 10 /* header length */ + packetLen += int(g.EncodedLength()) + int(m.EncodedLength()) + + err = serializeHeader(w, packetTypeEncryptedKey, packetLen) + if err != nil { + return err + } + + _, err = w.Write(header[:]) + if err != nil { + return err + } + if _, err = w.Write(g.EncodedBytes()); err != nil { + return err + } + _, err = w.Write(m.EncodedBytes()) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go 
b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go new file mode 100644 index 0000000000..4be987609b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/literal.go @@ -0,0 +1,91 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "encoding/binary" + "io" +) + +// LiteralData represents an encrypted file. See RFC 4880, section 5.9. +type LiteralData struct { + Format uint8 + IsBinary bool + FileName string + Time uint32 // Unix epoch time. Either creation time or modification time. 0 means undefined. + Body io.Reader +} + +// ForEyesOnly returns whether the contents of the LiteralData have been marked +// as especially sensitive. +func (l *LiteralData) ForEyesOnly() bool { + return l.FileName == "_CONSOLE" +} + +func (l *LiteralData) parse(r io.Reader) (err error) { + var buf [256]byte + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + + l.Format = buf[0] + l.IsBinary = l.Format == 'b' + fileNameLen := int(buf[1]) + + _, err = readFull(r, buf[:fileNameLen]) + if err != nil { + return + } + + l.FileName = string(buf[:fileNameLen]) + + _, err = readFull(r, buf[:4]) + if err != nil { + return + } + + l.Time = binary.BigEndian.Uint32(buf[:4]) + l.Body = r + return +} + +// SerializeLiteral serializes a literal data packet to w and returns a +// WriteCloser to which the data itself can be written and which MUST be closed +// on completion. The fileName is truncated to 255 bytes. +func SerializeLiteral(w io.WriteCloser, isBinary bool, fileName string, time uint32) (plaintext io.WriteCloser, err error) { + var buf [4]byte + buf[0] = 't' + if isBinary { + buf[0] = 'b' + } + if len(fileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + + inner, err := serializeStreamHeader(w, packetTypeLiteralData) + if err != nil { + return + } + + _, err = inner.Write(buf[:2]) + if err != nil { + return + } + _, err = inner.Write([]byte(fileName)) + if err != nil { + return + } + binary.BigEndian.PutUint32(buf[:], time) + _, err = inner.Write(buf[:]) + if err != nil { + return + } + + plaintext = inner + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go new file mode 100644 index 0000000000..4f26d0a00b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/ocfb.go @@ -0,0 +1,137 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// OpenPGP CFB Mode. http://tools.ietf.org/html/rfc4880#section-13.9 + +package packet + +import ( + "crypto/cipher" +) + +type ocfbEncrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// An OCFBResyncOption determines if the "resynchronization step" of OCFB is +// performed. +type OCFBResyncOption bool + +const ( + OCFBResync OCFBResyncOption = true + OCFBNoResync OCFBResyncOption = false +) + +// NewOCFBEncrypter returns a cipher.Stream which encrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block, and an initial amount of +// ciphertext. randData must be random bytes and be the same length as the +// cipher.Block's block size. Resync determines if the "resynchronization step" +// from RFC 4880, 13.9 step 7 is performed. Different parts of OpenPGP vary on +// this point. 
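+//
+// A minimal sketch (illustrative; key and w are hypothetical, aes and
+// crypto/rand come from the standard library):
+//
+//	block, _ := aes.NewCipher(key)
+//	randData := make([]byte, block.BlockSize())
+//	rand.Read(randData)
+//	stream, prefix := NewOCFBEncrypter(block, randData, OCFBResync)
+//	w.Write(prefix) // blockSize+2 bytes that must precede the ciphertext
+//	stream.XORKeyStream(data, data)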
+func NewOCFBEncrypter(block cipher.Block, randData []byte, resync OCFBResyncOption) (cipher.Stream, []byte) { + blockSize := block.BlockSize() + if len(randData) != blockSize { + return nil, nil + } + + x := &ocfbEncrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefix := make([]byte, blockSize+2) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefix[i] = randData[i] ^ x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefix[blockSize] = x.fre[0] ^ randData[blockSize-2] + prefix[blockSize+1] = x.fre[1] ^ randData[blockSize-1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + return x, prefix +} + +func (x *ocfbEncrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + x.fre[x.outUsed] ^= src[i] + dst[i] = x.fre[x.outUsed] + x.outUsed++ + } +} + +type ocfbDecrypter struct { + b cipher.Block + fre []byte + outUsed int +} + +// NewOCFBDecrypter returns a cipher.Stream which decrypts data with OpenPGP's +// cipher feedback mode using the given cipher.Block. Prefix must be the first +// blockSize + 2 bytes of the ciphertext, where blockSize is the cipher.Block's +// block size. On successful exit, blockSize+2 bytes of decrypted data are written into +// prefix. Resync determines if the "resynchronization step" from RFC 4880, +// 13.9 step 7 is performed. Different parts of OpenPGP vary on this point. +func NewOCFBDecrypter(block cipher.Block, prefix []byte, resync OCFBResyncOption) cipher.Stream { + blockSize := block.BlockSize() + if len(prefix) != blockSize+2 { + return nil + } + + x := &ocfbDecrypter{ + b: block, + fre: make([]byte, blockSize), + outUsed: 0, + } + prefixCopy := make([]byte, len(prefix)) + copy(prefixCopy, prefix) + + block.Encrypt(x.fre, x.fre) + for i := 0; i < blockSize; i++ { + prefixCopy[i] ^= x.fre[i] + } + + block.Encrypt(x.fre, prefix[:blockSize]) + prefixCopy[blockSize] ^= x.fre[0] + prefixCopy[blockSize+1] ^= x.fre[1] + + if resync { + block.Encrypt(x.fre, prefix[2:]) + } else { + x.fre[0] = prefix[blockSize] + x.fre[1] = prefix[blockSize+1] + x.outUsed = 2 + } + copy(prefix, prefixCopy) + return x +} + +func (x *ocfbDecrypter) XORKeyStream(dst, src []byte) { + for i := 0; i < len(src); i++ { + if x.outUsed == len(x.fre) { + x.b.Encrypt(x.fre, x.fre) + x.outUsed = 0 + } + + c := src[i] + dst[i] = x.fre[x.outUsed] ^ src[i] + x.fre[x.outUsed] = c + x.outUsed++ + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go new file mode 100644 index 0000000000..41c35de219 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/one_pass_signature.go @@ -0,0 +1,73 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "encoding/binary" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "io" + "strconv" +) + +// OnePassSignature represents a one-pass signature packet. See RFC 4880, +// section 5.4. 
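+// It precedes the signed data and carries enough information (signature
+// type, hash, public key algorithm and key ID) for the verifier to process
+// the message in a single pass; the matching Signature packet follows the
+// signed data.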
+type OnePassSignature struct { + SigType SignatureType + Hash crypto.Hash + PubKeyAlgo PublicKeyAlgorithm + KeyId uint64 + IsLast bool +} + +const onePassSignatureVersion = 3 + +func (ops *OnePassSignature) parse(r io.Reader) (err error) { + var buf [13]byte + + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != onePassSignatureVersion { + err = errors.UnsupportedError("one-pass-signature packet version " + strconv.Itoa(int(buf[0]))) + } + + var ok bool + ops.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function: " + strconv.Itoa(int(buf[2]))) + } + + ops.SigType = SignatureType(buf[1]) + ops.PubKeyAlgo = PublicKeyAlgorithm(buf[3]) + ops.KeyId = binary.BigEndian.Uint64(buf[4:12]) + ops.IsLast = buf[12] != 0 + return +} + +// Serialize marshals the given OnePassSignature to w. +func (ops *OnePassSignature) Serialize(w io.Writer) error { + var buf [13]byte + buf[0] = onePassSignatureVersion + buf[1] = uint8(ops.SigType) + var ok bool + buf[2], ok = s2k.HashToHashId(ops.Hash) + if !ok { + return errors.UnsupportedError("hash type: " + strconv.Itoa(int(ops.Hash))) + } + buf[3] = uint8(ops.PubKeyAlgo) + binary.BigEndian.PutUint64(buf[4:12], ops.KeyId) + if ops.IsLast { + buf[12] = 1 + } + + if err := serializeHeader(w, packetTypeOnePassSignature, len(buf)); err != nil { + return err + } + _, err := w.Write(buf[:]) + return err +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go new file mode 100644 index 0000000000..e3879199e6 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/opaque.go @@ -0,0 +1,162 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// OpaquePacket represents an OpenPGP packet as raw, unparsed data. This is +// useful for splitting and storing the original packet contents separately, +// handling unsupported packet types or accessing parts of the packet not yet +// implemented by this package. +type OpaquePacket struct { + // Packet type + Tag uint8 + // Reason why the packet was parsed opaquely + Reason error + // Binary contents of the packet data + Contents []byte +} + +func (op *OpaquePacket) parse(r io.Reader) (err error) { + op.Contents, err = ioutil.ReadAll(r) + return +} + +// Serialize marshals the packet to a writer in its original form, including +// the packet header. +func (op *OpaquePacket) Serialize(w io.Writer) (err error) { + err = serializeHeader(w, packetType(op.Tag), len(op.Contents)) + if err == nil { + _, err = w.Write(op.Contents) + } + return +} + +// Parse attempts to parse the opaque contents into a structure supported by +// this package. If the packet is not known then the result will be another +// OpaquePacket. +func (op *OpaquePacket) Parse() (p Packet, err error) { + hdr := bytes.NewBuffer(nil) + err = serializeHeader(hdr, packetType(op.Tag), len(op.Contents)) + if err != nil { + op.Reason = err + return op, err + } + p, err = Read(io.MultiReader(hdr, bytes.NewBuffer(op.Contents))) + if err != nil { + op.Reason = err + p = op + } + return +} + +// OpaqueReader reads OpaquePackets from an io.Reader. 
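+//
+// Typical iteration (illustrative only):
+//
+//	or := NewOpaqueReader(r)
+//	for {
+//		op, err := or.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		// inspect op.Tag / op.Contents
+//	}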
+type OpaqueReader struct { + r io.Reader +} + +func NewOpaqueReader(r io.Reader) *OpaqueReader { + return &OpaqueReader{r: r} +} + +// Read the next OpaquePacket. +func (or *OpaqueReader) Next() (op *OpaquePacket, err error) { + tag, _, contents, err := readHeader(or.r) + if err != nil { + return + } + op = &OpaquePacket{Tag: uint8(tag), Reason: err} + err = op.parse(contents) + if err != nil { + consumeAll(contents) + } + return +} + +// OpaqueSubpacket represents an unparsed OpenPGP subpacket, +// as found in signature and user attribute packets. +type OpaqueSubpacket struct { + SubType uint8 + Contents []byte +} + +// OpaqueSubpackets extracts opaque, unparsed OpenPGP subpackets from +// their byte representation. +func OpaqueSubpackets(contents []byte) (result []*OpaqueSubpacket, err error) { + var ( + subHeaderLen int + subPacket *OpaqueSubpacket + ) + for len(contents) > 0 { + subHeaderLen, subPacket, err = nextSubpacket(contents) + if err != nil { + break + } + result = append(result, subPacket) + contents = contents[subHeaderLen+len(subPacket.Contents):] + } + return +} + +func nextSubpacket(contents []byte) (subHeaderLen int, subPacket *OpaqueSubpacket, err error) { + // RFC 4880, section 5.2.3.1 + var subLen uint32 + if len(contents) < 1 { + goto Truncated + } + subPacket = &OpaqueSubpacket{} + switch { + case contents[0] < 192: + subHeaderLen = 2 // 1 length byte, 1 subtype byte + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]) + contents = contents[1:] + case contents[0] < 255: + subHeaderLen = 3 // 2 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[0]-192)<<8 + uint32(contents[1]) + 192 + contents = contents[2:] + default: + subHeaderLen = 6 // 5 length bytes, 1 subtype + if len(contents) < subHeaderLen { + goto Truncated + } + subLen = uint32(contents[1])<<24 | + uint32(contents[2])<<16 | + uint32(contents[3])<<8 | + uint32(contents[4]) + contents = contents[5:] + } + if subLen > uint32(len(contents)) || subLen == 0 { + goto Truncated + } + subPacket.SubType = contents[0] + subPacket.Contents = contents[1:subLen] + return +Truncated: + err = errors.StructuralError("subpacket truncated") + return +} + +func (osp *OpaqueSubpacket) Serialize(w io.Writer) (err error) { + buf := make([]byte, 6) + n := serializeSubpacketLength(buf, len(osp.Contents)+1) + buf[n] = osp.SubType + if _, err = w.Write(buf[:n+1]); err != nil { + return + } + _, err = w.Write(osp.Contents) + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go new file mode 100644 index 0000000000..fe0da9d368 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/packet.go @@ -0,0 +1,522 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package packet implements parsing and serialization of OpenPGP packets, as +// specified in RFC 4880. +package packet // import "github.com/ProtonMail/go-crypto/openpgp/packet" + +import ( + "bytes" + "crypto/cipher" + "crypto/rsa" + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" +) + +// readFull is the same as io.ReadFull except that reading zero bytes returns +// ErrUnexpectedEOF rather than EOF. 
+func readFull(r io.Reader, buf []byte) (n int, err error) { + n, err = io.ReadFull(r, buf) + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readLength reads an OpenPGP length from r. See RFC 4880, section 4.2.2. +func readLength(r io.Reader) (length int64, isPartial bool, err error) { + var buf [4]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + switch { + case buf[0] < 192: + length = int64(buf[0]) + case buf[0] < 224: + length = int64(buf[0]-192) << 8 + _, err = readFull(r, buf[0:1]) + if err != nil { + return + } + length += int64(buf[0]) + 192 + case buf[0] < 255: + length = int64(1) << (buf[0] & 0x1f) + isPartial = true + default: + _, err = readFull(r, buf[0:4]) + if err != nil { + return + } + length = int64(buf[0])<<24 | + int64(buf[1])<<16 | + int64(buf[2])<<8 | + int64(buf[3]) + } + return +} + +// partialLengthReader wraps an io.Reader and handles OpenPGP partial lengths. +// The continuation lengths are parsed and removed from the stream and EOF is +// returned at the end of the packet. See RFC 4880, section 4.2.2.4. +type partialLengthReader struct { + r io.Reader + remaining int64 + isPartial bool +} + +func (r *partialLengthReader) Read(p []byte) (n int, err error) { + for r.remaining == 0 { + if !r.isPartial { + return 0, io.EOF + } + r.remaining, r.isPartial, err = readLength(r.r) + if err != nil { + return 0, err + } + } + + toRead := int64(len(p)) + if toRead > r.remaining { + toRead = r.remaining + } + + n, err = r.r.Read(p[:int(toRead)]) + r.remaining -= int64(n) + if n < int(toRead) && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// partialLengthWriter writes a stream of data using OpenPGP partial lengths. +// See RFC 4880, section 4.2.2.4. +type partialLengthWriter struct { + w io.WriteCloser + buf bytes.Buffer + lengthByte [1]byte +} + +func (w *partialLengthWriter) Write(p []byte) (n int, err error) { + bufLen := w.buf.Len() + if bufLen > 512 { + for power := uint(30); ; power-- { + l := 1 << power + if bufLen >= l { + w.lengthByte[0] = 224 + uint8(power) + _, err = w.w.Write(w.lengthByte[:]) + if err != nil { + return + } + var m int + m, err = w.w.Write(w.buf.Next(l)) + if err != nil { + return + } + if m != l { + return 0, io.ErrShortWrite + } + break + } + } + } + return w.buf.Write(p) +} + +func (w *partialLengthWriter) Close() (err error) { + len := w.buf.Len() + err = serializeLength(w.w, len) + if err != nil { + return err + } + _, err = w.buf.WriteTo(w.w) + if err != nil { + return err + } + return w.w.Close() +} + +// A spanReader is an io.LimitReader, but it returns ErrUnexpectedEOF if the +// underlying Reader returns EOF before the limit has been reached. +type spanReader struct { + r io.Reader + n int64 +} + +func (l *spanReader) Read(p []byte) (n int, err error) { + if l.n <= 0 { + return 0, io.EOF + } + if int64(len(p)) > l.n { + p = p[0:l.n] + } + n, err = l.r.Read(p) + l.n -= int64(n) + if l.n > 0 && err == io.EOF { + err = io.ErrUnexpectedEOF + } + return +} + +// readHeader parses a packet header and returns an io.Reader which will return +// the contents of the packet. See RFC 4880, section 4.2. 
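+//
+// For example (illustrative), a new-format header 0xC2 0x0F introduces a
+// signature packet (tag 2) with a 15-octet body, while 0xC2 0xE1 starts a
+// partial body of 1<<1 = 2 octets (see readLength above).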
+func readHeader(r io.Reader) (tag packetType, length int64, contents io.Reader, err error) { + var buf [4]byte + _, err = io.ReadFull(r, buf[:1]) + if err != nil { + return + } + if buf[0]&0x80 == 0 { + err = errors.StructuralError("tag byte does not have MSB set") + return + } + if buf[0]&0x40 == 0 { + // Old format packet + tag = packetType((buf[0] & 0x3f) >> 2) + lengthType := buf[0] & 3 + if lengthType == 3 { + length = -1 + contents = r + return + } + lengthBytes := 1 << lengthType + _, err = readFull(r, buf[0:lengthBytes]) + if err != nil { + return + } + for i := 0; i < lengthBytes; i++ { + length <<= 8 + length |= int64(buf[i]) + } + contents = &spanReader{r, length} + return + } + + // New format packet + tag = packetType(buf[0] & 0x3f) + length, isPartial, err := readLength(r) + if err != nil { + return + } + if isPartial { + contents = &partialLengthReader{ + remaining: length, + isPartial: true, + r: r, + } + length = -1 + } else { + contents = &spanReader{r, length} + } + return +} + +// serializeHeader writes an OpenPGP packet header to w. See RFC 4880, section +// 4.2. +func serializeHeader(w io.Writer, ptype packetType, length int) (err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + return serializeLength(w, length) +} + +// serializeType writes an OpenPGP packet type to w. See RFC 4880, section +// 4.2. +func serializeType(w io.Writer, ptype packetType) (err error) { + var buf [1]byte + buf[0] = 0x80 | 0x40 | byte(ptype) + _, err = w.Write(buf[:]) + return +} + +// serializeLength writes an OpenPGP packet length to w. See RFC 4880, section +// 4.2.2. +func serializeLength(w io.Writer, length int) (err error) { + var buf [5]byte + var n int + + if length < 192 { + buf[0] = byte(length) + n = 1 + } else if length < 8384 { + length -= 192 + buf[0] = 192 + byte(length>>8) + buf[1] = byte(length) + n = 2 + } else { + buf[0] = 255 + buf[1] = byte(length >> 24) + buf[2] = byte(length >> 16) + buf[3] = byte(length >> 8) + buf[4] = byte(length) + n = 5 + } + + _, err = w.Write(buf[:n]) + return +} + +// serializeStreamHeader writes an OpenPGP packet header to w where the +// length of the packet is unknown. It returns a io.WriteCloser which can be +// used to write the contents of the packet. See RFC 4880, section 4.2. +func serializeStreamHeader(w io.WriteCloser, ptype packetType) (out io.WriteCloser, err error) { + err = serializeType(w, ptype) + if err != nil { + return + } + out = &partialLengthWriter{w: w} + return +} + +// Packet represents an OpenPGP packet. Users are expected to try casting +// instances of this interface to specific packet types. +type Packet interface { + parse(io.Reader) error +} + +// consumeAll reads from the given Reader until error, returning the number of +// bytes read. +func consumeAll(r io.Reader) (n int64, err error) { + var m int + var buf [1024]byte + + for { + m, err = r.Read(buf[:]) + n += int64(m) + if err == io.EOF { + err = nil + return + } + if err != nil { + return + } + } +} + +// packetType represents the numeric ids of the different OpenPGP packet types. 
See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-2 +type packetType uint8 + +const ( + packetTypeEncryptedKey packetType = 1 + packetTypeSignature packetType = 2 + packetTypeSymmetricKeyEncrypted packetType = 3 + packetTypeOnePassSignature packetType = 4 + packetTypePrivateKey packetType = 5 + packetTypePublicKey packetType = 6 + packetTypePrivateSubkey packetType = 7 + packetTypeCompressed packetType = 8 + packetTypeSymmetricallyEncrypted packetType = 9 + packetTypeLiteralData packetType = 11 + packetTypeUserId packetType = 13 + packetTypePublicSubkey packetType = 14 + packetTypeUserAttribute packetType = 17 + packetTypeSymmetricallyEncryptedMDC packetType = 18 + packetTypeAEADEncrypted packetType = 20 +) + +// EncryptedDataPacket holds encrypted data. It is currently implemented by +// SymmetricallyEncrypted and AEADEncrypted. +type EncryptedDataPacket interface { + Decrypt(CipherFunction, []byte) (io.ReadCloser, error) +} + +// Read reads a single OpenPGP packet from the given io.Reader. If there is an +// error parsing a packet, the whole packet is consumed from the input. +func Read(r io.Reader) (p Packet, err error) { + tag, _, contents, err := readHeader(r) + if err != nil { + return + } + + switch tag { + case packetTypeEncryptedKey: + p = new(EncryptedKey) + case packetTypeSignature: + p = new(Signature) + case packetTypeSymmetricKeyEncrypted: + p = new(SymmetricKeyEncrypted) + case packetTypeOnePassSignature: + p = new(OnePassSignature) + case packetTypePrivateKey, packetTypePrivateSubkey: + pk := new(PrivateKey) + if tag == packetTypePrivateSubkey { + pk.IsSubkey = true + } + p = pk + case packetTypePublicKey, packetTypePublicSubkey: + isSubkey := tag == packetTypePublicSubkey + p = &PublicKey{IsSubkey: isSubkey} + case packetTypeCompressed: + p = new(Compressed) + case packetTypeSymmetricallyEncrypted: + err = errors.UnsupportedError("Symmetrically encrypted packets without MDC are not supported") + case packetTypeLiteralData: + p = new(LiteralData) + case packetTypeUserId: + p = new(UserId) + case packetTypeUserAttribute: + p = new(UserAttribute) + case packetTypeSymmetricallyEncryptedMDC: + se := new(SymmetricallyEncrypted) + se.MDC = true + p = se + case packetTypeAEADEncrypted: + p = new(AEADEncrypted) + default: + err = errors.UnknownPacketTypeError(tag) + } + if p != nil { + err = p.parse(contents) + } + if err != nil { + consumeAll(contents) + } + return +} + +// SignatureType represents the different semantic meanings of an OpenPGP +// signature. See RFC 4880, section 5.2.1. +type SignatureType uint8 + +const ( + SigTypeBinary SignatureType = 0x00 + SigTypeText = 0x01 + SigTypeGenericCert = 0x10 + SigTypePersonaCert = 0x11 + SigTypeCasualCert = 0x12 + SigTypePositiveCert = 0x13 + SigTypeSubkeyBinding = 0x18 + SigTypePrimaryKeyBinding = 0x19 + SigTypeDirectSignature = 0x1F + SigTypeKeyRevocation = 0x20 + SigTypeSubkeyRevocation = 0x28 +) + +// PublicKeyAlgorithm represents the different public key system specified for +// OpenPGP. See +// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-12 +type PublicKeyAlgorithm uint8 + +const ( + PubKeyAlgoRSA PublicKeyAlgorithm = 1 + PubKeyAlgoElGamal PublicKeyAlgorithm = 16 + PubKeyAlgoDSA PublicKeyAlgorithm = 17 + // RFC 6637, Section 5. 
+	PubKeyAlgoECDH PublicKeyAlgorithm = 18
+	PubKeyAlgoECDSA PublicKeyAlgorithm = 19
+	// https://www.ietf.org/archive/id/draft-koch-eddsa-for-openpgp-04.txt
+	PubKeyAlgoEdDSA PublicKeyAlgorithm = 22
+
+	// Deprecated in RFC 4880, Section 13.5. Use key flags instead.
+	PubKeyAlgoRSAEncryptOnly PublicKeyAlgorithm = 2
+	PubKeyAlgoRSASignOnly PublicKeyAlgorithm = 3
+)
+
+// CanEncrypt returns true if it's possible to encrypt a message to a public
+// key of the given type.
+func (pka PublicKeyAlgorithm) CanEncrypt() bool {
+	switch pka {
+	case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoElGamal, PubKeyAlgoECDH:
+		return true
+	}
+	return false
+}
+
+// CanSign returns true if it's possible for a public key of the given type to
+// sign a message.
+func (pka PublicKeyAlgorithm) CanSign() bool {
+	switch pka {
+	case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA:
+		return true
+	}
+	return false
+}
+
+// CipherFunction represents the different block ciphers specified for OpenPGP. See
+// http://www.iana.org/assignments/pgp-parameters/pgp-parameters.xhtml#pgp-parameters-13
+type CipherFunction algorithm.CipherFunction
+
+const (
+	Cipher3DES CipherFunction = 2
+	CipherCAST5 CipherFunction = 3
+	CipherAES128 CipherFunction = 7
+	CipherAES192 CipherFunction = 8
+	CipherAES256 CipherFunction = 9
+)
+
+// KeySize returns the key size, in bytes, of cipher.
+func (cipher CipherFunction) KeySize() int {
+	return algorithm.CipherFunction(cipher).KeySize()
+}
+
+// blockSize returns the block size, in bytes, of cipher.
+func (cipher CipherFunction) blockSize() int {
+	return algorithm.CipherFunction(cipher).BlockSize()
+}
+
+// new returns a fresh instance of the given cipher.
+func (cipher CipherFunction) new(key []byte) (block cipher.Block) {
+	return algorithm.CipherFunction(cipher).New(key)
+}
+
+// padToKeySize left-pads an MPI with zeroes to match the length of the
+// specified RSA public key.
+func padToKeySize(pub *rsa.PublicKey, b []byte) []byte {
+	k := (pub.N.BitLen() + 7) / 8
+	if len(b) >= k {
+		return b
+	}
+	bb := make([]byte, k)
+	copy(bb[len(bb)-len(b):], b)
+	return bb
+}
+
+// CompressionAlgo represents the different compression algorithms
+// supported by OpenPGP (except for BZIP2, which is not currently
+// supported). See Section 9.3 of RFC 4880.
+type CompressionAlgo uint8
+
+const (
+	CompressionNone CompressionAlgo = 0
+	CompressionZIP CompressionAlgo = 1
+	CompressionZLIB CompressionAlgo = 2
+)
+
+// AEADMode represents the different Authenticated Encryption with Associated
+// Data modes specified for OpenPGP.
+type AEADMode algorithm.AEADMode
+
+const (
+	AEADModeEAX AEADMode = 1
+	AEADModeOCB AEADMode = 2
+	AEADModeExperimentalGCM AEADMode = 100
+)
+
+func (mode AEADMode) NonceLength() int {
+	return algorithm.AEADMode(mode).NonceLength()
+}
+
+func (mode AEADMode) TagLength() int {
+	return algorithm.AEADMode(mode).TagLength()
+}
+
+// new returns a fresh instance of the given mode.
+func (mode AEADMode) new(block cipher.Block) cipher.AEAD {
+	return algorithm.AEADMode(mode).New(block)
+}
+
+// ReasonForRevocation represents a revocation reason code as per RFC 4880
+// section 5.2.3.23.
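+// The constants below mirror the code points defined there: 0 (no reason
+// specified), 1 (key superseded), 2 (key material compromised) and 3 (key
+// retired and no longer used).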
+type ReasonForRevocation uint8 + +const ( + NoReason ReasonForRevocation = 0 + KeySuperseded ReasonForRevocation = 1 + KeyCompromised ReasonForRevocation = 2 + KeyRetired ReasonForRevocation = 3 +) diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go new file mode 100644 index 0000000000..854f9c4bbb --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key.go @@ -0,0 +1,780 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "fmt" + "io" + "io/ioutil" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "golang.org/x/crypto/curve25519" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" + "golang.org/x/crypto/ed25519" +) + +// PrivateKey represents a possibly encrypted private key. See RFC 4880, +// section 5.5.3. +type PrivateKey struct { + PublicKey + Encrypted bool // if true then the private key is unavailable until Decrypt has been called. + encryptedData []byte + cipher CipherFunction + s2k func(out, in []byte) + // An *{rsa|dsa|elgamal|ecdh|ecdsa|ed25519}.PrivateKey or + // crypto.Signer/crypto.Decrypter (Decryptor RSA only). + PrivateKey interface{} + sha1Checksum bool + iv []byte + + // Type of encryption of the S2K packet + // Allowed values are 0 (Not encrypted), 254 (SHA1), or + // 255 (2-byte checksum) + s2kType S2KType + // Full parameters of the S2K packet + s2kParams *s2k.Params +} + +//S2KType s2k packet type +type S2KType uint8 + +const ( + // S2KNON unencrypt + S2KNON S2KType = 0 + // S2KSHA1 sha1 sum check + S2KSHA1 S2KType = 254 + // S2KCHECKSUM sum check + S2KCHECKSUM S2KType = 255 +) + +func NewRSAPrivateKey(creationTime time.Time, priv *rsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewDSAPrivateKey(creationTime time.Time, priv *dsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewElGamalPrivateKey(creationTime time.Time, priv *elgamal.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewECDSAPrivateKey(creationTime time.Time, priv *ecdsa.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDSAPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +func NewEdDSAPrivateKey(creationTime time.Time, priv *ed25519.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pub := priv.Public().(ed25519.PublicKey) + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pub) + pk.PrivateKey = priv + return pk +} + +func NewECDHPrivateKey(creationTime time.Time, priv *ecdh.PrivateKey) *PrivateKey { + pk := new(PrivateKey) + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + pk.PrivateKey = priv + return pk +} + +// 
NewSignerPrivateKey creates a PrivateKey from a crypto.Signer that +// implements RSA, ECDSA or EdDSA. +func NewSignerPrivateKey(creationTime time.Time, signer crypto.Signer) *PrivateKey { + pk := new(PrivateKey) + // In general, the public Keys should be used as pointers. We still + // type-switch on the values, for backwards-compatibility. + switch pubkey := signer.Public().(type) { + case *rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, pubkey) + case rsa.PublicKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &pubkey) + case *ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, pubkey) + case ecdsa.PublicKey: + pk.PublicKey = *NewECDSAPublicKey(creationTime, &pubkey) + case *ed25519.PublicKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, pubkey) + case ed25519.PublicKey: + pk.PublicKey = *NewEdDSAPublicKey(creationTime, &pubkey) + default: + panic("openpgp: unknown crypto.Signer type in NewSignerPrivateKey") + } + pk.PrivateKey = signer + return pk +} + +// NewDecrypterPrivateKey creates a PrivateKey from a *{rsa|elgamal|ecdh}.PrivateKey. +func NewDecrypterPrivateKey(creationTime time.Time, decrypter interface{}) *PrivateKey { + pk := new(PrivateKey) + switch priv := decrypter.(type) { + case *rsa.PrivateKey: + pk.PublicKey = *NewRSAPublicKey(creationTime, &priv.PublicKey) + case *elgamal.PrivateKey: + pk.PublicKey = *NewElGamalPublicKey(creationTime, &priv.PublicKey) + case *ecdh.PrivateKey: + pk.PublicKey = *NewECDHPublicKey(creationTime, &priv.PublicKey) + default: + panic("openpgp: unknown decrypter type in NewDecrypterPrivateKey") + } + pk.PrivateKey = decrypter + return pk +} + +func (pk *PrivateKey) parse(r io.Reader) (err error) { + err = (&pk.PublicKey).parse(r) + if err != nil { + return + } + v5 := pk.PublicKey.Version == 5 + + var buf [1]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.s2kType = S2KType(buf[0]) + var optCount [1]byte + if v5 { + if _, err = readFull(r, optCount[:]); err != nil { + return + } + } + + switch pk.s2kType { + case S2KNON: + pk.s2k = nil + pk.Encrypted = false + case S2KSHA1, S2KCHECKSUM: + if v5 && pk.s2kType == S2KCHECKSUM { + return errors.StructuralError("wrong s2k identifier for version 5") + } + _, err = readFull(r, buf[:]) + if err != nil { + return + } + pk.cipher = CipherFunction(buf[0]) + pk.s2kParams, err = s2k.ParseIntoParams(r) + if err != nil { + return + } + if pk.s2kParams.Dummy() { + return + } + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return + } + pk.Encrypted = true + if pk.s2kType == S2KSHA1 { + pk.sha1Checksum = true + } + default: + return errors.UnsupportedError("deprecated s2k function in private key") + } + + if pk.Encrypted { + blockSize := pk.cipher.blockSize() + if blockSize == 0 { + return errors.UnsupportedError("unsupported cipher in private key: " + strconv.Itoa(int(pk.cipher))) + } + pk.iv = make([]byte, blockSize) + _, err = readFull(r, pk.iv) + if err != nil { + return + } + } + + var privateKeyData []byte + if v5 { + var n [4]byte /* secret material four octet count */ + _, err = readFull(r, n[:]) + if err != nil { + return + } + count := uint32(uint32(n[0])<<24 | uint32(n[1])<<16 | uint32(n[2])<<8 | uint32(n[3])) + if !pk.Encrypted { + count = count + 2 /* two octet checksum */ + } + privateKeyData = make([]byte, count) + _, err = readFull(r, privateKeyData) + if err != nil { + return + } + } else { + privateKeyData, err = ioutil.ReadAll(r) + if err != nil { + return + } + } + if !pk.Encrypted { + return 
pk.parsePrivateKey(privateKeyData) + } + + pk.encryptedData = privateKeyData + return +} + +// Dummy returns true if the private key is a dummy key. This is a GNU extension. +func (pk *PrivateKey) Dummy() bool { + return pk.s2kParams.Dummy() +} + +func mod64kHash(d []byte) uint16 { + var h uint16 + for _, b := range d { + h += uint16(b) + } + return h +} + +func (pk *PrivateKey) Serialize(w io.Writer) (err error) { + contents := bytes.NewBuffer(nil) + err = pk.PublicKey.serializeWithoutHeaders(contents) + if err != nil { + return + } + if _, err = contents.Write([]byte{uint8(pk.s2kType)}); err != nil { + return + } + + optional := bytes.NewBuffer(nil) + if pk.Encrypted || pk.Dummy() { + optional.Write([]byte{uint8(pk.cipher)}) + if err := pk.s2kParams.Serialize(optional); err != nil { + return err + } + if pk.Encrypted { + optional.Write(pk.iv) + } + } + if pk.Version == 5 { + contents.Write([]byte{uint8(optional.Len())}) + } + io.Copy(contents, optional) + + if !pk.Dummy() { + l := 0 + var priv []byte + if !pk.Encrypted { + buf := bytes.NewBuffer(nil) + err = pk.serializePrivateKey(buf) + if err != nil { + return err + } + l = buf.Len() + if pk.sha1Checksum { + h := sha1.New() + h.Write(buf.Bytes()) + buf.Write(h.Sum(nil)) + } else { + checksum := mod64kHash(buf.Bytes()) + buf.Write([]byte{byte(checksum >> 8), byte(checksum)}) + } + priv = buf.Bytes() + } else { + priv, l = pk.encryptedData, len(pk.encryptedData) + } + + if pk.Version == 5 { + contents.Write([]byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)}) + } + contents.Write(priv) + } + + ptype := packetTypePrivateKey + if pk.IsSubkey { + ptype = packetTypePrivateSubkey + } + err = serializeHeader(w, ptype, contents.Len()) + if err != nil { + return + } + _, err = io.Copy(w, contents) + if err != nil { + return + } + return +} + +func serializeRSAPrivateKey(w io.Writer, priv *rsa.PrivateKey) error { + if _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[1]).EncodedBytes()); err != nil { + return err + } + if _, err := w.Write(new(encoding.MPI).SetBig(priv.Primes[0]).EncodedBytes()); err != nil { + return err + } + _, err := w.Write(new(encoding.MPI).SetBig(priv.Precomputed.Qinv).EncodedBytes()) + return err +} + +func serializeDSAPrivateKey(w io.Writer, priv *dsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeElGamalPrivateKey(w io.Writer, priv *elgamal.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.X).EncodedBytes()) + return err +} + +func serializeECDSAPrivateKey(w io.Writer, priv *ecdsa.PrivateKey) error { + _, err := w.Write(new(encoding.MPI).SetBig(priv.D).EncodedBytes()) + return err +} + +func serializeEdDSAPrivateKey(w io.Writer, priv *ed25519.PrivateKey) error { + keySize := ed25519.PrivateKeySize - ed25519.PublicKeySize + _, err := w.Write(encoding.NewMPI((*priv)[:keySize]).EncodedBytes()) + return err +} + +func serializeECDHPrivateKey(w io.Writer, priv *ecdh.PrivateKey) error { + _, err := w.Write(encoding.NewMPI(priv.D).EncodedBytes()) + return err +} + +// Decrypt decrypts an encrypted private key using a passphrase. 
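+// It is a no-op for keys that are not encrypted. A minimal usage sketch,
+// where packets is an assumed io.Reader positioned at a secret-key packet:
+//
+//	p, err := Read(packets)
+//	if err != nil {
+//		// handle the parse error
+//	}
+//	if pk, ok := p.(*PrivateKey); ok && pk.Encrypted {
+//		if err := pk.Decrypt([]byte("passphrase")); err != nil {
+//			// wrong passphrase or corrupted key material
+//		}
+//	}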
+func (pk *PrivateKey) Decrypt(passphrase []byte) error { + if pk.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + if !pk.Encrypted { + return nil + } + + key := make([]byte, pk.cipher.KeySize()) + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + cfb := cipher.NewCFBDecrypter(block, pk.iv) + + data := make([]byte, len(pk.encryptedData)) + cfb.XORKeyStream(data, pk.encryptedData) + + if pk.sha1Checksum { + if len(data) < sha1.Size { + return errors.StructuralError("truncated private key data") + } + h := sha1.New() + h.Write(data[:len(data)-sha1.Size]) + sum := h.Sum(nil) + if !bytes.Equal(sum, data[len(data)-sha1.Size:]) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-sha1.Size] + } else { + if len(data) < 2 { + return errors.StructuralError("truncated private key data") + } + var sum uint16 + for i := 0; i < len(data)-2; i++ { + sum += uint16(data[i]) + } + if data[len(data)-2] != uint8(sum>>8) || + data[len(data)-1] != uint8(sum) { + return errors.StructuralError("private key checksum failure") + } + data = data[:len(data)-2] + } + + err := pk.parsePrivateKey(data) + if _, ok := err.(errors.KeyInvalidError); ok { + return errors.KeyInvalidError("invalid key parameters") + } + if err != nil { + return err + } + + // Mark key as unencrypted + pk.s2kType = S2KNON + pk.s2k = nil + pk.Encrypted = false + pk.encryptedData = nil + + return nil +} + +// Encrypt encrypts an unencrypted private key using a passphrase. +func (pk *PrivateKey) Encrypt(passphrase []byte) error { + priv := bytes.NewBuffer(nil) + err := pk.serializePrivateKey(priv) + if err != nil { + return err + } + + //Default config of private key encryption + pk.cipher = CipherAES256 + s2kConfig := &s2k.Config{ + S2KMode: 3, //Iterated + S2KCount: 65536, + Hash: crypto.SHA256, + } + + pk.s2kParams, err = s2k.Generate(rand.Reader, s2kConfig) + if err != nil { + return err + } + privateKeyBytes := priv.Bytes() + key := make([]byte, pk.cipher.KeySize()) + + pk.sha1Checksum = true + pk.s2k, err = pk.s2kParams.Function() + if err != nil { + return err + } + pk.s2k(key, passphrase) + block := pk.cipher.new(key) + pk.iv = make([]byte, pk.cipher.blockSize()) + _, err = rand.Read(pk.iv) + if err != nil { + return err + } + cfb := cipher.NewCFBEncrypter(block, pk.iv) + + if pk.sha1Checksum { + pk.s2kType = S2KSHA1 + h := sha1.New() + h.Write(privateKeyBytes) + sum := h.Sum(nil) + privateKeyBytes = append(privateKeyBytes, sum...) 
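+ // The SHA-1 digest appended here is encrypted together with the key
+ // material below, matching S2K usage octet 254 (S2KSHA1).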
+ } else { + pk.s2kType = S2KCHECKSUM + var sum uint16 + for _, b := range privateKeyBytes { + sum += uint16(b) + } + priv.Write([]byte{uint8(sum >> 8), uint8(sum)}) + } + + pk.encryptedData = make([]byte, len(privateKeyBytes)) + cfb.XORKeyStream(pk.encryptedData, privateKeyBytes) + pk.Encrypted = true + pk.PrivateKey = nil + return err +} + +func (pk *PrivateKey) serializePrivateKey(w io.Writer) (err error) { + switch priv := pk.PrivateKey.(type) { + case *rsa.PrivateKey: + err = serializeRSAPrivateKey(w, priv) + case *dsa.PrivateKey: + err = serializeDSAPrivateKey(w, priv) + case *elgamal.PrivateKey: + err = serializeElGamalPrivateKey(w, priv) + case *ecdsa.PrivateKey: + err = serializeECDSAPrivateKey(w, priv) + case *ed25519.PrivateKey: + err = serializeEdDSAPrivateKey(w, priv) + case *ecdh.PrivateKey: + err = serializeECDHPrivateKey(w, priv) + default: + err = errors.InvalidArgumentError("unknown private key type") + } + return +} + +func (pk *PrivateKey) parsePrivateKey(data []byte) (err error) { + switch pk.PublicKey.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoRSAEncryptOnly: + return pk.parseRSAPrivateKey(data) + case PubKeyAlgoDSA: + return pk.parseDSAPrivateKey(data) + case PubKeyAlgoElGamal: + return pk.parseElGamalPrivateKey(data) + case PubKeyAlgoECDSA: + return pk.parseECDSAPrivateKey(data) + case PubKeyAlgoECDH: + return pk.parseECDHPrivateKey(data) + case PubKeyAlgoEdDSA: + return pk.parseEdDSAPrivateKey(data) + } + panic("impossible") +} + +func (pk *PrivateKey) parseRSAPrivateKey(data []byte) (err error) { + rsaPub := pk.PublicKey.PublicKey.(*rsa.PublicKey) + rsaPriv := new(rsa.PrivateKey) + rsaPriv.PublicKey = *rsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + p := new(encoding.MPI) + if _, err := p.ReadFrom(buf); err != nil { + return err + } + + q := new(encoding.MPI) + if _, err := q.ReadFrom(buf); err != nil { + return err + } + + rsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + rsaPriv.Primes = make([]*big.Int, 2) + rsaPriv.Primes[0] = new(big.Int).SetBytes(p.Bytes()) + rsaPriv.Primes[1] = new(big.Int).SetBytes(q.Bytes()) + if err := rsaPriv.Validate(); err != nil { + return errors.KeyInvalidError(err.Error()) + } + rsaPriv.Precompute() + pk.PrivateKey = rsaPriv + + return nil +} + +func (pk *PrivateKey) parseDSAPrivateKey(data []byte) (err error) { + dsaPub := pk.PublicKey.PublicKey.(*dsa.PublicKey) + dsaPriv := new(dsa.PrivateKey) + dsaPriv.PublicKey = *dsaPub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + dsaPriv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateDSAParameters(dsaPriv); err != nil { + return err + } + pk.PrivateKey = dsaPriv + + return nil +} + +func (pk *PrivateKey) parseElGamalPrivateKey(data []byte) (err error) { + pub := pk.PublicKey.PublicKey.(*elgamal.PublicKey) + priv := new(elgamal.PrivateKey) + priv.PublicKey = *pub + + buf := bytes.NewBuffer(data) + x := new(encoding.MPI) + if _, err := x.ReadFrom(buf); err != nil { + return err + } + + priv.X = new(big.Int).SetBytes(x.Bytes()) + if err := validateElGamalParameters(priv); err != nil { + return err + } + pk.PrivateKey = priv + + return nil +} + +func (pk *PrivateKey) parseECDSAPrivateKey(data []byte) (err error) { + ecdsaPub := pk.PublicKey.PublicKey.(*ecdsa.PublicKey) + ecdsaPriv := new(ecdsa.PrivateKey) + ecdsaPriv.PublicKey = *ecdsaPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, 
err := d.ReadFrom(buf); err != nil { + return err + } + + ecdsaPriv.D = new(big.Int).SetBytes(d.Bytes()) + if err := validateECDSAParameters(ecdsaPriv); err != nil { + return err + } + pk.PrivateKey = ecdsaPriv + + return nil +} + +func (pk *PrivateKey) parseECDHPrivateKey(data []byte) (err error) { + ecdhPub := pk.PublicKey.PublicKey.(*ecdh.PublicKey) + ecdhPriv := new(ecdh.PrivateKey) + ecdhPriv.PublicKey = *ecdhPub + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + ecdhPriv.D = d.Bytes() + if err := validateECDHParameters(ecdhPriv); err != nil { + return err + } + pk.PrivateKey = ecdhPriv + + return nil +} + +func (pk *PrivateKey) parseEdDSAPrivateKey(data []byte) (err error) { + eddsaPub := pk.PublicKey.PublicKey.(*ed25519.PublicKey) + eddsaPriv := make(ed25519.PrivateKey, ed25519.PrivateKeySize) + + buf := bytes.NewBuffer(data) + d := new(encoding.MPI) + if _, err := d.ReadFrom(buf); err != nil { + return err + } + + priv := d.Bytes() + copy(eddsaPriv[32-len(priv):32], priv) + copy(eddsaPriv[32:], (*eddsaPub)[:]) + if err := validateEdDSAParameters(&eddsaPriv); err != nil { + return err + } + pk.PrivateKey = &eddsaPriv + + return nil +} + +func validateECDSAParameters(priv *ecdsa.PrivateKey) error { + return validateCommonECC(priv.Curve, priv.D.Bytes(), priv.X, priv.Y) +} + +func validateECDHParameters(priv *ecdh.PrivateKey) error { + if priv.CurveType != ecc.Curve25519 { + return validateCommonECC(priv.Curve, priv.D, priv.X, priv.Y) + } + // Handle Curve25519 + Q := priv.X.Bytes()[1:] + var d [32]byte + // Copy reversed d + l := len(priv.D) + for i := 0; i < l; i++ { + d[i] = priv.D[l-i-1] + } + var expectedQ [32]byte + curve25519.ScalarBaseMult(&expectedQ, &d) + if !bytes.Equal(Q, expectedQ[:]) { + return errors.KeyInvalidError("ECDH curve25519: invalid point") + } + return nil +} + +func validateCommonECC(curve elliptic.Curve, d []byte, X, Y *big.Int) error { + // the public point should not be at infinity (0,0) + zero := new(big.Int) + if X.Cmp(zero) == 0 && Y.Cmp(zero) == 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): infinity point", curve.Params().Name)) + } + // re-derive the public point Q' = (X,Y) = dG + // to compare to declared Q in public key + expectedX, expectedY := curve.ScalarBaseMult(d) + if X.Cmp(expectedX) != 0 || Y.Cmp(expectedY) != 0 { + return errors.KeyInvalidError(fmt.Sprintf("ecc (%s): invalid point", curve.Params().Name)) + } + return nil +} + +func validateEdDSAParameters(priv *ed25519.PrivateKey) error { + // In EdDSA, the serialized public point is stored as part of private key (together with the seed), + // hence we can re-derive the key from the seed + seed := priv.Seed() + expectedPriv := ed25519.NewKeyFromSeed(seed) + if !bytes.Equal(*priv, expectedPriv) { + return errors.KeyInvalidError("eddsa: invalid point") + } + return nil +} + +func validateDSAParameters(priv *dsa.PrivateKey) error { + p := priv.P // group prime + q := priv.Q // subgroup order + g := priv.G // g has order q mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("dsa: invalid group") + } + // expect p > q + if p.Cmp(q) <= 0 { + return errors.KeyInvalidError("dsa: invalid group prime") + } + // q should be large enough and divide p-1 + pSub1 := new(big.Int).Sub(p, one) + if q.BitLen() < 150 || new(big.Int).Mod(pSub1, q).Cmp(big.NewInt(0)) != 0 { + 
return errors.KeyInvalidError("dsa: invalid order") + } + // confirm that g has order q mod p + if !q.ProbablyPrime(32) || new(big.Int).Exp(g, q, p).Cmp(one) != 0 { + return errors.KeyInvalidError("dsa: invalid order") + } + // check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("dsa: mismatching values") + } + + return nil +} + +func validateElGamalParameters(priv *elgamal.PrivateKey) error { + p := priv.P // group prime + g := priv.G // g has order p-1 mod p + x := priv.X // secret + y := priv.Y // y == g**x mod p + one := big.NewInt(1) + // Expect g, y >= 2 and g < p + if g.Cmp(one) <= 0 || y.Cmp(one) <= 0 || g.Cmp(p) > 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + if p.BitLen() < 1024 { + return errors.KeyInvalidError("elgamal: group order too small") + } + pSub1 := new(big.Int).Sub(p, one) + if new(big.Int).Exp(g, pSub1, p).Cmp(one) != 0 { + return errors.KeyInvalidError("elgamal: invalid group") + } + // Since p-1 is not prime, g might have a smaller order that divides p-1. + // We cannot confirm the exact order of g, but we make sure it is not too small. + gExpI := new(big.Int).Set(g) + i := 1 + threshold := 2 << 17 // we want order > threshold + for i < threshold { + i++ // we check every order to make sure key validation is not easily bypassed by guessing y' + gExpI.Mod(new(big.Int).Mul(gExpI, g), p) + if gExpI.Cmp(one) == 0 { + return errors.KeyInvalidError("elgamal: order too small") + } + } + // Check y + if new(big.Int).Exp(g, x, p).Cmp(y) != 0 { + return errors.KeyInvalidError("elgamal: mismatching values") + } + + return nil +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go new file mode 100644 index 0000000000..029b8f1aab --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/private_key_test_data.go @@ -0,0 +1,12 @@ +package packet + +// Generated with `gpg --export-secret-keys "Test Key 2"` +const privKeyRSAHex = "9501fe044cc349a8010400b70ca0010e98c090008d45d1ee8f9113bd5861fd57b88bacb7c68658747663f1e1a3b5a98f32fda6472373c024b97359cd2efc88ff60f77751adfbf6af5e615e6a1408cfad8bf0cea30b0d5f53aa27ad59089ba9b15b7ebc2777a25d7b436144027e3bcd203909f147d0e332b240cf63d3395f5dfe0df0a6c04e8655af7eacdf0011010001fe0303024a252e7d475fd445607de39a265472aa74a9320ba2dac395faa687e9e0336aeb7e9a7397e511b5afd9dc84557c80ac0f3d4d7bfec5ae16f20d41c8c84a04552a33870b930420e230e179564f6d19bb153145e76c33ae993886c388832b0fa042ddda7f133924f3854481533e0ede31d51278c0519b29abc3bf53da673e13e3e1214b52413d179d7f66deee35cac8eacb060f78379d70ef4af8607e68131ff529439668fc39c9ce6dfef8a5ac234d234802cbfb749a26107db26406213ae5c06d4673253a3cbee1fcbae58d6ab77e38d6e2c0e7c6317c48e054edadb5a40d0d48acb44643d998139a8a66bb820be1f3f80185bc777d14b5954b60effe2448a036d565c6bc0b915fcea518acdd20ab07bc1529f561c58cd044f723109b93f6fd99f876ff891d64306b5d08f48bab59f38695e9109c4dec34013ba3153488ce070268381ba923ee1eb77125b36afcb4347ec3478c8f2735b06ef17351d872e577fa95d0c397c88c71b59629a36aec" + +// Generated by `gpg --export-secret-keys` followed by a manual extraction of +// the ElGamal subkey from the packets. 
+const privKeyElGamalHex = "9d0157044df9ee1a100400eb8e136a58ec39b582629cdadf830bc64e0a94ed8103ca8bb247b27b11b46d1d25297ef4bcc3071785ba0c0bedfe89eabc5287fcc0edf81ab5896c1c8e4b20d27d79813c7aede75320b33eaeeaa586edc00fd1036c10133e6ba0ff277245d0d59d04b2b3421b7244aca5f4a8d870c6f1c1fbff9e1c26699a860b9504f35ca1d700030503fd1ededd3b840795be6d9ccbe3c51ee42e2f39233c432b831ddd9c4e72b7025a819317e47bf94f9ee316d7273b05d5fcf2999c3a681f519b1234bbfa6d359b4752bd9c3f77d6b6456cde152464763414ca130f4e91d91041432f90620fec0e6d6b5116076c2985d5aeaae13be492b9b329efcaf7ee25120159a0a30cd976b42d7afe030302dae7eb80db744d4960c4df930d57e87fe81412eaace9f900e6c839817a614ddb75ba6603b9417c33ea7b6c93967dfa2bcff3fa3c74a5ce2c962db65b03aece14c96cbd0038fc" + +// pkcs1PrivKeyHex is a PKCS#1, RSA private key. +// Generated by `openssl genrsa 1024 | openssl rsa -outform DER | xxd -p` +const pkcs1PrivKeyHex = "3082025d02010002818100e98edfa1c3b35884a54d0b36a6a603b0290fa85e49e30fa23fc94fef9c6790bc4849928607aa48d809da326fb42a969d06ad756b98b9c1a90f5d4a2b6d0ac05953c97f4da3120164a21a679793ce181c906dc01d235cc085ddcdf6ea06c389b6ab8885dfd685959e693138856a68a7e5db263337ff82a088d583a897cf2d59e9020301000102818100b6d5c9eb70b02d5369b3ee5b520a14490b5bde8a317d36f7e4c74b7460141311d1e5067735f8f01d6f5908b2b96fbd881f7a1ab9a84d82753e39e19e2d36856be960d05ac9ef8e8782ea1b6d65aee28fdfe1d61451e8cff0adfe84322f12cf455028b581cf60eb9e0e140ba5d21aeba6c2634d7c65318b9a665fc01c3191ca21024100fa5e818da3705b0fa33278bb28d4b6f6050388af2d4b75ec9375dd91ccf2e7d7068086a8b82a8f6282e4fbbdb8a7f2622eb97295249d87acea7f5f816f54d347024100eecf9406d7dc49cdfb95ab1eff4064de84c7a30f64b2798936a0d2018ba9eb52e4b636f82e96c49cc63b80b675e91e40d1b2e4017d4b9adaf33ab3d9cf1c214f024100c173704ace742c082323066226a4655226819a85304c542b9dacbeacbf5d1881ee863485fcf6f59f3a604f9b42289282067447f2b13dfeed3eab7851fc81e0550240741fc41f3fc002b382eed8730e33c5d8de40256e4accee846667f536832f711ab1d4590e7db91a8a116ac5bff3be13d3f9243ff2e976662aa9b395d907f8e9c9024046a5696c9ef882363e06c9fa4e2f5b580906452befba03f4a99d0f873697ef1f851d2226ca7934b30b7c3e80cb634a67172bbbf4781735fe3e09263e2dd723e7" diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go new file mode 100644 index 0000000000..28971c1ada --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key.go @@ -0,0 +1,825 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + _ "crypto/sha512" + "encoding/binary" + "fmt" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/ecdh" + "github.com/ProtonMail/go-crypto/openpgp/elgamal" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/algorithm" + "github.com/ProtonMail/go-crypto/openpgp/internal/ecc" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "golang.org/x/crypto/ed25519" +) + +type kdfHashFunction byte +type kdfAlgorithm byte + +// PublicKey represents an OpenPGP public key. See RFC 4880, section 5.5.2. 
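+//
+// Values are normally obtained by parsing a packet stream, or built with the
+// New*PublicKey constructors below. A hedged sketch, where rsaPub is an
+// assumed *rsa.PublicKey:
+//
+//	pk := NewRSAPublicKey(time.Now(), rsaPub)
+//	fmt.Printf("%X\n", pk.Fingerprint) // 20-byte SHA-1 fingerprint for a v4 key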
+type PublicKey struct { + Version int + CreationTime time.Time + PubKeyAlgo PublicKeyAlgorithm + PublicKey interface{} // *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey or *eddsa.PublicKey + Fingerprint []byte + KeyId uint64 + IsSubkey bool + + // RFC 4880 fields + n, e, p, q, g, y encoding.Field + + // RFC 6637 fields + // oid contains the OID byte sequence identifying the elliptic curve used + oid encoding.Field + + // kdf stores key derivation function parameters + // used for ECDH encryption. See RFC 6637, Section 9. + kdf encoding.Field +} + +// UpgradeToV5 updates the version of the key to v5, and updates all necessary +// fields. +func (pk *PublicKey) UpgradeToV5() { + pk.Version = 5 + pk.setFingerprintAndKeyId() +} + +// signingKey provides a convenient abstraction over signature verification +// for v3 and v4 public keys. +type signingKey interface { + SerializeForHash(io.Writer) error + SerializeSignaturePrefix(io.Writer) + serializeWithoutHeaders(io.Writer) error +} + +// NewRSAPublicKey returns a PublicKey that wraps the given rsa.PublicKey. +func NewRSAPublicKey(creationTime time.Time, pub *rsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoRSA, + PublicKey: pub, + n: new(encoding.MPI).SetBig(pub.N), + e: new(encoding.MPI).SetBig(big.NewInt(int64(pub.E))), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewDSAPublicKey returns a PublicKey that wraps the given dsa.PublicKey. +func NewDSAPublicKey(creationTime time.Time, pub *dsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoDSA, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + q: new(encoding.MPI).SetBig(pub.Q), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +// NewElGamalPublicKey returns a PublicKey that wraps the given elgamal.PublicKey. 
+func NewElGamalPublicKey(creationTime time.Time, pub *elgamal.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoElGamal, + PublicKey: pub, + p: new(encoding.MPI).SetBig(pub.P), + g: new(encoding.MPI).SetBig(pub.G), + y: new(encoding.MPI).SetBig(pub.Y), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey { + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDSA, + PublicKey: pub, + p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), + } + + curveInfo := ecc.FindByCurve(pub.Curve) + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewECDHPublicKey(creationTime time.Time, pub *ecdh.PublicKey) *PublicKey { + var pk *PublicKey + var curveInfo *ecc.CurveInfo + var kdf = encoding.NewOID([]byte{0x1, pub.Hash.Id(), pub.Cipher.Id()}) + if pub.CurveType == ecc.Curve25519 { + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(pub.X.Bytes()), + kdf: kdf, + } + curveInfo = ecc.FindByName("Curve25519") + } else { + pk = &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoECDH, + PublicKey: pub, + p: encoding.NewMPI(elliptic.Marshal(pub.Curve, pub.X, pub.Y)), + kdf: kdf, + } + curveInfo = ecc.FindByCurve(pub.Curve) + } + if curveInfo == nil { + panic("unknown elliptic curve") + } + pk.oid = curveInfo.Oid + pk.setFingerprintAndKeyId() + return pk +} + +func NewEdDSAPublicKey(creationTime time.Time, pub *ed25519.PublicKey) *PublicKey { + curveInfo := ecc.FindByName("Ed25519") + pk := &PublicKey{ + Version: 4, + CreationTime: creationTime, + PubKeyAlgo: PubKeyAlgoEdDSA, + PublicKey: pub, + oid: curveInfo.Oid, + // Native point format, see draft-koch-eddsa-for-openpgp-04, Appendix B + p: encoding.NewMPI(append([]byte{0x40}, *pub...)), + } + + pk.setFingerprintAndKeyId() + return pk +} + +func (pk *PublicKey) parse(r io.Reader) (err error) { + // RFC 4880, section 5.5.2 + var buf [6]byte + _, err = readFull(r, buf[:]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 { + return errors.UnsupportedError("public key version " + strconv.Itoa(int(buf[0]))) + } + + pk.Version = int(buf[0]) + if pk.Version == 5 { + var n [4]byte + _, err = readFull(r, n[:]) + if err != nil { + return + } + } + pk.CreationTime = time.Unix(int64(uint32(buf[1])<<24|uint32(buf[2])<<16|uint32(buf[3])<<8|uint32(buf[4])), 0) + pk.PubKeyAlgo = PublicKeyAlgorithm(buf[5]) + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + err = pk.parseRSA(r) + case PubKeyAlgoDSA: + err = pk.parseDSA(r) + case PubKeyAlgoElGamal: + err = pk.parseElGamal(r) + case PubKeyAlgoECDSA: + err = pk.parseECDSA(r) + case PubKeyAlgoECDH: + err = pk.parseECDH(r) + case PubKeyAlgoEdDSA: + err = pk.parseEdDSA(r) + default: + err = errors.UnsupportedError("public key type: " + strconv.Itoa(int(pk.PubKeyAlgo))) + } + if err != nil { + return + } + + pk.setFingerprintAndKeyId() + return +} + +func (pk *PublicKey) setFingerprintAndKeyId() { + // RFC 4880, section 12.2 + if pk.Version == 5 { + fingerprint := sha256.New() + pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 32) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[:8]) + } else { + fingerprint := sha1.New() + 
pk.SerializeForHash(fingerprint) + pk.Fingerprint = make([]byte, 20) + copy(pk.Fingerprint, fingerprint.Sum(nil)) + pk.KeyId = binary.BigEndian.Uint64(pk.Fingerprint[12:20]) + } +} + +// parseRSA parses RSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseRSA(r io.Reader) (err error) { + pk.n = new(encoding.MPI) + if _, err = pk.n.ReadFrom(r); err != nil { + return + } + pk.e = new(encoding.MPI) + if _, err = pk.e.ReadFrom(r); err != nil { + return + } + + if len(pk.e.Bytes()) > 3 { + err = errors.UnsupportedError("large public exponent") + return + } + rsa := &rsa.PublicKey{ + N: new(big.Int).SetBytes(pk.n.Bytes()), + E: 0, + } + for i := 0; i < len(pk.e.Bytes()); i++ { + rsa.E <<= 8 + rsa.E |= int(pk.e.Bytes()[i]) + } + pk.PublicKey = rsa + return +} + +// parseDSA parses DSA public key material from the given Reader. See RFC 4880, +// section 5.5.2. +func (pk *PublicKey) parseDSA(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.q = new(encoding.MPI) + if _, err = pk.q.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + dsa := new(dsa.PublicKey) + dsa.P = new(big.Int).SetBytes(pk.p.Bytes()) + dsa.Q = new(big.Int).SetBytes(pk.q.Bytes()) + dsa.G = new(big.Int).SetBytes(pk.g.Bytes()) + dsa.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = dsa + return +} + +// parseElGamal parses ElGamal public key material from the given Reader. See +// RFC 4880, section 5.5.2. +func (pk *PublicKey) parseElGamal(r io.Reader) (err error) { + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.g = new(encoding.MPI) + if _, err = pk.g.ReadFrom(r); err != nil { + return + } + pk.y = new(encoding.MPI) + if _, err = pk.y.ReadFrom(r); err != nil { + return + } + + elgamal := new(elgamal.PublicKey) + elgamal.P = new(big.Int).SetBytes(pk.p.Bytes()) + elgamal.G = new(big.Int).SetBytes(pk.g.Bytes()) + elgamal.Y = new(big.Int).SetBytes(pk.y.Bytes()) + pk.PublicKey = elgamal + return +} + +// parseECDSA parses ECDSA public key material from the given Reader. See +// RFC 6637, Section 9. +func (pk *PublicKey) parseECDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + var c elliptic.Curve + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil || curveInfo.SigAlgorithm != ecc.ECDSA { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + c = curveInfo.Curve + x, y := elliptic.Unmarshal(c, pk.p.Bytes()) + if x == nil { + return errors.UnsupportedError("failed to parse EC point") + } + pk.PublicKey = &ecdsa.PublicKey{Curve: c, X: x, Y: y} + return +} + +// parseECDH parses ECDH public key material from the given Reader. See +// RFC 6637, Section 9. 
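+// The trailing KDF parameters are a variable-length field whose body is a
+// reserved octet (always 0x01), a hash function ID, and a cipher ID.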
+func (pk *PublicKey) parseECDH(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + pk.kdf = new(encoding.OID) + if _, err = pk.kdf.ReadFrom(r); err != nil { + return + } + + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + + c := curveInfo.Curve + cType := curveInfo.CurveType + + var x, y *big.Int + if cType == ecc.Curve25519 { + x = new(big.Int) + x.SetBytes(pk.p.Bytes()) + } else { + x, y = elliptic.Unmarshal(c, pk.p.Bytes()) + } + if x == nil { + return errors.UnsupportedError("failed to parse EC point") + } + + if kdfLen := len(pk.kdf.Bytes()); kdfLen < 3 { + return errors.UnsupportedError("unsupported ECDH KDF length: " + strconv.Itoa(kdfLen)) + } + if reserved := pk.kdf.Bytes()[0]; reserved != 0x01 { + return errors.UnsupportedError("unsupported KDF reserved field: " + strconv.Itoa(int(reserved))) + } + kdfHash, ok := algorithm.HashById[pk.kdf.Bytes()[1]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF hash: " + strconv.Itoa(int(pk.kdf.Bytes()[1]))) + } + kdfCipher, ok := algorithm.CipherById[pk.kdf.Bytes()[2]] + if !ok { + return errors.UnsupportedError("unsupported ECDH KDF cipher: " + strconv.Itoa(int(pk.kdf.Bytes()[2]))) + } + + pk.PublicKey = &ecdh.PublicKey{ + CurveType: cType, + Curve: c, + X: x, + Y: y, + KDF: ecdh.KDF{ + Hash: kdfHash, + Cipher: kdfCipher, + }, + } + return +} + +func (pk *PublicKey) parseEdDSA(r io.Reader) (err error) { + pk.oid = new(encoding.OID) + if _, err = pk.oid.ReadFrom(r); err != nil { + return + } + curveInfo := ecc.FindByOid(pk.oid) + if curveInfo == nil || curveInfo.SigAlgorithm != ecc.EdDSA { + return errors.UnsupportedError(fmt.Sprintf("unsupported oid: %x", pk.oid)) + } + pk.p = new(encoding.MPI) + if _, err = pk.p.ReadFrom(r); err != nil { + return + } + + eddsa := make(ed25519.PublicKey, ed25519.PublicKeySize) + switch flag := pk.p.Bytes()[0]; flag { + case 0x04: + // TODO: see _grcy_ecc_eddsa_ensure_compact in grcypt + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + case 0x40: + copy(eddsa[:], pk.p.Bytes()[1:]) + default: + return errors.UnsupportedError("unsupported EdDSA compression: " + strconv.Itoa(int(flag))) + } + + pk.PublicKey = &eddsa + return +} + +// SerializeForHash serializes the PublicKey to w with the special packet +// header format needed for hashing. +func (pk *PublicKey) SerializeForHash(w io.Writer) error { + pk.SerializeSignaturePrefix(w) + return pk.serializeWithoutHeaders(w) +} + +// SerializeSignaturePrefix writes the prefix for this public key to the given Writer. +// The prefix is used when calculating a signature over this public key. See +// RFC 4880, section 5.2.4. +func (pk *PublicKey) SerializeSignaturePrefix(w io.Writer) { + var pLength = pk.algorithmSpecificByteCount() + if pk.Version == 5 { + pLength += 10 // version, timestamp (4), algorithm, key octet count (4). 
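+ // A v5 key is hashed under a 0x9A prefix with a four-octet big-endian
+ // length; the v4 path below uses 0x99 and a two-octet length.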
+ w.Write([]byte{ + 0x9A, + byte(pLength >> 24), + byte(pLength >> 16), + byte(pLength >> 8), + byte(pLength), + }) + return + } + pLength += 6 + w.Write([]byte{0x99, byte(pLength >> 8), byte(pLength)}) +} + +func (pk *PublicKey) Serialize(w io.Writer) (err error) { + length := 6 // 6 byte header + length += pk.algorithmSpecificByteCount() + if pk.Version == 5 { + length += 4 // octet key count + } + packetType := packetTypePublicKey + if pk.IsSubkey { + packetType = packetTypePublicSubkey + } + err = serializeHeader(w, packetType, length) + if err != nil { + return + } + return pk.serializeWithoutHeaders(w) +} + +func (pk *PublicKey) algorithmSpecificByteCount() int { + length := 0 + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + length += int(pk.n.EncodedLength()) + length += int(pk.e.EncodedLength()) + case PubKeyAlgoDSA: + length += int(pk.p.EncodedLength()) + length += int(pk.q.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoElGamal: + length += int(pk.p.EncodedLength()) + length += int(pk.g.EncodedLength()) + length += int(pk.y.EncodedLength()) + case PubKeyAlgoECDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + case PubKeyAlgoECDH: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + length += int(pk.kdf.EncodedLength()) + case PubKeyAlgoEdDSA: + length += int(pk.oid.EncodedLength()) + length += int(pk.p.EncodedLength()) + default: + panic("unknown public key algorithm") + } + return length +} + +// serializeWithoutHeaders marshals the PublicKey to w in the form of an +// OpenPGP public key packet, not including the packet header. +func (pk *PublicKey) serializeWithoutHeaders(w io.Writer) (err error) { + t := uint32(pk.CreationTime.Unix()) + if _, err = w.Write([]byte{ + byte(pk.Version), + byte(t >> 24), byte(t >> 16), byte(t >> 8), byte(t), + byte(pk.PubKeyAlgo), + }); err != nil { + return + } + + if pk.Version == 5 { + n := pk.algorithmSpecificByteCount() + if _, err = w.Write([]byte{ + byte(n >> 24), byte(n >> 16), byte(n >> 8), byte(n), + }); err != nil { + return + } + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + if _, err = w.Write(pk.n.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.e.EncodedBytes()) + return + case PubKeyAlgoDSA: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.q.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoElGamal: + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.g.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.y.EncodedBytes()) + return + case PubKeyAlgoECDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + case PubKeyAlgoECDH: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + if _, err = w.Write(pk.p.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.kdf.EncodedBytes()) + return + case PubKeyAlgoEdDSA: + if _, err = w.Write(pk.oid.EncodedBytes()); err != nil { + return + } + _, err = w.Write(pk.p.EncodedBytes()) + return + } + return errors.InvalidArgumentError("bad public-key algorithm") +} + +// CanSign returns true iff this public key can generate signatures 
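+// (encrypt-only RSA, ElGamal, and ECDH keys cannot).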
+func (pk *PublicKey) CanSign() bool { + return pk.PubKeyAlgo != PubKeyAlgoRSAEncryptOnly && pk.PubKeyAlgo != PubKeyAlgoElGamal && pk.PubKeyAlgo != PubKeyAlgoECDH +} + +// VerifySignature returns nil iff sig is a valid signature, made by this +// public key, of the data hashed into signed. signed is mutated by this call. +func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err error) { + if !pk.CanSign() { + return errors.InvalidArgumentError("public key cannot generate signatures") + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + signed.Write(sig.HashSuffix) + hashBytes := signed.Sum(nil) + if hashBytes[0] != sig.HashTag[0] || hashBytes[1] != sig.HashTag[1] { + return errors.SignatureError("hash tag doesn't match") + } + + if pk.PubKeyAlgo != sig.PubKeyAlgo { + return errors.InvalidArgumentError("public key and signature use different algorithms") + } + + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.Bytes())) + if err != nil { + return errors.SignatureError("RSA verification failure") + } + return nil + case PubKeyAlgoDSA: + dsaPublicKey, _ := pk.PublicKey.(*dsa.PublicKey) + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. + subgroupSize := (dsaPublicKey.Q.BitLen() + 7) / 8 + if len(hashBytes) > subgroupSize { + hashBytes = hashBytes[:subgroupSize] + } + if !dsa.Verify(dsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.DSASigR.Bytes()), new(big.Int).SetBytes(sig.DSASigS.Bytes())) { + return errors.SignatureError("DSA verification failure") + } + return nil + case PubKeyAlgoECDSA: + ecdsaPublicKey := pk.PublicKey.(*ecdsa.PublicKey) + if !ecdsa.Verify(ecdsaPublicKey, hashBytes, new(big.Int).SetBytes(sig.ECDSASigR.Bytes()), new(big.Int).SetBytes(sig.ECDSASigS.Bytes())) { + return errors.SignatureError("ECDSA verification failure") + } + return nil + case PubKeyAlgoEdDSA: + eddsaPublicKey := pk.PublicKey.(*ed25519.PublicKey) + + sigR := sig.EdDSASigR.Bytes() + sigS := sig.EdDSASigS.Bytes() + + eddsaSig := make([]byte, ed25519.SignatureSize) + copy(eddsaSig[32-len(sigR):32], sigR) + copy(eddsaSig[64-len(sigS):], sigS) + + if !ed25519.Verify(*eddsaPublicKey, hashBytes, eddsaSig) { + return errors.SignatureError("EdDSA verification failure") + } + return nil + default: + return errors.SignatureError("Unsupported public key algorithm used in signature") + } +} + +// keySignatureHash returns a Hash of the message that needs to be signed for +// pk to assert a subkey relationship to signed. +func keySignatureHash(pk, signed signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + if err != nil { + return nil, err + } + + err = signed.SerializeForHash(h) + return +} + +// VerifyKeySignature returns nil iff sig is a valid signature, made by this +// public key, of signed. +func (pk *PublicKey) VerifyKeySignature(signed *PublicKey, sig *Signature) error { + h, err := keySignatureHash(pk, signed, sig.Hash) + if err != nil { + return err + } + if err = pk.VerifySignature(h, sig); err != nil { + return err + } + + if sig.FlagSign { + // Signing subkeys must be cross-signed. See + // https://www.gnupg.org/faq/subkey-cross-certify.html. 
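+ // The cross-signature is carried as an embedded signature made by
+ // the subkey itself, typically of type 0x19 (primary key binding).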
+ if sig.EmbeddedSignature == nil { + return errors.StructuralError("signing subkey is missing cross-signature") + } + // Verify the cross-signature. This is calculated over the same + // data as the main signature, so we cannot just recursively + // call signed.VerifyKeySignature(...) + if h, err = keySignatureHash(pk, signed, sig.EmbeddedSignature.Hash); err != nil { + return errors.StructuralError("error while hashing for cross-signature: " + err.Error()) + } + if err := signed.VerifySignature(h, sig.EmbeddedSignature); err != nil { + return errors.StructuralError("error while verifying cross-signature: " + err.Error()) + } + } + + return nil +} + +func keyRevocationHash(pk signingKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + err = pk.SerializeForHash(h) + + return +} + +// VerifyRevocationSignature returns nil iff sig is a valid signature, made by this +// public key. +func (pk *PublicKey) VerifyRevocationSignature(sig *Signature) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// VerifySubkeyRevocationSignature returns nil iff sig is a valid subkey revocation signature, +// made by the passed in signingKey. +func (pk *PublicKey) VerifySubkeyRevocationSignature(sig *Signature, signingKey *PublicKey) (err error) { + h, err := keyRevocationHash(pk, sig.Hash) + if err != nil { + return err + } + return signingKey.VerifySignature(h, sig) +} + +// userIdSignatureHash returns a Hash of the message that needs to be signed +// to assert that pk is a valid key for id. +func userIdSignatureHash(id string, pk *PublicKey, hashFunc crypto.Hash) (h hash.Hash, err error) { + if !hashFunc.Available() { + return nil, errors.UnsupportedError("hash function") + } + h = hashFunc.New() + + // RFC 4880, section 5.2.4 + pk.SerializeSignaturePrefix(h) + pk.serializeWithoutHeaders(h) + + var buf [5]byte + buf[0] = 0xb4 + buf[1] = byte(len(id) >> 24) + buf[2] = byte(len(id) >> 16) + buf[3] = byte(len(id) >> 8) + buf[4] = byte(len(id)) + h.Write(buf[:]) + h.Write([]byte(id)) + + return +} + +// VerifyUserIdSignature returns nil iff sig is a valid signature, made by this +// public key, that id is the identity of pub. +func (pk *PublicKey) VerifyUserIdSignature(id string, pub *PublicKey, sig *Signature) (err error) { + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return pk.VerifySignature(h, sig) +} + +// KeyIdString returns the public key's fingerprint in capital hex +// (e.g. "6C7EE1B8621CC013"). +func (pk *PublicKey) KeyIdString() string { + return fmt.Sprintf("%X", pk.Fingerprint[12:20]) +} + +// KeyIdShortString returns the short form of public key's fingerprint +// in capital hex, as shown by gpg --list-keys (e.g. "621CC013"). +func (pk *PublicKey) KeyIdShortString() string { + return fmt.Sprintf("%X", pk.Fingerprint[16:20]) +} + +// BitLength returns the bit length for the given public key. 
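+// For RSA this is the bit length of the modulus; for the elliptic-curve
+// algorithms it is the bit length of the MPI-encoded public point, not the
+// curve size.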
+func (pk *PublicKey) BitLength() (bitLength uint16, err error) { + switch pk.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly, PubKeyAlgoRSASignOnly: + bitLength = pk.n.BitLength() + case PubKeyAlgoDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoElGamal: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDSA: + bitLength = pk.p.BitLength() + case PubKeyAlgoECDH: + bitLength = pk.p.BitLength() + case PubKeyAlgoEdDSA: + bitLength = pk.p.BitLength() + default: + err = errors.InvalidArgumentError("bad public-key algorithm") + } + return +} + +// KeyExpired returns whether sig is a self-signature of a key that has +// expired or is created in the future. +func (pk *PublicKey) KeyExpired(sig *Signature, currentTime time.Time) bool { + if pk.CreationTime.After(currentTime) { + return true + } + if sig.KeyLifetimeSecs == nil || *sig.KeyLifetimeSecs == 0 { + return false + } + expiry := pk.CreationTime.Add(time.Duration(*sig.KeyLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go new file mode 100644 index 0000000000..b255f1f6f8 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/public_key_test_data.go @@ -0,0 +1,24 @@ +package packet + +const rsaFingerprintHex = "5fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb" + +const rsaPkDataHex = "988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001" + +const dsaFingerprintHex = "eece4c094db002103714c63c8e8fbe54062f19ed" + +const dsaPkDataHex = "9901a2044d432f89110400cd581334f0d7a1e1bdc8b9d6d8c0baf68793632735d2bb0903224cbaa1dfbf35a60ee7a13b92643421e1eb41aa8d79bea19a115a677f6b8ba3c7818ce53a6c2a24a1608bd8b8d6e55c5090cbde09dd26e356267465ae25e69ec8bdd57c7bbb2623e4d73336f73a0a9098f7f16da2e25252130fd694c0e8070c55a812a423ae7f00a0ebf50e70c2f19c3520a551bd4b08d30f23530d3d03ff7d0bf4a53a64a09dc5e6e6e35854b7d70c882b0c60293401958b1bd9e40abec3ea05ba87cf64899299d4bd6aa7f459c201d3fbbd6c82004bdc5e8a9eb8082d12054cc90fa9d4ec251a843236a588bf49552441817436c4f43326966fe85447d4e6d0acf8fa1ef0f014730770603ad7634c3088dc52501c237328417c31c89ed70400b2f1a98b0bf42f11fefc430704bebbaa41d9f355600c3facee1e490f64208e0e094ea55e3a598a219a58500bf78ac677b670a14f4e47e9cf8eab4f368cc1ddcaa18cc59309d4cc62dd4f680e73e6cc3e1ce87a84d0925efbcb26c575c093fc42eecf45135fabf6403a25c2016e1774c0484e440a18319072c617cc97ac0a3bb0" + +const ecdsaFingerprintHex = "9892270b38b8980b05c8d56d43fe956c542ca00b" + +const ecdsaPkDataHex = "9893045071c29413052b8104002304230401f4867769cedfa52c325018896245443968e52e51d0c2df8d939949cb5b330f2921711fbee1c9b9dddb95d15cb0255e99badeddda7cc23d9ddcaacbc290969b9f24019375d61c2e4e3b36953a28d8b2bc95f78c3f1d592fb24499be348656a7b17e3963187b4361afe497bc5f9f81213f04069f8e1fb9e6a6290ae295ca1a92b894396cb4" + +const ecdhFingerprintHex = "722354df2475a42164d1d49faa8b938f9a201946" + +const ecdhPkDataHex = "b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec91803010909" + +const eddsaFingerprintHex = "b2d5e5ec0e6deca6bc8eeeb00907e75e1dd99ad8" + +const eddsaPkDataHex = 
"98330456e2132b16092b06010401da470f01010740bbda39266affa511a8c2d02edf690fb784b0499c4406185811a163539ef11dc1b41d74657374696e67203c74657374696e674074657374696e672e636f6d3e8879041316080021050256e2132b021b03050b09080702061508090a0b020416020301021e01021780000a09100907e75e1dd99ad86d0c00fe39d2008359352782bc9b61ac382584cd8eff3f57a18c2287e3afeeb05d1f04ba00fe2d0bc1ddf3ff8adb9afa3e7d9287244b4ec567f3db4d60b74a9b5465ed528203" + +// Source: https://sites.google.com/site/brainhub/pgpecckeys#TOC-ECC-NIST-P-384-key +const ecc384PubHex = `99006f044d53059213052b81040022030304f6b8c5aced5b84ef9f4a209db2e4a9dfb70d28cb8c10ecd57674a9fa5a67389942b62d5e51367df4c7bfd3f8e500feecf07ed265a621a8ebbbe53e947ec78c677eba143bd1533c2b350e1c29f82313e1e1108eba063be1e64b10e6950e799c2db42465635f6473615f64685f333834203c6f70656e70677040627261696e6875622e6f72673e8900cb04101309005305024d530592301480000000002000077072656665727265642d656d61696c2d656e636f64696e67407067702e636f6d7067706d696d65040b090807021901051b03000000021602051e010000000415090a08000a0910098033880f54719fca2b0180aa37350968bd5f115afd8ce7bc7b103822152dbff06d0afcda835329510905b98cb469ba208faab87c7412b799e7b633017f58364ea480e8a1a3f253a0c5f22c446e8be9a9fce6210136ee30811abbd49139de28b5bdf8dc36d06ae748579e9ff503b90073044d53059212052b810400220303042faa84024a20b6735c4897efa5bfb41bf85b7eefeab5ca0cb9ffc8ea04a46acb25534a577694f9e25340a4ab5223a9dd1eda530c8aa2e6718db10d7e672558c7736fe09369ea5739a2a3554bf16d41faa50562f11c6d39bbd5dffb6b9a9ec9180301090989008404181309000c05024d530592051b0c000000000a0910098033880f54719f80970180eee7a6d8fcee41ee4f9289df17f9bcf9d955dca25c583b94336f3a2b2d4986dc5cf417b8d2dc86f741a9e1a6d236c0e3017d1c76575458a0cfb93ae8a2b274fcc65ceecd7a91eec83656ba13219969f06945b48c56bd04152c3a0553c5f2f4bd1267` diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go new file mode 100644 index 0000000000..94ae9ad9ac --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/reader.go @@ -0,0 +1,78 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// Reader reads packets from an io.Reader and allows packets to be 'unread' so +// that they result from the next call to Next. +type Reader struct { + q []Packet + readers []io.Reader +} + +// New io.Readers are pushed when a compressed or encrypted packet is processed +// and recursively treated as a new source of packets. However, a carefully +// crafted packet can trigger an infinite recursive sequence of packets. See +// http://mumble.net/~campbell/misc/pgp-quine +// https://web.nvd.nist.gov/view/vuln/detail?vulnId=CVE-2013-4402 +// This constant limits the number of recursive packets that may be pushed. +const maxReaders = 32 + +// Next returns the most recently unread Packet, or reads another packet from +// the top-most io.Reader. Unknown packet types are skipped. +func (r *Reader) Next() (p Packet, err error) { + if len(r.q) > 0 { + p = r.q[len(r.q)-1] + r.q = r.q[:len(r.q)-1] + return + } + + for len(r.readers) > 0 { + p, err = Read(r.readers[len(r.readers)-1]) + if err == nil { + return + } + if err == io.EOF { + r.readers = r.readers[:len(r.readers)-1] + continue + } + // TODO: Add strict mode that rejects unknown packets, instead of ignoring them. 
+ if _, ok := err.(errors.UnknownPacketTypeError); !ok { + return nil, err + } + } + + return nil, io.EOF +} + +// Push causes the Reader to start reading from a new io.Reader. When an EOF +// error is seen from the new io.Reader, it is popped and the Reader continues +// to read from the next most recent io.Reader. Push returns a StructuralError +// if pushing the reader would exceed the maximum recursion level, otherwise it +// returns nil. +func (r *Reader) Push(reader io.Reader) (err error) { + if len(r.readers) >= maxReaders { + return errors.StructuralError("too many layers of packets") + } + r.readers = append(r.readers, reader) + return nil +} + +// Unread causes the given Packet to be returned from the next call to Next. +func (r *Reader) Unread(p Packet) { + r.q = append(r.q, p) +} + +func NewReader(r io.Reader) *Reader { + return &Reader{ + q: nil, + readers: []io.Reader{r}, + } +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go new file mode 100644 index 0000000000..6b1704e429 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/signature.go @@ -0,0 +1,964 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "encoding/asn1" + "encoding/binary" + "hash" + "io" + "math/big" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/internal/encoding" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +const ( + // See RFC 4880, section 5.2.3.21 for details. + KeyFlagCertify = 1 << iota + KeyFlagSign + KeyFlagEncryptCommunications + KeyFlagEncryptStorage +) + +// Signature represents a signature. See RFC 4880, section 5.2. +type Signature struct { + Version int + SigType SignatureType + PubKeyAlgo PublicKeyAlgorithm + Hash crypto.Hash + + // HashSuffix is extra data that is hashed in after the signed data. + HashSuffix []byte + // HashTag contains the first two bytes of the hash for fast rejection + // of bad signed data. + HashTag [2]byte + + // Metadata includes format, filename and time, and is protected by v5 + // signatures of type 0x00 or 0x01. This metadata is included into the hash + // computation; if nil, six 0x00 bytes are used instead. See section 5.2.4. + Metadata *LiteralData + + CreationTime time.Time + + RSASignature encoding.Field + DSASigR, DSASigS encoding.Field + ECDSASigR, ECDSASigS encoding.Field + EdDSASigR, EdDSASigS encoding.Field + + // rawSubpackets contains the unparsed subpackets, in order. + rawSubpackets []outputSubpacket + + // The following are optional so are nil when not included in the + // signature. + + SigLifetimeSecs, KeyLifetimeSecs *uint32 + PreferredSymmetric, PreferredHash, PreferredCompression []uint8 + PreferredAEAD []uint8 + IssuerKeyId *uint64 + IssuerFingerprint []byte + IsPrimaryId *bool + + // FlagsValid is set if any flags were given. See RFC 4880, section + // 5.2.3.21 for details. + FlagsValid bool + FlagCertify, FlagSign, FlagEncryptCommunications, FlagEncryptStorage bool + + // RevocationReason is set if this signature has been revoked. + // See RFC 4880, section 5.2.3.23 for details. 
+ RevocationReason *uint8 + RevocationReasonText string + + // In a self-signature, these flags are set if there is a features subpacket + // indicating that the issuer implementation supports these features + // (section 5.2.5.25). + MDC, AEAD, V5Keys bool + + // EmbeddedSignature, if non-nil, is a signature of the parent key, by + // this key. This prevents an attacker from claiming another's signing + // subkey as their own. + EmbeddedSignature *Signature + + outSubpackets []outputSubpacket +} + +func (sig *Signature) parse(r io.Reader) (err error) { + // RFC 4880, section 5.2.3 + var buf [5]byte + _, err = readFull(r, buf[:1]) + if err != nil { + return + } + if buf[0] != 4 && buf[0] != 5 { + err = errors.UnsupportedError("signature packet version " + strconv.Itoa(int(buf[0]))) + return + } + sig.Version = int(buf[0]) + _, err = readFull(r, buf[:5]) + if err != nil { + return + } + sig.SigType = SignatureType(buf[0]) + sig.PubKeyAlgo = PublicKeyAlgorithm(buf[1]) + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly, PubKeyAlgoDSA, PubKeyAlgoECDSA, PubKeyAlgoEdDSA: + default: + err = errors.UnsupportedError("public key algorithm " + strconv.Itoa(int(sig.PubKeyAlgo))) + return + } + + var ok bool + sig.Hash, ok = s2k.HashIdToHash(buf[2]) + if !ok { + return errors.UnsupportedError("hash function " + strconv.Itoa(int(buf[2]))) + } + + hashedSubpacketsLength := int(buf[3])<<8 | int(buf[4]) + hashedSubpackets := make([]byte, hashedSubpacketsLength) + _, err = readFull(r, hashedSubpackets) + if err != nil { + return + } + sig.buildHashSuffix(hashedSubpackets) + err = parseSignatureSubpackets(sig, hashedSubpackets, true) + if err != nil { + return + } + + _, err = readFull(r, buf[:2]) + if err != nil { + return + } + unhashedSubpacketsLength := int(buf[0])<<8 | int(buf[1]) + unhashedSubpackets := make([]byte, unhashedSubpacketsLength) + _, err = readFull(r, unhashedSubpackets) + if err != nil { + return + } + err = parseSignatureSubpackets(sig, unhashedSubpackets, false) + if err != nil { + return + } + + _, err = readFull(r, sig.HashTag[:2]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sig.RSASignature = new(encoding.MPI) + _, err = sig.RSASignature.ReadFrom(r) + case PubKeyAlgoDSA: + sig.DSASigR = new(encoding.MPI) + if _, err = sig.DSASigR.ReadFrom(r); err != nil { + return + } + + sig.DSASigS = new(encoding.MPI) + _, err = sig.DSASigS.ReadFrom(r) + case PubKeyAlgoECDSA: + sig.ECDSASigR = new(encoding.MPI) + if _, err = sig.ECDSASigR.ReadFrom(r); err != nil { + return + } + + sig.ECDSASigS = new(encoding.MPI) + _, err = sig.ECDSASigS.ReadFrom(r) + case PubKeyAlgoEdDSA: + sig.EdDSASigR = new(encoding.MPI) + if _, err = sig.EdDSASigR.ReadFrom(r); err != nil { + return + } + + sig.EdDSASigS = new(encoding.MPI) + if _, err = sig.EdDSASigS.ReadFrom(r); err != nil { + return + } + default: + panic("unreachable") + } + return +} + +// parseSignatureSubpackets parses subpackets of the main signature packet. See +// RFC 4880, section 5.2.3.1.
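+// Each subpacket consists of a variable-width length field (one, two or five +// octets, mirroring parseSignatureSubpacket below), a type octet whose high +// bit marks the subpacket as critical, and the subpacket body.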
+func parseSignatureSubpackets(sig *Signature, subpackets []byte, isHashed bool) (err error) { + for len(subpackets) > 0 { + subpackets, err = parseSignatureSubpacket(sig, subpackets, isHashed) + if err != nil { + return + } + } + + if sig.CreationTime.IsZero() { + err = errors.StructuralError("no creation time in signature") + } + + return +} + +type signatureSubpacketType uint8 + +const ( + creationTimeSubpacket signatureSubpacketType = 2 + signatureExpirationSubpacket signatureSubpacketType = 3 + keyExpirationSubpacket signatureSubpacketType = 9 + prefSymmetricAlgosSubpacket signatureSubpacketType = 11 + issuerSubpacket signatureSubpacketType = 16 + prefHashAlgosSubpacket signatureSubpacketType = 21 + prefCompressionSubpacket signatureSubpacketType = 22 + primaryUserIdSubpacket signatureSubpacketType = 25 + keyFlagsSubpacket signatureSubpacketType = 27 + reasonForRevocationSubpacket signatureSubpacketType = 29 + featuresSubpacket signatureSubpacketType = 30 + embeddedSignatureSubpacket signatureSubpacketType = 32 + issuerFingerprintSubpacket signatureSubpacketType = 33 + prefAeadAlgosSubpacket signatureSubpacketType = 34 +) + +// parseSignatureSubpacket parses a single subpacket. len(subpacket) is >= 1. +func parseSignatureSubpacket(sig *Signature, subpacket []byte, isHashed bool) (rest []byte, err error) { + // RFC 4880, section 5.2.3.1 + var ( + length uint32 + packetType signatureSubpacketType + isCritical bool + ) + switch { + case subpacket[0] < 192: + length = uint32(subpacket[0]) + subpacket = subpacket[1:] + case subpacket[0] < 255: + if len(subpacket) < 2 { + goto Truncated + } + length = uint32(subpacket[0]-192)<<8 + uint32(subpacket[1]) + 192 + subpacket = subpacket[2:] + default: + if len(subpacket) < 5 { + goto Truncated + } + length = uint32(subpacket[1])<<24 | + uint32(subpacket[2])<<16 | + uint32(subpacket[3])<<8 | + uint32(subpacket[4]) + subpacket = subpacket[5:] + } + if length > uint32(len(subpacket)) { + goto Truncated + } + rest = subpacket[length:] + subpacket = subpacket[:length] + if len(subpacket) == 0 { + err = errors.StructuralError("zero length signature subpacket") + return + } + packetType = signatureSubpacketType(subpacket[0] & 0x7f) + isCritical = subpacket[0]&0x80 == 0x80 + subpacket = subpacket[1:] + sig.rawSubpackets = append(sig.rawSubpackets, outputSubpacket{isHashed, packetType, isCritical, subpacket}) + switch packetType { + case creationTimeSubpacket: + if !isHashed { + err = errors.StructuralError("signature creation time in non-hashed area") + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("signature creation time not four bytes") + return + } + t := binary.BigEndian.Uint32(subpacket) + sig.CreationTime = time.Unix(int64(t), 0) + case signatureExpirationSubpacket: + // Signature expiration time, section 5.2.3.10 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("expiration subpacket with bad length") + return + } + sig.SigLifetimeSecs = new(uint32) + *sig.SigLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case keyExpirationSubpacket: + // Key expiration time, section 5.2.3.6 + if !isHashed { + return + } + if len(subpacket) != 4 { + err = errors.StructuralError("key expiration subpacket with bad length") + return + } + sig.KeyLifetimeSecs = new(uint32) + *sig.KeyLifetimeSecs = binary.BigEndian.Uint32(subpacket) + case prefSymmetricAlgosSubpacket: + // Preferred symmetric algorithms, section 5.2.3.7 + if !isHashed { + return + } + sig.PreferredSymmetric = make([]byte, 
len(subpacket)) + copy(sig.PreferredSymmetric, subpacket) + case issuerSubpacket: + if sig.Version > 4 { + err = errors.StructuralError("issuer subpacket found in v5 key") + } + // Issuer, section 5.2.3.5 + if len(subpacket) != 8 { + err = errors.StructuralError("issuer subpacket with bad length") + return + } + sig.IssuerKeyId = new(uint64) + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket) + case prefHashAlgosSubpacket: + // Preferred hash algorithms, section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredHash = make([]byte, len(subpacket)) + copy(sig.PreferredHash, subpacket) + case prefCompressionSubpacket: + // Preferred compression algorithms, section 5.2.3.9 + if !isHashed { + return + } + sig.PreferredCompression = make([]byte, len(subpacket)) + copy(sig.PreferredCompression, subpacket) + case primaryUserIdSubpacket: + // Primary User ID, section 5.2.3.19 + if !isHashed { + return + } + if len(subpacket) != 1 { + err = errors.StructuralError("primary user id subpacket with bad length") + return + } + sig.IsPrimaryId = new(bool) + if subpacket[0] > 0 { + *sig.IsPrimaryId = true + } + case keyFlagsSubpacket: + // Key flags, section 5.2.3.21 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty key flags subpacket") + return + } + sig.FlagsValid = true + if subpacket[0]&KeyFlagCertify != 0 { + sig.FlagCertify = true + } + if subpacket[0]&KeyFlagSign != 0 { + sig.FlagSign = true + } + if subpacket[0]&KeyFlagEncryptCommunications != 0 { + sig.FlagEncryptCommunications = true + } + if subpacket[0]&KeyFlagEncryptStorage != 0 { + sig.FlagEncryptStorage = true + } + case reasonForRevocationSubpacket: + // Reason For Revocation, section 5.2.3.23 + if !isHashed { + return + } + if len(subpacket) == 0 { + err = errors.StructuralError("empty revocation reason subpacket") + return + } + sig.RevocationReason = new(uint8) + *sig.RevocationReason = subpacket[0] + sig.RevocationReasonText = string(subpacket[1:]) + case featuresSubpacket: + // Features subpacket, section 5.2.3.24 specifies a very general + // mechanism for OpenPGP implementations to signal support for new + // features. + if !isHashed { + return + } + if len(subpacket) > 0 { + if subpacket[0]&0x01 != 0 { + sig.MDC = true + } + if subpacket[0]&0x02 != 0 { + sig.AEAD = true + } + if subpacket[0]&0x04 != 0 { + sig.V5Keys = true + } + } + case embeddedSignatureSubpacket: + // Only usage is in signatures that cross-certify + // signing subkeys. section 5.2.3.26 describes the + // format, with its usage described in section 11.1 + if sig.EmbeddedSignature != nil { + err = errors.StructuralError("Cannot have multiple embedded signatures") + return + } + sig.EmbeddedSignature = new(Signature) + // Embedded signatures are required to be v4 signatures see + // section 12.1. However, we only parse v4 signatures in this + // file anyway. 
+ if err := sig.EmbeddedSignature.parse(bytes.NewBuffer(subpacket)); err != nil { + return nil, err + } + if sigType := sig.EmbeddedSignature.SigType; sigType != SigTypePrimaryKeyBinding { + return nil, errors.StructuralError("cross-signature has unexpected type " + strconv.Itoa(int(sigType))) + } + case issuerFingerprintSubpacket: + v, l := subpacket[0], len(subpacket[1:]) + if v == 5 && l != 32 || v != 5 && l != 20 { + return nil, errors.StructuralError("bad fingerprint length") + } + sig.IssuerFingerprint = make([]byte, l) + copy(sig.IssuerFingerprint, subpacket[1:]) + sig.IssuerKeyId = new(uint64) + if v == 5 { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[1:9]) + } else { + *sig.IssuerKeyId = binary.BigEndian.Uint64(subpacket[13:21]) + } + case prefAeadAlgosSubpacket: + // Preferred AEAD algorithms, RFC 4880bis section 5.2.3.8 + if !isHashed { + return + } + sig.PreferredAEAD = make([]byte, len(subpacket)) + copy(sig.PreferredAEAD, subpacket) + default: + if isCritical { + err = errors.UnsupportedError("unknown critical signature subpacket type " + strconv.Itoa(int(packetType))) + return + } + } + return + +Truncated: + err = errors.StructuralError("signature subpacket truncated") + return +} + +// subpacketLengthLength returns the length, in bytes, of an encoded length value. +func subpacketLengthLength(length int) int { + if length < 192 { + return 1 + } + if length < 16320 { + return 2 + } + return 5 +} + +func (sig *Signature) CheckKeyIdOrFingerprint(pk *PublicKey) bool { + if sig.IssuerFingerprint != nil && len(sig.IssuerFingerprint) >= 20 { + return bytes.Equal(sig.IssuerFingerprint, pk.Fingerprint) + } + return sig.IssuerKeyId != nil && *sig.IssuerKeyId == pk.KeyId +} + +// serializeSubpacketLength marshals the given length into to and returns the +// number of bytes written. +func serializeSubpacketLength(to []byte, length int) int { + // RFC 4880, Section 4.2.2. + if length < 192 { + to[0] = byte(length) + return 1 + } + if length < 16320 { + length -= 192 + to[0] = byte((length >> 8) + 192) + to[1] = byte(length) + return 2 + } + to[0] = 255 + to[1] = byte(length >> 24) + to[2] = byte(length >> 16) + to[3] = byte(length >> 8) + to[4] = byte(length) + return 5 +} + +// subpacketsLength returns the serialized length, in bytes, of the given +// subpackets. +func subpacketsLength(subpackets []outputSubpacket, hashed bool) (length int) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + length += subpacketLengthLength(len(subpacket.contents) + 1) + length += 1 // type byte + length += len(subpacket.contents) + } + } + return +} + +// serializeSubpackets marshals the given subpackets into to. +func serializeSubpackets(to []byte, subpackets []outputSubpacket, hashed bool) { + for _, subpacket := range subpackets { + if subpacket.hashed == hashed { + n := serializeSubpacketLength(to, len(subpacket.contents)+1) + to[n] = byte(subpacket.subpacketType) + to = to[1+n:] + n = copy(to, subpacket.contents) + to = to[n:] + } + } + return +} + +// SigExpired returns whether sig is a signature that has expired or was created +// in the future. +func (sig *Signature) SigExpired(currentTime time.Time) bool { + if sig.CreationTime.After(currentTime) { + return true + } + if sig.SigLifetimeSecs == nil || *sig.SigLifetimeSecs == 0 { + return false + } + expiry := sig.CreationTime.Add(time.Duration(*sig.SigLifetimeSecs) * time.Second) + return currentTime.After(expiry) +} + +// buildHashSuffix constructs the HashSuffix member of sig in preparation for signing.
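+// The suffix is the six header octets (version, signature type, public key +// algorithm, hash id and the two-octet hashed-subpacket length), the hashed +// subpackets themselves, and a trailer: the version octet, 0xff, and a +// big-endian count (four octets for v4, eight for v5) of the bytes hashed so far.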
+func (sig *Signature) buildHashSuffix(hashedSubpackets []byte) (err error) { + hash, ok := s2k.HashToHashId(sig.Hash) + if !ok { + sig.HashSuffix = nil + return errors.InvalidArgumentError("hash cannot be represented in OpenPGP: " + strconv.Itoa(int(sig.Hash))) + } + + hashedFields := bytes.NewBuffer([]byte{ + uint8(sig.Version), + uint8(sig.SigType), + uint8(sig.PubKeyAlgo), + uint8(hash), + uint8(len(hashedSubpackets) >> 8), + uint8(len(hashedSubpackets)), + }) + hashedFields.Write(hashedSubpackets) + + var l uint64 = uint64(6 + len(hashedSubpackets)) + if sig.Version == 5 { + hashedFields.Write([]byte{0x05, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } else { + hashedFields.Write([]byte{0x04, 0xff}) + hashedFields.Write([]byte{ + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + } + sig.HashSuffix = make([]byte, hashedFields.Len()) + copy(sig.HashSuffix, hashedFields.Bytes()) + return +} + +func (sig *Signature) signPrepareHash(h hash.Hash) (digest []byte, err error) { + hashedSubpacketsLen := subpacketsLength(sig.outSubpackets, true) + hashedSubpackets := make([]byte, hashedSubpacketsLen) + serializeSubpackets(hashedSubpackets, sig.outSubpackets, true) + err = sig.buildHashSuffix(hashedSubpackets) + if err != nil { + return + } + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.AddMetadataToHashSuffix() + } + + h.Write(sig.HashSuffix) + digest = h.Sum(nil) + copy(sig.HashTag[:], digest) + return +} + +// Sign signs a message with a private key. The hash, h, must contain +// the hash of the message to be signed and will be mutated by this function. +// On success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) Sign(h hash.Hash, priv *PrivateKey, config *Config) (err error) { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + sig.Version = priv.PublicKey.Version + sig.IssuerFingerprint = priv.PublicKey.Fingerprint + sig.outSubpackets, err = sig.buildSubpackets(priv.PublicKey) + if err != nil { + return err + } + digest, err := sig.signPrepareHash(h) + if err != nil { + return + } + switch priv.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + // supports both *rsa.PrivateKey and crypto.Signer + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + sig.RSASignature = encoding.NewMPI(sigdata) + } + case PubKeyAlgoDSA: + dsaPriv := priv.PrivateKey.(*dsa.PrivateKey) + + // Need to truncate hashBytes to match FIPS 186-3 section 4.6. 
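+ // For example, with a 256-bit subgroup order q, subgroupSize is 32 bytes, + // so a 64-byte SHA-512 digest would be truncated to its leftmost 32 bytes + // before signing.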
+ subgroupSize := (dsaPriv.Q.BitLen() + 7) / 8 + if len(digest) > subgroupSize { + digest = digest[:subgroupSize] + } + r, s, err := dsa.Sign(config.Random(), dsaPriv, digest) + if err == nil { + sig.DSASigR = new(encoding.MPI).SetBig(r) + sig.DSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoECDSA: + var r, s *big.Int + if pk, ok := priv.PrivateKey.(*ecdsa.PrivateKey); ok { + // direct support, avoid asn1 wrapping/unwrapping + r, s, err = ecdsa.Sign(config.Random(), pk, digest) + } else { + var b []byte + b, err = priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, sig.Hash) + if err == nil { + r, s, err = unwrapECDSASig(b) + } + } + if err == nil { + sig.ECDSASigR = new(encoding.MPI).SetBig(r) + sig.ECDSASigS = new(encoding.MPI).SetBig(s) + } + case PubKeyAlgoEdDSA: + sigdata, err := priv.PrivateKey.(crypto.Signer).Sign(config.Random(), digest, crypto.Hash(0)) + if err == nil { + sig.EdDSASigR = encoding.NewMPI(sigdata[:32]) + sig.EdDSASigS = encoding.NewMPI(sigdata[32:]) + } + default: + err = errors.UnsupportedError("public key algorithm: " + strconv.Itoa(int(sig.PubKeyAlgo))) + } + + return +} + +// unwrapECDSASig parses the two integer components of an ASN.1-encoded ECDSA +// signature. +func unwrapECDSASig(b []byte) (r, s *big.Int, err error) { + var ecsdaSig struct { + R, S *big.Int + } + _, err = asn1.Unmarshal(b, &ecsdaSig) + if err != nil { + return + } + return ecsdaSig.R, ecsdaSig.S, nil +} + +// SignUserId computes a signature from priv, asserting that pub is a valid +// key for the identity id. On success, the signature is stored in sig. Call +// Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignUserId(id string, pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := userIdSignatureHash(id, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// CrossSignKey computes a signature from signingKey on pub hashed using hashKey. On success, +// the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) CrossSignKey(pub *PublicKey, hashKey *PublicKey, signingKey *PrivateKey, + config *Config) error { + h, err := keySignatureHash(hashKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, signingKey, config) +} + +// SignKey computes a signature from priv, asserting that pub is a subkey. On +// success, the signature is stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) SignKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + if priv.Dummy() { + return errors.ErrDummyPrivateKey("dummy key found") + } + h, err := keySignatureHash(&priv.PublicKey, pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// RevokeKey computes a revocation signature of pub using priv. On success, the signature is +// stored in sig. Call Serialize to write it out. +// If config is nil, sensible defaults will be used. +func (sig *Signature) RevokeKey(pub *PublicKey, priv *PrivateKey, config *Config) error { + h, err := keyRevocationHash(pub, sig.Hash) + if err != nil { + return err + } + return sig.Sign(h, priv, config) +} + +// Serialize marshals sig to w. Sign, SignUserId or SignKey must have been +// called first. 
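+// A minimal signing sketch (illustrative only; priv, message and w stand in +// for a decrypted *PrivateKey, a byte slice and an io.Writer): +// +//	sig := &Signature{ +//		SigType:      SigTypeBinary, +//		PubKeyAlgo:   priv.PubKeyAlgo, +//		Hash:         crypto.SHA256, +//		CreationTime: time.Now(), +//	} +//	h := sig.Hash.New() +//	h.Write(message) +//	if err := sig.Sign(h, priv, nil); err == nil { +//		err = sig.Serialize(w) +//	}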
+func (sig *Signature) Serialize(w io.Writer) (err error) { + if len(sig.outSubpackets) == 0 { + sig.outSubpackets = sig.rawSubpackets + } + if sig.RSASignature == nil && sig.DSASigR == nil && sig.ECDSASigR == nil && sig.EdDSASigR == nil { + return errors.InvalidArgumentError("Signature: need to call Sign, SignUserId or SignKey before Serialize") + } + + sigLength := 0 + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + sigLength = int(sig.RSASignature.EncodedLength()) + case PubKeyAlgoDSA: + sigLength = int(sig.DSASigR.EncodedLength()) + sigLength += int(sig.DSASigS.EncodedLength()) + case PubKeyAlgoECDSA: + sigLength = int(sig.ECDSASigR.EncodedLength()) + sigLength += int(sig.ECDSASigS.EncodedLength()) + case PubKeyAlgoEdDSA: + sigLength = int(sig.EdDSASigR.EncodedLength()) + sigLength += int(sig.EdDSASigS.EncodedLength()) + default: + panic("impossible") + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + length := len(sig.HashSuffix) - 6 /* trailer not included */ + + 2 /* length of unhashed subpackets */ + unhashedSubpacketsLen + + 2 /* hash tag */ + sigLength + if sig.Version == 5 { + length -= 4 // eight-octet instead of four-octet big endian + } + err = serializeHeader(w, packetTypeSignature, length) + if err != nil { + return + } + err = sig.serializeBody(w) + if err != nil { + return err + } + return +} + +func (sig *Signature) serializeBody(w io.Writer) (err error) { + hashedSubpacketsLen := uint16(uint16(sig.HashSuffix[4])<<8) | uint16(sig.HashSuffix[5]) + fields := sig.HashSuffix[:6+hashedSubpacketsLen] + _, err = w.Write(fields) + if err != nil { + return + } + + unhashedSubpacketsLen := subpacketsLength(sig.outSubpackets, false) + unhashedSubpackets := make([]byte, 2+unhashedSubpacketsLen) + unhashedSubpackets[0] = byte(unhashedSubpacketsLen >> 8) + unhashedSubpackets[1] = byte(unhashedSubpacketsLen) + serializeSubpackets(unhashedSubpackets[2:], sig.outSubpackets, false) + + _, err = w.Write(unhashedSubpackets) + if err != nil { + return + } + _, err = w.Write(sig.HashTag[:]) + if err != nil { + return + } + + switch sig.PubKeyAlgo { + case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: + _, err = w.Write(sig.RSASignature.EncodedBytes()) + case PubKeyAlgoDSA: + if _, err = w.Write(sig.DSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.DSASigS.EncodedBytes()) + case PubKeyAlgoECDSA: + if _, err = w.Write(sig.ECDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.ECDSASigS.EncodedBytes()) + case PubKeyAlgoEdDSA: + if _, err = w.Write(sig.EdDSASigR.EncodedBytes()); err != nil { + return + } + _, err = w.Write(sig.EdDSASigS.EncodedBytes()) + default: + panic("impossible") + } + return +} + +// outputSubpacket represents a subpacket to be marshaled. +type outputSubpacket struct { + hashed bool // true if this subpacket is in the hashed area. 
+ subpacketType signatureSubpacketType + isCritical bool + contents []byte +} + +func (sig *Signature) buildSubpackets(issuer PublicKey) (subpackets []outputSubpacket, err error) { + creationTime := make([]byte, 4) + binary.BigEndian.PutUint32(creationTime, uint32(sig.CreationTime.Unix())) + subpackets = append(subpackets, outputSubpacket{true, creationTimeSubpacket, false, creationTime}) + + if sig.IssuerKeyId != nil && sig.Version == 4 { + keyId := make([]byte, 8) + binary.BigEndian.PutUint64(keyId, *sig.IssuerKeyId) + subpackets = append(subpackets, outputSubpacket{true, issuerSubpacket, true, keyId}) + } + if sig.IssuerFingerprint != nil { + contents := append([]uint8{uint8(issuer.Version)}, sig.IssuerFingerprint...) + subpackets = append(subpackets, outputSubpacket{true, issuerFingerprintSubpacket, true, contents}) + } + if sig.SigLifetimeSecs != nil && *sig.SigLifetimeSecs != 0 { + sigLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(sigLifetime, *sig.SigLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, signatureExpirationSubpacket, true, sigLifetime}) + } + + // Key flags may only appear in self-signatures or certification signatures. + + if sig.FlagsValid { + var flags byte + if sig.FlagCertify { + flags |= KeyFlagCertify + } + if sig.FlagSign { + flags |= KeyFlagSign + } + if sig.FlagEncryptCommunications { + flags |= KeyFlagEncryptCommunications + } + if sig.FlagEncryptStorage { + flags |= KeyFlagEncryptStorage + } + subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}}) + } + + // The following subpackets may only appear in self-signatures. + + var features = byte(0x00) + if sig.MDC { + features |= 0x01 + } + if sig.AEAD { + features |= 0x02 + } + if sig.V5Keys { + features |= 0x04 + } + + if features != 0x00 { + subpackets = append(subpackets, outputSubpacket{true, featuresSubpacket, false, []byte{features}}) + } + + if sig.KeyLifetimeSecs != nil && *sig.KeyLifetimeSecs != 0 { + keyLifetime := make([]byte, 4) + binary.BigEndian.PutUint32(keyLifetime, *sig.KeyLifetimeSecs) + subpackets = append(subpackets, outputSubpacket{true, keyExpirationSubpacket, true, keyLifetime}) + } + + if sig.IsPrimaryId != nil && *sig.IsPrimaryId { + subpackets = append(subpackets, outputSubpacket{true, primaryUserIdSubpacket, false, []byte{1}}) + } + + if len(sig.PreferredSymmetric) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefSymmetricAlgosSubpacket, false, sig.PreferredSymmetric}) + } + + if len(sig.PreferredHash) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefHashAlgosSubpacket, false, sig.PreferredHash}) + } + + if len(sig.PreferredCompression) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefCompressionSubpacket, false, sig.PreferredCompression}) + } + + if len(sig.PreferredAEAD) > 0 { + subpackets = append(subpackets, outputSubpacket{true, prefAeadAlgosSubpacket, false, sig.PreferredAEAD}) + } + + // Revocation reason appears only in revocation signatures and is serialized as per section 5.2.3.23. + if sig.RevocationReason != nil { + subpackets = append(subpackets, outputSubpacket{true, reasonForRevocationSubpacket, true, + append([]uint8{*sig.RevocationReason}, []uint8(sig.RevocationReasonText)...)}) + } + + // EmbeddedSignature appears only in subkeys capable of signing and is serialized as per section 5.2.3.26. 
+ if sig.EmbeddedSignature != nil { + var buf bytes.Buffer + // The subpacket carries only the signature body; serializeBody omits the packet header. + err = sig.EmbeddedSignature.serializeBody(&buf) + if err != nil { + return + } + subpackets = append(subpackets, outputSubpacket{true, embeddedSignatureSubpacket, true, buf.Bytes()}) + } + + return +} + +// AddMetadataToHashSuffix modifies the current hash suffix to include metadata +// (format, filename, and time). Version 5 signatures protect this data by +// including it in the hash computation. See section 5.2.4. +func (sig *Signature) AddMetadataToHashSuffix() { + if sig == nil || sig.Version != 5 { + return + } + if sig.SigType != 0x00 && sig.SigType != 0x01 { + return + } + lit := sig.Metadata + if lit == nil { + // This will translate into six 0x00 bytes. + lit = &LiteralData{} + } + + // Extract the current byte count + n := sig.HashSuffix[len(sig.HashSuffix)-8:] + l := uint64( + uint64(n[0])<<56 | uint64(n[1])<<48 | uint64(n[2])<<40 | uint64(n[3])<<32 | + uint64(n[4])<<24 | uint64(n[5])<<16 | uint64(n[6])<<8 | uint64(n[7])) + + suffix := bytes.NewBuffer(nil) + suffix.Write(sig.HashSuffix[:l]) + + // Add the metadata + var buf [4]byte + buf[0] = lit.Format + fileName := lit.FileName + if len(lit.FileName) > 255 { + fileName = fileName[:255] + } + buf[1] = byte(len(fileName)) + suffix.Write(buf[:2]) + // Write the (possibly truncated) file name whose length was recorded above. + suffix.Write([]byte(fileName)) + binary.BigEndian.PutUint32(buf[:], lit.Time) + suffix.Write(buf[:]) + + // Update the counter and restore trailing bytes + l = uint64(suffix.Len()) + suffix.Write([]byte{0x05, 0xff}) + suffix.Write([]byte{ + uint8(l >> 56), uint8(l >> 48), uint8(l >> 40), uint8(l >> 32), + uint8(l >> 24), uint8(l >> 16), uint8(l >> 8), uint8(l), + }) + sig.HashSuffix = suffix.Bytes() +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go new file mode 100644 index 0000000000..0e9f51d51b --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetric_key_encrypted.go @@ -0,0 +1,267 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "bytes" + "crypto/cipher" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +// This is the largest session key that we'll support. Since no 512-bit cipher +// has ever been seriously used, this is comfortably large. +const maxSessionKeySizeInBytes = 64 + +// SymmetricKeyEncrypted represents a passphrase protected session key. See RFC +// 4880, section 5.3. +type SymmetricKeyEncrypted struct { + Version int + CipherFunc CipherFunction + Mode AEADMode + s2k func(out, in []byte) + aeadNonce []byte + encryptedKey []byte +} + +func (ske *SymmetricKeyEncrypted) parse(r io.Reader) error { + // RFC 4880, section 5.3.
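+ // Wire layout: version octet, cipher algorithm octet, an AEAD mode octet + // (v5 only), the S2K specifier, an AEAD nonce (v5 only) and, optionally, + // the encrypted session key.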
+ var buf [2]byte + if _, err := readFull(r, buf[:]); err != nil { + return err + } + ske.Version = int(buf[0]) + if ske.Version != 4 && ske.Version != 5 { + return errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + } + ske.CipherFunc = CipherFunction(buf[1]) + if ske.CipherFunc.KeySize() == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(buf[1]))) + } + + if ske.Version == 5 { + mode := make([]byte, 1) + if _, err := r.Read(mode); err != nil { + return errors.StructuralError("cannot read AEAD octet from packet") + } + ske.Mode = AEADMode(mode[0]) + } + + var err error + if ske.s2k, err = s2k.Parse(r); err != nil { + if _, ok := err.(errors.ErrDummyPrivateKey); ok { + return errors.UnsupportedError("missing key GNU extension in session key") + } + return err + } + + if ske.Version == 5 { + // AEAD nonce + nonce := make([]byte, ske.Mode.NonceLength()) + _, err := readFull(r, nonce) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + ske.aeadNonce = nonce + } + + encryptedKey := make([]byte, maxSessionKeySizeInBytes) + // The session key may follow. We just have to try and read to find + // out. If it exists then we limit it to maxSessionKeySizeInBytes. + n, err := readFull(r, encryptedKey) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if n != 0 { + if n == maxSessionKeySizeInBytes { + return errors.UnsupportedError("oversized encrypted session key") + } + ske.encryptedKey = encryptedKey[:n] + } + return nil +} + +// Decrypt attempts to decrypt an encrypted session key and returns the key and +// the cipher to use when decrypting a subsequent Symmetrically Encrypted Data +// packet. +func (ske *SymmetricKeyEncrypted) Decrypt(passphrase []byte) ([]byte, CipherFunction, error) { + key := make([]byte, ske.CipherFunc.KeySize()) + ske.s2k(key, passphrase) + if len(ske.encryptedKey) == 0 { + return key, ske.CipherFunc, nil + } + switch ske.Version { + case 4: + plaintextKey, cipherFunc, err := ske.decryptV4(key) + return plaintextKey, cipherFunc, err + case 5: + plaintextKey, err := ske.decryptV5(key) + return plaintextKey, CipherFunction(0), err + } + err := errors.UnsupportedError("unknown SymmetricKeyEncrypted version") + return nil, CipherFunction(0), err +} + +func (ske *SymmetricKeyEncrypted) decryptV4(key []byte) ([]byte, CipherFunction, error) { + // the IV is all zeros + iv := make([]byte, ske.CipherFunc.blockSize()) + c := cipher.NewCFBDecrypter(ske.CipherFunc.new(key), iv) + plaintextKey := make([]byte, len(ske.encryptedKey)) + c.XORKeyStream(plaintextKey, ske.encryptedKey) + cipherFunc := CipherFunction(plaintextKey[0]) + if cipherFunc.blockSize() == 0 { + return nil, ske.CipherFunc, errors.UnsupportedError( + "unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + plaintextKey = plaintextKey[1:] + if len(plaintextKey) != cipherFunc.KeySize() { + return nil, cipherFunc, errors.StructuralError( + "length of decrypted key not equal to cipher keysize") + } + return plaintextKey, cipherFunc, nil +} + +func (ske *SymmetricKeyEncrypted) decryptV5(key []byte) ([]byte, error) { + blockCipher := CipherFunction(ske.CipherFunc).new(key) + aead := ske.Mode.new(blockCipher) + + adata := []byte{0xc3, byte(5), byte(ske.CipherFunc), byte(ske.Mode)} + plaintextKey, err := aead.Open(nil, ske.aeadNonce, ske.encryptedKey, adata) + if err != nil { + return nil, err + } + return plaintextKey, nil +} + +// SerializeSymmetricKeyEncrypted serializes a symmetric key packet to w.
+// The packet contains a random session key, encrypted by a key derived from +// the given passphrase. The session key is returned and must be passed to +// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on +// whether config.AEADConfig != nil. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncrypted(w io.Writer, passphrase []byte, config *Config) (key []byte, err error) { + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + sessionKey := make([]byte, keySize) + _, err = io.ReadFull(config.Random(), sessionKey) + if err != nil { + return + } + + err = SerializeSymmetricKeyEncryptedReuseKey(w, sessionKey, passphrase, config) + if err != nil { + return + } + + key = sessionKey + return +} + +// SerializeSymmetricKeyEncryptedReuseKey serializes a symmetric key packet to w. +// The packet contains the given session key, encrypted by a key derived from +// the given passphrase. The session key must be passed to +// SerializeSymmetricallyEncrypted or SerializeAEADEncrypted, depending on +// whether config.AEADConfig != nil. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricKeyEncryptedReuseKey(w io.Writer, sessionKey []byte, passphrase []byte, config *Config) (err error) { + var version int + if config.AEAD() != nil { + version = 5 + } else { + version = 4 + } + cipherFunc := config.Cipher() + keySize := cipherFunc.KeySize() + if keySize == 0 { + return errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(cipherFunc))) + } + + s2kBuf := new(bytes.Buffer) + keyEncryptingKey := make([]byte, keySize) + // s2k.Serialize salts and stretches the passphrase, and writes the + // resulting key to keyEncryptingKey and the s2k descriptor to s2kBuf. 
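+ // The serialized descriptor records the S2K mode, hash, salt and iteration + // count, so the decryptor can re-derive the same key-encrypting key from + // the passphrase.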
+ err = s2k.Serialize(s2kBuf, keyEncryptingKey, config.Random(), passphrase, &s2k.Config{Hash: config.Hash(), S2KCount: config.PasswordHashIterations()}) + if err != nil { + return + } + s2kBytes := s2kBuf.Bytes() + + var packetLength int + switch version { + case 4: + packetLength = 2 /* header */ + len(s2kBytes) + 1 /* cipher type */ + keySize + case 5: + nonceLen := config.AEAD().Mode().NonceLength() + tagLen := config.AEAD().Mode().TagLength() + packetLength = 3 + len(s2kBytes) + nonceLen + keySize + tagLen + } + err = serializeHeader(w, packetTypeSymmetricKeyEncrypted, packetLength) + if err != nil { + return + } + + buf := make([]byte, 2) + // Symmetric Key Encrypted Version + buf[0] = byte(version) + // Cipher function + buf[1] = byte(cipherFunc) + + if version == 5 { + // AEAD mode + buf = append(buf, byte(config.AEAD().Mode())) + } + _, err = w.Write(buf) + if err != nil { + return + } + _, err = w.Write(s2kBytes) + if err != nil { + return + } + + switch version { + case 4: + iv := make([]byte, cipherFunc.blockSize()) + c := cipher.NewCFBEncrypter(cipherFunc.new(keyEncryptingKey), iv) + encryptedCipherAndKey := make([]byte, keySize+1) + c.XORKeyStream(encryptedCipherAndKey, buf[1:]) + c.XORKeyStream(encryptedCipherAndKey[1:], sessionKey) + _, err = w.Write(encryptedCipherAndKey) + if err != nil { + return + } + case 5: + blockCipher := cipherFunc.new(keyEncryptingKey) + mode := config.AEAD().Mode() + aead := mode.new(blockCipher) + // Sample nonce using random reader + nonce := make([]byte, config.AEAD().Mode().NonceLength()) + _, err = io.ReadFull(config.Random(), nonce) + if err != nil { + return + } + // Seal and write (encryptedData includes auth. tag) + adata := []byte{0xc3, byte(5), byte(cipherFunc), byte(mode)} + encryptedData := aead.Seal(nil, nonce, sessionKey, adata) + _, err = w.Write(nonce) + if err != nil { + return + } + _, err = w.Write(encryptedData) + if err != nil { + return + } + } + + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go new file mode 100644 index 0000000000..8b84de177f --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/symmetrically_encrypted.go @@ -0,0 +1,290 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "crypto/cipher" + "crypto/sha1" + "crypto/subtle" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/errors" +) + +// SymmetricallyEncrypted represents a symmetrically encrypted byte string. The +// encrypted Contents will consist of more OpenPGP packets. See RFC 4880, +// sections 5.7 and 5.13. +type SymmetricallyEncrypted struct { + MDC bool // true iff this is a type 18 packet and thus has an embedded MAC. + Contents io.Reader + prefix []byte +} + +const symmetricallyEncryptedVersion = 1 + +func (se *SymmetricallyEncrypted) parse(r io.Reader) error { + if se.MDC { + // See RFC 4880, section 5.13. + var buf [1]byte + _, err := readFull(r, buf[:]) + if err != nil { + return err + } + if buf[0] != symmetricallyEncryptedVersion { + return errors.UnsupportedError("unknown SymmetricallyEncrypted version") + } + } + se.Contents = r + return nil +} + +// Decrypt returns a ReadCloser, from which the decrypted Contents of the +// packet can be read. 
An incorrect key will only be detected after trying +// to decrypt the entire data. +func (se *SymmetricallyEncrypted) Decrypt(c CipherFunction, key []byte) (io.ReadCloser, error) { + keySize := c.KeySize() + if keySize == 0 { + return nil, errors.UnsupportedError("unknown cipher: " + strconv.Itoa(int(c))) + } + if len(key) != keySize { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted: incorrect key length") + } + + if se.prefix == nil { + se.prefix = make([]byte, c.blockSize()+2) + _, err := readFull(se.Contents, se.prefix) + if err != nil { + return nil, err + } + } else if len(se.prefix) != c.blockSize()+2 { + return nil, errors.InvalidArgumentError("can't try ciphers with different block lengths") + } + + ocfbResync := OCFBResync + if se.MDC { + // MDC packets use a different form of OCFB mode. + ocfbResync = OCFBNoResync + } + + s := NewOCFBDecrypter(c.new(key), se.prefix, ocfbResync) + + plaintext := cipher.StreamReader{S: s, R: se.Contents} + + if se.MDC { + // MDC packets have an embedded hash that we need to check. + h := sha1.New() + h.Write(se.prefix) + return &seMDCReader{in: plaintext, h: h}, nil + } + + // Otherwise, we just need to wrap plaintext so that it's a valid ReadCloser. + return seReader{plaintext}, nil +} + +// seReader wraps an io.Reader with a no-op Close method. +type seReader struct { + in io.Reader +} + +func (ser seReader) Read(buf []byte) (int, error) { + return ser.in.Read(buf) +} + +func (ser seReader) Close() error { + return nil +} + +const mdcTrailerSize = 1 /* tag byte */ + 1 /* length byte */ + sha1.Size + +// An seMDCReader wraps an io.Reader, maintains a running hash and keeps hold +// of the most recent 22 bytes (mdcTrailerSize). Upon EOF, those bytes form an +// MDC packet containing a hash of the previous Contents which is checked +// against the running hash. See RFC 4880, section 5.13. +type seMDCReader struct { + in io.Reader + h hash.Hash + trailer [mdcTrailerSize]byte + scratch [mdcTrailerSize]byte + trailerUsed int + error bool + eof bool +} + +func (ser *seMDCReader) Read(buf []byte) (n int, err error) { + if ser.error { + err = io.ErrUnexpectedEOF + return + } + if ser.eof { + err = io.EOF + return + } + + // If we haven't yet filled the trailer buffer then we must do that + // first. + for ser.trailerUsed < mdcTrailerSize { + n, err = ser.in.Read(ser.trailer[ser.trailerUsed:]) + ser.trailerUsed += n + if err == io.EOF { + if ser.trailerUsed != mdcTrailerSize { + n = 0 + err = io.ErrUnexpectedEOF + ser.error = true + return + } + ser.eof = true + n = 0 + return + } + + if err != nil { + n = 0 + return + } + } + + // If it's a short read then we read into a temporary buffer and shift + // the data into the caller's buffer. + if len(buf) <= mdcTrailerSize { + n, err = readFull(ser.in, ser.scratch[:len(buf)]) + copy(buf, ser.trailer[:n]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], ser.trailer[n:]) + copy(ser.trailer[mdcTrailerSize-n:], ser.scratch[:]) + if n < len(buf) { + ser.eof = true + err = io.EOF + } + return + } + + n, err = ser.in.Read(buf[mdcTrailerSize:]) + copy(buf, ser.trailer[:]) + ser.h.Write(buf[:n]) + copy(ser.trailer[:], buf[n:]) + + if err == io.EOF { + ser.eof = true + } + return +} + +// This is a new-format packet tag byte for a type 19 (MDC) packet. 
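+// Bit 7 (0x80) is always set, bit 6 (0x40) selects the new packet format and +// the low bits carry the packet type, giving 0xd3.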
+const mdcPacketTagByte = byte(0x80) | 0x40 | 19 + +func (ser *seMDCReader) Close() error { + if ser.error { + return errors.ErrMDCMissing + } + + for !ser.eof { + // We haven't seen EOF, so we need to read to the end. + var buf [1024]byte + _, err := ser.Read(buf[:]) + if err == io.EOF { + break + } + if err != nil { + return errors.ErrMDCMissing + } + } + + ser.h.Write(ser.trailer[:2]) + + final := ser.h.Sum(nil) + if subtle.ConstantTimeCompare(final, ser.trailer[2:]) != 1 { + return errors.ErrMDCHashMismatch + } + // The hash already includes the MDC header, but we still check its value + // to confirm encryption correctness. + if ser.trailer[0] != mdcPacketTagByte || ser.trailer[1] != sha1.Size { + return errors.ErrMDCMissing + } + return nil +} + +// An seMDCWriter writes through to an io.WriteCloser while maintaining a running +// hash of the data written. On close, it emits an MDC packet containing the +// running hash. +type seMDCWriter struct { + w io.WriteCloser + h hash.Hash +} + +func (w *seMDCWriter) Write(buf []byte) (n int, err error) { + w.h.Write(buf) + return w.w.Write(buf) +} + +func (w *seMDCWriter) Close() (err error) { + var buf [mdcTrailerSize]byte + + buf[0] = mdcPacketTagByte + buf[1] = sha1.Size + w.h.Write(buf[:2]) + digest := w.h.Sum(nil) + copy(buf[2:], digest) + + _, err = w.w.Write(buf[:]) + if err != nil { + return + } + return w.w.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +// SerializeSymmetricallyEncrypted serializes a symmetrically encrypted packet +// to w and returns a WriteCloser to which the to-be-encrypted packets can be +// written. +// If config is nil, sensible defaults will be used. +func SerializeSymmetricallyEncrypted(w io.Writer, c CipherFunction, key []byte, config *Config) (Contents io.WriteCloser, err error) { + if c.KeySize() != len(key) { + return nil, errors.InvalidArgumentError("SymmetricallyEncrypted.Serialize: bad key length") + } + writeCloser := noOpCloser{w} + ciphertext, err := serializeStreamHeader(writeCloser, packetTypeSymmetricallyEncryptedMDC) + if err != nil { + return + } + + _, err = ciphertext.Write([]byte{symmetricallyEncryptedVersion}) + if err != nil { + return + } + + block := c.new(key) + blockSize := block.BlockSize() + iv := make([]byte, blockSize) + _, err = config.Random().Read(iv) + if err != nil { + return + } + s, prefix := NewOCFBEncrypter(block, iv, OCFBNoResync) + _, err = ciphertext.Write(prefix) + if err != nil { + return + } + plaintext := cipher.StreamWriter{S: s, W: ciphertext} + + h := sha1.New() + h.Write(iv) + h.Write(iv[blockSize-2:]) + Contents = &seMDCWriter{w: plaintext, h: h} + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go new file mode 100644 index 0000000000..0f760ade2c --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userattribute.go @@ -0,0 +1,94 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.
+ +package packet + +import ( + "bytes" + "image" + "image/jpeg" + "io" + "io/ioutil" +) + +const UserAttrImageSubpacket = 1 + +// UserAttribute is capable of storing other types of data about a user +// beyond name, email and a text comment. In practice, user attributes are typically used +// to store a signed thumbnail photo JPEG image of the user. +// See RFC 4880, section 5.12. +type UserAttribute struct { + Contents []*OpaqueSubpacket +} + +// NewUserAttributePhoto creates a user attribute packet +// containing the given images. +func NewUserAttributePhoto(photos ...image.Image) (uat *UserAttribute, err error) { + uat = new(UserAttribute) + for _, photo := range photos { + var buf bytes.Buffer + // RFC 4880, Section 5.12.1. + data := []byte{ + 0x10, 0x00, // Little-endian image header length (16 bytes) + 0x01, // Image header version 1 + 0x01, // JPEG + 0, 0, 0, 0, // 12 reserved octets, must be all zero. + 0, 0, 0, 0, + 0, 0, 0, 0} + if _, err = buf.Write(data); err != nil { + return + } + if err = jpeg.Encode(&buf, photo, nil); err != nil { + return + } + uat.Contents = append(uat.Contents, &OpaqueSubpacket{ + SubType: UserAttrImageSubpacket, + Contents: buf.Bytes()}) + } + return +} + +// NewUserAttribute creates a new user attribute packet containing the given subpackets. +func NewUserAttribute(contents ...*OpaqueSubpacket) *UserAttribute { + return &UserAttribute{Contents: contents} +} + +func (uat *UserAttribute) parse(r io.Reader) (err error) { + // RFC 4880, section 5.12 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uat.Contents, err = OpaqueSubpackets(b) + return +} + +// Serialize marshals the user attribute to w in the form of an OpenPGP packet, including +// header. +func (uat *UserAttribute) Serialize(w io.Writer) (err error) { + var buf bytes.Buffer + for _, sp := range uat.Contents { + err = sp.Serialize(&buf) + if err != nil { + return err + } + } + if err = serializeHeader(w, packetTypeUserAttribute, buf.Len()); err != nil { + return err + } + _, err = w.Write(buf.Bytes()) + return +} + +// ImageData returns zero or more byte slices, each containing +// JPEG File Interchange Format (JFIF), for each photo in the +// user attribute packet. +func (uat *UserAttribute) ImageData() (imageData [][]byte) { + for _, sp := range uat.Contents { + if sp.SubType == UserAttrImageSubpacket && len(sp.Contents) > 16 { + imageData = append(imageData, sp.Contents[16:]) + } + } + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go new file mode 100644 index 0000000000..d6bea7d4ac --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/packet/userid.go @@ -0,0 +1,160 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packet + +import ( + "io" + "io/ioutil" + "strings" +) + +// UserId contains text that is intended to represent the name and email +// address of the key holder. See RFC 4880, section 5.11. By convention, this +// takes the form "Full Name (Comment) <email@example.com>" +type UserId struct { + Id string // By convention, this takes the form "Full Name (Comment) <email@example.com>" which is split out in the fields below.
+ + Name, Comment, Email string +} + +func hasInvalidCharacters(s string) bool { + for _, c := range s { + switch c { + case '(', ')', '<', '>', 0: + return true + } + } + return false +} + +// NewUserId returns a UserId or nil if any of the arguments contain invalid +// characters. The invalid characters are '\x00', '(', ')', '<' and '>'. +func NewUserId(name, comment, email string) *UserId { + // RFC 4880 doesn't deal with the structure of userid strings; the + // name, comment and email form is just a convention. However, there's + // no convention about escaping the metacharacters and GPG just refuses + // to create user ids where, say, the name contains a '('. We mirror + // this behaviour. + + if hasInvalidCharacters(name) || hasInvalidCharacters(comment) || hasInvalidCharacters(email) { + return nil + } + + uid := new(UserId) + uid.Name, uid.Comment, uid.Email = name, comment, email + uid.Id = name + if len(comment) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "(" + uid.Id += comment + uid.Id += ")" + } + if len(email) > 0 { + if len(uid.Id) > 0 { + uid.Id += " " + } + uid.Id += "<" + uid.Id += email + uid.Id += ">" + } + return uid +} + +func (uid *UserId) parse(r io.Reader) (err error) { + // RFC 4880, section 5.11 + b, err := ioutil.ReadAll(r) + if err != nil { + return + } + uid.Id = string(b) + uid.Name, uid.Comment, uid.Email = parseUserId(uid.Id) + return +} + +// Serialize marshals uid to w in the form of an OpenPGP packet, including +// header. +func (uid *UserId) Serialize(w io.Writer) error { + err := serializeHeader(w, packetTypeUserId, len(uid.Id)) + if err != nil { + return err + } + _, err = w.Write([]byte(uid.Id)) + return err +} + +// parseUserId extracts the name, comment and email from a user id string that +// is formatted as "Full Name (Comment) <email@example.com>". +func parseUserId(id string) (name, comment, email string) { + var n, c, e struct { + start, end int + } + var state int + + for offset, rune := range id { + switch state { + case 0: + // Entering name + n.start = offset + state = 1 + fallthrough + case 1: + // In name + if rune == '(' { + state = 2 + n.end = offset + } else if rune == '<' { + state = 5 + n.end = offset + } + case 2: + // Entering comment + c.start = offset + state = 3 + fallthrough + case 3: + // In comment + if rune == ')' { + state = 4 + c.end = offset + } + case 4: + // Between comment and email + if rune == '<' { + state = 5 + } + case 5: + // Entering email + e.start = offset + state = 6 + fallthrough + case 6: + // In email + if rune == '>' { + state = 7 + e.end = offset + } + default: + // After email + } + } + switch state { + case 1: + // ended in the name + n.end = len(id) + case 3: + // ended in comment + c.end = len(id) + case 6: + // ended in email + e.end = len(id) + } + + name = strings.TrimSpace(id[n.start:n.end]) + comment = strings.TrimSpace(id[c.start:c.end]) + email = strings.TrimSpace(id[e.start:e.end]) + return +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go new file mode 100644 index 0000000000..a649ebbab3 --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read.go @@ -0,0 +1,508 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package openpgp implements high level operations on OpenPGP messages.
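+// +// A minimal decryption sketch (illustrative only; keyring, promptFn and +// ciphertext are assumed to exist): +// +//	md, err := openpgp.ReadMessage(ciphertext, keyring, promptFn, nil) +//	if err != nil { +//		// handle error +//	} +//	plaintext, err := ioutil.ReadAll(md.UnverifiedBody) +// +// md.Signature and md.SignatureError are only meaningful once UnverifiedBody +// has been read to EOF.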
+package openpgp // import "github.com/ProtonMail/go-crypto/openpgp" + +import ( + "crypto" + _ "crypto/sha256" + "hash" + "io" + "strconv" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" +) + +// SignatureType is the armor type for a PGP signature. +var SignatureType = "PGP SIGNATURE" + +// readArmored reads an armored block with the given type. +func readArmored(r io.Reader, expectedType string) (body io.Reader, err error) { + block, err := armor.Decode(r) + if err != nil { + return + } + + if block.Type != expectedType { + return nil, errors.InvalidArgumentError("expected '" + expectedType + "', got: " + block.Type) + } + + return block.Body, nil +} + +// MessageDetails contains the result of parsing an OpenPGP encrypted and/or +// signed message. +type MessageDetails struct { + IsEncrypted bool // true if the message was encrypted. + EncryptedToKeyIds []uint64 // the list of recipient key ids. + IsSymmetricallyEncrypted bool // true if a passphrase could have decrypted the message. + DecryptedWith Key // the private key used to decrypt the message, if any. + IsSigned bool // true if the message is signed. + SignedByKeyId uint64 // the key id of the signer, if any. + SignedBy *Key // the key of the signer, if available. + LiteralData *packet.LiteralData // the metadata of the contents + UnverifiedBody io.Reader // the contents of the message. + + // If IsSigned is true and SignedBy is non-zero then the signature will + // be verified as UnverifiedBody is read. The signature cannot be + // checked until the whole of UnverifiedBody is read so UnverifiedBody + // must be consumed until EOF before the data can be trusted. Even if a + // message isn't signed (or the signer is unknown) the data may contain + // an authentication code that is only checked once UnverifiedBody has + // been consumed. Once EOF has been seen, the following fields are + // valid. (An authentication code failure is reported as a + // SignatureError error when reading from UnverifiedBody.) + Signature *packet.Signature // the signature packet itself. + SignatureError error // nil if the signature is good. + UnverifiedSignatures []*packet.Signature // all other unverified signature packets. + + decrypted io.ReadCloser +} + +// A PromptFunction is used as a callback by functions that may need to decrypt +// a private key, or prompt for a passphrase. It is called with a list of +// acceptable, encrypted private keys and a boolean that indicates whether a +// passphrase is usable. It should either decrypt a private key or return a +// passphrase to try. If the decrypted private key or given passphrase isn't +// correct, the function will be called again, forever. Any error returned will +// be passed up. +type PromptFunction func(keys []Key, symmetric bool) ([]byte, error) + +// A keyEnvelopePair is used to store a private key with the envelope that +// contains a symmetric key, encrypted with that key. +type keyEnvelopePair struct { + key Key + encryptedKey *packet.EncryptedKey +} + +// ReadMessage parses an OpenPGP message that may be signed and/or encrypted. +// The given KeyRing should contain both public keys (for signature +// verification) and, possibly encrypted, private keys for decrypting. +// If config is nil, sensible defaults will be used. 
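+// +// A passphrase-only prompt callback might look like this (an illustrative +// sketch; the passphrase source is an assumption): +// +//	prompt := func(keys []Key, symmetric bool) ([]byte, error) { +//		if symmetric { +//			return []byte(passphrase), nil +//		} +//		return nil, errors.ErrKeyIncorrect +//	}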
+func ReadMessage(r io.Reader, keyring KeyRing, prompt PromptFunction, config *packet.Config) (md *MessageDetails, err error) { + var p packet.Packet + + var symKeys []*packet.SymmetricKeyEncrypted + var pubKeys []keyEnvelopePair + // Integrity protected encrypted packet: SymmetricallyEncrypted or AEADEncrypted + var edp packet.EncryptedDataPacket + + packets := packet.NewReader(r) + md = new(MessageDetails) + md.IsEncrypted = true + + // The message, if encrypted, starts with a number of packets + // containing an encrypted decryption key. The decryption key is either + // encrypted to a public key, or with a passphrase. This loop + // collects these packets. +ParsePackets: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.SymmetricKeyEncrypted: + // This packet contains the decryption key encrypted with a passphrase. + md.IsSymmetricallyEncrypted = true + symKeys = append(symKeys, p) + case *packet.EncryptedKey: + // This packet contains the decryption key encrypted to a public key. + md.EncryptedToKeyIds = append(md.EncryptedToKeyIds, p.KeyId) + switch p.Algo { + case packet.PubKeyAlgoRSA, packet.PubKeyAlgoRSAEncryptOnly, packet.PubKeyAlgoElGamal, packet.PubKeyAlgoECDH: + break + default: + continue + } + var keys []Key + if p.KeyId == 0 { + keys = keyring.DecryptionKeys() + } else { + keys = keyring.KeysById(p.KeyId) + } + for _, k := range keys { + pubKeys = append(pubKeys, keyEnvelopePair{k, p}) + } + case *packet.SymmetricallyEncrypted, *packet.AEADEncrypted: + edp = p.(packet.EncryptedDataPacket) + break ParsePackets + case *packet.Compressed, *packet.LiteralData, *packet.OnePassSignature: + // This message isn't encrypted. + if len(symKeys) != 0 || len(pubKeys) != 0 { + return nil, errors.StructuralError("key material not followed by encrypted message") + } + packets.Unread(p) + return readSignedMessage(packets, nil, keyring, config) + } + } + + var candidates []Key + var decrypted io.ReadCloser + + // Now that we have the list of encrypted keys we need to decrypt at + // least one of them or, if we cannot, we need to call the prompt + // function so that it can decrypt a key or give us a passphrase. 
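+	// For reference, a prompt callback might look like the following
+	// hypothetical sketch, which decrypts the first candidate key with a
+	// passphrase obtained elsewhere (or offers the passphrase itself for
+	// symmetric decryption):
+	//
+	//	prompt := func(keys []Key, symmetric bool) ([]byte, error) {
+	//		if len(keys) > 0 {
+	//			return nil, keys[0].PrivateKey.Decrypt(passphrase)
+	//		}
+	//		return passphrase, nil
+	//	}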
+FindKey:
+	for {
+		// See if any of the keys already have a private key available
+		candidates = candidates[:0]
+		candidateFingerprints := make(map[string]bool)
+
+		for _, pk := range pubKeys {
+			if pk.key.PrivateKey == nil {
+				continue
+			}
+			if !pk.key.PrivateKey.Encrypted {
+				if len(pk.encryptedKey.Key) == 0 {
+					errDec := pk.encryptedKey.Decrypt(pk.key.PrivateKey, config)
+					if errDec != nil {
+						continue
+					}
+				}
+				// Try to decrypt symmetrically encrypted
+				decrypted, err = edp.Decrypt(pk.encryptedKey.CipherFunc, pk.encryptedKey.Key)
+				if err != nil && err != errors.ErrKeyIncorrect {
+					return nil, err
+				}
+				if decrypted != nil {
+					md.DecryptedWith = pk.key
+					break FindKey
+				}
+			} else {
+				fpr := string(pk.key.PublicKey.Fingerprint[:])
+				if v := candidateFingerprints[fpr]; v {
+					continue
+				}
+				candidates = append(candidates, pk.key)
+				candidateFingerprints[fpr] = true
+			}
+		}
+
+		if len(candidates) == 0 && len(symKeys) == 0 {
+			return nil, errors.ErrKeyIncorrect
+		}
+
+		if prompt == nil {
+			return nil, errors.ErrKeyIncorrect
+		}
+
+		passphrase, err := prompt(candidates, len(symKeys) != 0)
+		if err != nil {
+			return nil, err
+		}
+
+		// Try the symmetric passphrase first
+		if len(symKeys) != 0 && passphrase != nil {
+			for _, s := range symKeys {
+				key, cipherFunc, err := s.Decrypt(passphrase)
+				// On a wrong passphrase, session key decryption is very likely to result in an invalid cipherFunc:
+				// in fewer than 5% of cases will we proceed to decrypt the data
+				if err == nil {
+					decrypted, err = edp.Decrypt(cipherFunc, key)
+					// TODO: ErrKeyIncorrect is no longer thrown on SEIP decryption,
+					// but it might still be relevant for when we implement AEAD decryption (otherwise, remove?)
+					if err != nil && err != errors.ErrKeyIncorrect {
+						return nil, err
+					}
+					if decrypted != nil {
+						break FindKey
+					}
+				}
+			}
+		}
+	}
+
+	md.decrypted = decrypted
+	if err := packets.Push(decrypted); err != nil {
+		return nil, err
+	}
+	mdFinal, sensitiveParsingErr := readSignedMessage(packets, md, keyring, config)
+	if sensitiveParsingErr != nil {
+		return nil, errors.StructuralError("parsing error")
+	}
+	return mdFinal, nil
+}
+
+// readSignedMessage reads a possibly signed message. If mdin is non-nil then
+// that structure is updated and returned. Otherwise a fresh MessageDetails is
+// used.
+func readSignedMessage(packets *packet.Reader, mdin *MessageDetails, keyring KeyRing, config *packet.Config) (md *MessageDetails, err error) { + if mdin == nil { + mdin = new(MessageDetails) + } + md = mdin + + var p packet.Packet + var h hash.Hash + var wrappedHash hash.Hash + var prevLast bool +FindLiteralData: + for { + p, err = packets.Next() + if err != nil { + return nil, err + } + switch p := p.(type) { + case *packet.Compressed: + if err := packets.Push(p.Body); err != nil { + return nil, err + } + case *packet.OnePassSignature: + if prevLast { + return nil, errors.UnsupportedError("nested signature packets") + } + + if p.IsLast { + prevLast = true + } + + h, wrappedHash, err = hashForSignature(p.Hash, p.SigType) + if err != nil { + md.SignatureError = err + } + + md.IsSigned = true + md.SignedByKeyId = p.KeyId + keys := keyring.KeysByIdUsage(p.KeyId, packet.KeyFlagSign) + if len(keys) > 0 { + md.SignedBy = &keys[0] + } + case *packet.LiteralData: + md.LiteralData = p + break FindLiteralData + } + } + + if md.IsSigned && md.SignatureError == nil { + md.UnverifiedBody = &signatureCheckReader{packets, h, wrappedHash, md, config} + } else if md.decrypted != nil { + md.UnverifiedBody = checkReader{md} + } else { + md.UnverifiedBody = md.LiteralData.Body + } + + return md, nil +} + +// hashForSignature returns a pair of hashes that can be used to verify a +// signature. The signature may specify that the contents of the signed message +// should be preprocessed (i.e. to normalize line endings). Thus this function +// returns two hashes. The second should be used to hash the message itself and +// performs any needed preprocessing. +func hashForSignature(hashId crypto.Hash, sigType packet.SignatureType) (hash.Hash, hash.Hash, error) { + if hashId == crypto.MD5 { + return nil, nil, errors.UnsupportedError("insecure hash algorithm: MD5") + } + if !hashId.Available() { + return nil, nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashId))) + } + h := hashId.New() + + switch sigType { + case packet.SigTypeBinary: + return h, h, nil + case packet.SigTypeText: + return h, NewCanonicalTextHash(h), nil + } + + return nil, nil, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(sigType))) +} + +// checkReader wraps an io.Reader from a LiteralData packet. When it sees EOF +// it closes the ReadCloser from any SymmetricallyEncrypted packet to trigger +// MDC checks. +type checkReader struct { + md *MessageDetails +} + +func (cr checkReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := cr.md.LiteralData.Body.Read(buf) + if sensitiveParsingError == io.EOF { + mdcErr := cr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// signatureCheckReader wraps an io.Reader from a LiteralData packet and hashes +// the data as it is read. When it sees an EOF from the underlying io.Reader +// it parses and checks a trailing Signature packet and triggers any MDC checks. 
+type signatureCheckReader struct { + packets *packet.Reader + h, wrappedHash hash.Hash + md *MessageDetails + config *packet.Config +} + +func (scr *signatureCheckReader) Read(buf []byte) (int, error) { + n, sensitiveParsingError := scr.md.LiteralData.Body.Read(buf) + + // Hash only if required + if scr.md.SignedBy != nil { + scr.wrappedHash.Write(buf[:n]) + } + + if sensitiveParsingError == io.EOF { + var p packet.Packet + var readError error + var sig *packet.Signature + + p, readError = scr.packets.Next() + for readError == nil { + var ok bool + if sig, ok = p.(*packet.Signature); ok { + if sig.Version == 5 && (sig.SigType == 0x00 || sig.SigType == 0x01) { + sig.Metadata = scr.md.LiteralData + } + + // If signature KeyID matches + if scr.md.SignedBy != nil && *sig.IssuerKeyId == scr.md.SignedByKeyId { + scr.md.Signature = sig + scr.md.SignatureError = scr.md.SignedBy.PublicKey.VerifySignature(scr.h, scr.md.Signature) + if scr.md.SignatureError == nil && scr.md.Signature.SigExpired(scr.config.Now()) { + scr.md.SignatureError = errors.ErrSignatureExpired + } + } else { + scr.md.UnverifiedSignatures = append(scr.md.UnverifiedSignatures, sig) + } + } + + p, readError = scr.packets.Next() + } + + if scr.md.SignedBy != nil && scr.md.Signature == nil { + if scr.md.UnverifiedSignatures == nil { + scr.md.SignatureError = errors.StructuralError("LiteralData not followed by signature") + } else { + scr.md.SignatureError = errors.StructuralError("No matching signature found") + } + } + + // The SymmetricallyEncrypted packet, if any, might have an + // unsigned hash of its own. In order to check this we need to + // close that Reader. + if scr.md.decrypted != nil { + mdcErr := scr.md.decrypted.Close() + if mdcErr != nil { + return n, mdcErr + } + } + return n, io.EOF + } + + if sensitiveParsingError != nil { + return n, errors.StructuralError("parsing error") + } + + return n, nil +} + +// CheckDetachedSignature takes a signed file and a detached signature and +// returns the signer if the signature is valid. If the signer isn't known, +// ErrUnknownIssuer is returned. +func CheckDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + var expectedHashes []crypto.Hash + return CheckDetachedSignatureAndHash(keyring, signed, signature, expectedHashes, config) +} + +// CheckDetachedSignatureAndHash performs the same actions as +// CheckDetachedSignature and checks that the expected hash functions were used. 
+func CheckDetachedSignatureAndHash(keyring KeyRing, signed, signature io.Reader, expectedHashes []crypto.Hash, config *packet.Config) (signer *Entity, err error) { + var issuerKeyId uint64 + var hashFunc crypto.Hash + var sigType packet.SignatureType + var keys []Key + var p packet.Packet + + expectedHashesLen := len(expectedHashes) + packets := packet.NewReader(signature) + var sig *packet.Signature + for { + p, err = packets.Next() + if err == io.EOF { + return nil, errors.ErrUnknownIssuer + } + if err != nil { + return nil, err + } + + var ok bool + sig, ok = p.(*packet.Signature) + if !ok { + return nil, errors.StructuralError("non signature packet found") + } + if sig.IssuerKeyId == nil { + return nil, errors.StructuralError("signature doesn't have an issuer") + } + issuerKeyId = *sig.IssuerKeyId + hashFunc = sig.Hash + sigType = sig.SigType + + for i, expectedHash := range expectedHashes { + if hashFunc == expectedHash { + break + } + if i+1 == expectedHashesLen { + return nil, errors.StructuralError("hash algorithm mismatch with cleartext message headers") + } + } + + keys = keyring.KeysByIdUsage(issuerKeyId, packet.KeyFlagSign) + if len(keys) > 0 { + break + } + } + + if len(keys) == 0 { + panic("unreachable") + } + + h, wrappedHash, err := hashForSignature(hashFunc, sigType) + if err != nil { + return nil, err + } + + if _, err := io.Copy(wrappedHash, signed); err != nil && err != io.EOF { + return nil, err + } + + for _, key := range keys { + err = key.PublicKey.VerifySignature(h, sig) + if err == nil { + now := config.Now() + if sig.SigExpired(now) { + return key.Entity, errors.ErrSignatureExpired + } + if key.PublicKey.KeyExpired(key.SelfSignature, now) { + return key.Entity, errors.ErrKeyExpired + } + return key.Entity, nil + } + } + + return nil, err +} + +// CheckArmoredDetachedSignature performs the same actions as +// CheckDetachedSignature but expects the signature to be armored. 
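+//
+// A minimal usage sketch (assuming keyring is an EntityList, signed reads
+// the original data and sig reads the armored signature; hypothetical
+// variable names, error handling elided):
+//
+//	signer, err := CheckArmoredDetachedSignature(keyring, signed, sig, nil)
+//	// On success, signer identifies the signing entity; on failure, err is
+//	// e.g. errors.ErrUnknownIssuer or errors.ErrSignatureExpired.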
+func CheckArmoredDetachedSignature(keyring KeyRing, signed, signature io.Reader, config *packet.Config) (signer *Entity, err error) { + body, err := readArmored(signature, SignatureType) + if err != nil { + return + } + + return CheckDetachedSignature(keyring, signed, body, config) +} diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go new file mode 100644 index 0000000000..8caed36e3e --- /dev/null +++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/read_write_test_data.go @@ -0,0 +1,173 @@ +package openpgp + +const testKey1KeyId = 0xA34D7E18C20C31BB +const testKey3KeyId = 0x338934250CCC0360 +const testKeyP256KeyId = 0xd44a2c495918513e + +const signedInput = "Signed message\nline 2\nline 3\n" +const signedTextInput = "Signed message\r\nline 2\r\nline 3\r\n" + +const recipientUnspecifiedHex = "848c0300000000000000000103ff62d4d578d03cf40c3da998dfe216c074fa6ddec5e31c197c9666ba292830d91d18716a80f699f9d897389a90e6d62d0238f5f07a5248073c0f24920e4bc4a30c2d17ee4e0cae7c3d4aaa4e8dced50e3010a80ee692175fa0385f62ecca4b56ee6e9980aa3ec51b61b077096ac9e800edaf161268593eedb6cc7027ff5cb32745d250010d407a6221ae22ef18469b444f2822478c4d190b24d36371a95cb40087cdd42d9399c3d06a53c0673349bfb607927f20d1e122bde1e2bf3aa6cae6edf489629bcaa0689539ae3b718914d88ededc3b" + +const detachedSignatureHex = "889c04000102000605024d449cd1000a0910a34d7e18c20c31bb167603ff57718d09f28a519fdc7b5a68b6a3336da04df85e38c5cd5d5bd2092fa4629848a33d85b1729402a2aab39c3ac19f9d573f773cc62c264dc924c067a79dfd8a863ae06c7c8686120760749f5fd9b1e03a64d20a7df3446ddc8f0aeadeaeba7cbaee5c1e366d65b6a0c6cc749bcb912d2f15013f812795c2e29eb7f7b77f39ce77" + +const detachedSignatureTextHex = "889c04010102000605024d449d21000a0910a34d7e18c20c31bbc8c60400a24fbef7342603a41cb1165767bd18985d015fb72fe05db42db36cfb2f1d455967f1e491194fbf6cf88146222b23bf6ffbd50d17598d976a0417d3192ff9cc0034fd00f287b02e90418bbefe609484b09231e4e7a5f3562e199bf39909ab5276c4d37382fe088f6b5c3426fc1052865da8b3ab158672d58b6264b10823dc4b39" + +const detachedSignatureDSAHex = "884604001102000605024d6c4eac000a0910338934250ccc0360f18d00a087d743d6405ed7b87755476629600b8b694a39e900a0abff8126f46faf1547c1743c37b21b4ea15b8f83" + +const detachedSignatureP256Hex = "885e0400130a0006050256e5bb00000a0910d44a2c495918513edef001009841a4f792beb0befccb35c8838a6a87d9b936beaa86db6745ddc7b045eee0cf00fd1ac1f78306b17e965935dd3f8bae4587a76587e4af231efe19cc4011a8434817" + +// The plaintext is https://www.gutenberg.org/cache/epub/1080/pg1080.txt +const modestProposalSha512 = "lbbrB1+WP3T9AaC9OQqBdOcCjgeEQadlulXsNPgVx0tyqPzDHwUugZ2gE7V0ESKAw6kAVfgkcuvfgxAAGaeHtw==" + +const testKeys1And2Hex = 
"988d044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd0011010001b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b0020003b88d044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f0011010001889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab0020003988d044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b0020003b88d044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020003" + +const testKeys1And2PrivateHex = 
"9501d8044d3c5c10010400b1d13382944bd5aba23a4312968b5095d14f947f600eb478e14a6fcb16b0e0cac764884909c020bc495cfcc39a935387c661507bdb236a0612fb582cac3af9b29cc2c8c70090616c41b662f4da4c1201e195472eb7f4ae1ccbcbf9940fe21d985e379a5563dde5b9a23d35f1cfaa5790da3b79db26f23695107bfaca8e7b5bcd00110100010003ff4d91393b9a8e3430b14d6209df42f98dc927425b881f1209f319220841273a802a97c7bdb8b3a7740b3ab5866c4d1d308ad0d3a79bd1e883aacf1ac92dfe720285d10d08752a7efe3c609b1d00f17f2805b217be53999a7da7e493bfc3e9618fd17018991b8128aea70a05dbce30e4fbe626aa45775fa255dd9177aabf4df7cf0200c1ded12566e4bc2bb590455e5becfb2e2c9796482270a943343a7835de41080582c2be3caf5981aa838140e97afa40ad652a0b544f83eb1833b0957dce26e47b0200eacd6046741e9ce2ec5beb6fb5e6335457844fb09477f83b050a96be7da043e17f3a9523567ed40e7a521f818813a8b8a72209f1442844843ccc7eb9805442570200bdafe0438d97ac36e773c7162028d65844c4d463e2420aa2228c6e50dc2743c3d6c72d0d782a5173fe7be2169c8a9f4ef8a7cf3e37165e8c61b89c346cdc6c1799d2b41054657374204b6579203120285253412988b804130102002205024d3c5c10021b03060b090807030206150802090a0b0416020301021e01021780000a0910a34d7e18c20c31bbb5b304009cc45fe610b641a2c146331be94dade0a396e73ca725e1b25c21708d9cab46ecca5ccebc23055879df8f99eea39b377962a400f2ebdc36a7c99c333d74aeba346315137c3ff9d0a09b0273299090343048afb8107cf94cbd1400e3026f0ccac7ecebbc4d78588eb3e478fe2754d3ca664bcf3eac96ca4a6b0c8d7df5102f60f6b00200009d01d8044d3c5c10010400b201df61d67487301f11879d514f4248ade90c8f68c7af1284c161098de4c28c2850f1ec7b8e30f959793e571542ffc6532189409cb51c3d30dad78c4ad5165eda18b20d9826d8707d0f742e2ab492103a85bbd9ddf4f5720f6de7064feb0d39ee002219765bb07bcfb8b877f47abe270ddeda4f676108cecb6b9bb2ad484a4f00110100010003fd17a7490c22a79c59281fb7b20f5e6553ec0c1637ae382e8adaea295f50241037f8997cf42c1ce26417e015091451b15424b2c59eb8d4161b0975630408e394d3b00f88d4b4e18e2cc85e8251d4753a27c639c83f5ad4a571c4f19d7cd460b9b73c25ade730c99df09637bd173d8e3e981ac64432078263bb6dc30d3e974150dd0200d0ee05be3d4604d2146fb0457f31ba17c057560785aa804e8ca5530a7cd81d3440d0f4ba6851efcfd3954b7e68908fc0ba47f7ac37bf559c6c168b70d3a7c8cd0200da1c677c4bce06a068070f2b3733b0a714e88d62aa3f9a26c6f5216d48d5c2b5624144f3807c0df30be66b3268eeeca4df1fbded58faf49fc95dc3c35f134f8b01fd1396b6c0fc1b6c4f0eb8f5e44b8eace1e6073e20d0b8bc5385f86f1cf3f050f66af789f3ef1fc107b7f4421e19e0349c730c68f0a226981f4e889054fdb4dc149e8e889f04180102000905024d3c5c10021b0c000a0910a34d7e18c20c31bb1a03040085c8d62e16d05dc4e9dad64953c8a2eed8b6c12f92b1575eeaa6dcf7be9473dd5b24b37b6dffbb4e7c99ed1bd3cb11634be19b3e6e207bed7505c7ca111ccf47cb323bf1f8851eb6360e8034cbff8dd149993c959de89f8f77f38e7e98b8e3076323aa719328e2b408db5ec0d03936efd57422ba04f925cdc7b4c1af7590e40ab00200009501fe044d3c5c33010400b488c3e5f83f4d561f317817538d9d0397981e9aef1321ca68ebfae1cf8b7d388e19f4b5a24a82e2fbbf1c6c26557a6c5845307a03d815756f564ac7325b02bc83e87d5480a8fae848f07cb891f2d51ce7df83dcafdc12324517c86d472cc0ee10d47a68fd1d9ae49a6c19bbd36d82af597a0d88cc9c49de9df4e696fc1f0b5d0011010001fe030302e9030f3c783e14856063f16938530e148bc57a7aa3f3e4f90df9dceccdc779bc0835e1ad3d006e4a8d7b36d08b8e0de5a0d947254ecfbd22037e6572b426bcfdc517796b224b0036ff90bc574b5509bede85512f2eefb520fb4b02aa523ba739bff424a6fe81c5041f253f8d757e69a503d3563a104d0d49e9e890b9d0c26f96b55b743883b472caa7050c4acfd4a21f875bdf1258d88bd61224d303dc9df77f743137d51e6d5246b88c406780528fd9a3e15bab5452e5b93970d9dcc79f48b38651b9f15bfbcf6da452837e9cc70683d1bdca94507870f743e4ad902005812488dd342f836e72869afd00ce1850eea4cfa53ce10e3608e13d3c149394ee3cbd0e23d018fcbcb6e2ec5a1a22972d1d462ca05355d0d290dd2751e550d5efb38c6c89686344df64852bf4ff86638708f644e8
ec6bd4af9b50d8541cb91891a431326ab2e332faa7ae86cfb6e0540aa63160c1e5cdd5a4add518b303fff0a20117c6bc77f7cfbaf36b04c865c6c2b42754657374204b6579203220285253412c20656e637279707465642070726976617465206b65792988b804130102002205024d3c5c33021b03060b090807030206150802090a0b0416020301021e01021780000a0910d4984f961e35246b98940400908a73b6a6169f700434f076c6c79015a49bee37130eaf23aaa3cfa9ce60bfe4acaa7bc95f1146ada5867e0079babb38804891f4f0b8ebca57a86b249dee786161a755b7a342e68ccf3f78ed6440a93a6626beb9a37aa66afcd4f888790cb4bb46d94a4ae3eb3d7d3e6b00f6bfec940303e89ec5b32a1eaaacce66497d539328b00200009d01fe044d3c5c33010400a4e913f9442abcc7f1804ccab27d2f787ffa592077ca935a8bb23165bd8d57576acac647cc596b2c3f814518cc8c82953c7a4478f32e0cf645630a5ba38d9618ef2bc3add69d459ae3dece5cab778938d988239f8c5ae437807075e06c828019959c644ff05ef6a5a1dab72227c98e3a040b0cf219026640698d7a13d8538a570011010001fe030302e9030f3c783e148560f936097339ae381d63116efcf802ff8b1c9360767db5219cc987375702a4123fd8657d3e22700f23f95020d1b261eda5257e9a72f9a918e8ef22dd5b3323ae03bbc1923dd224db988cadc16acc04b120a9f8b7e84da9716c53e0334d7b66586ddb9014df604b41be1e960dcfcbc96f4ed150a1a0dd070b9eb14276b9b6be413a769a75b519a53d3ecc0c220e85cd91ca354d57e7344517e64b43b6e29823cbd87eae26e2b2e78e6dedfbb76e3e9f77bcb844f9a8932eb3db2c3f9e44316e6f5d60e9e2a56e46b72abe6b06dc9a31cc63f10023d1f5e12d2a3ee93b675c96f504af0001220991c88db759e231b3320dcedf814dcf723fd9857e3d72d66a0f2af26950b915abdf56c1596f46a325bf17ad4810d3535fb02a259b247ac3dbd4cc3ecf9c51b6c07cebb009c1506fba0a89321ec8683e3fd009a6e551d50243e2d5092fefb3321083a4bad91320dc624bd6b5dddf93553e3d53924c05bfebec1fb4bd47e89a1a889f04180102000905024d3c5c33021b0c000a0910d4984f961e35246b26c703ff7ee29ef53bc1ae1ead533c408fa136db508434e233d6e62be621e031e5940bbd4c08142aed0f82217e7c3e1ec8de574bc06ccf3c36633be41ad78a9eacd209f861cae7b064100758545cc9dd83db71806dc1cfd5fb9ae5c7474bba0c19c44034ae61bae5eca379383339dece94ff56ff7aa44a582f3e5c38f45763af577c0934b0020000" + +const dsaElGamalTestKeysHex = 
"9501e1044dfcb16a110400aa3e5c1a1f43dd28c2ffae8abf5cfce555ee874134d8ba0a0f7b868ce2214beddc74e5e1e21ded354a95d18acdaf69e5e342371a71fbb9093162e0c5f3427de413a7f2c157d83f5cd2f9d791256dc4f6f0e13f13c3302af27f2384075ab3021dff7a050e14854bbde0a1094174855fc02f0bae8e00a340d94a1f22b32e48485700a0cec672ac21258fb95f61de2ce1af74b2c4fa3e6703ff698edc9be22c02ae4d916e4fa223f819d46582c0516235848a77b577ea49018dcd5e9e15cff9dbb4663a1ae6dd7580fa40946d40c05f72814b0f88481207e6c0832c3bded4853ebba0a7e3bd8e8c66df33d5a537cd4acf946d1080e7a3dcea679cb2b11a72a33a2b6a9dc85f466ad2ddf4c3db6283fa645343286971e3dd700703fc0c4e290d45767f370831a90187e74e9972aae5bff488eeff7d620af0362bfb95c1a6c3413ab5d15a2e4139e5d07a54d72583914661ed6a87cce810be28a0aa8879a2dd39e52fb6fe800f4f181ac7e328f740cde3d09a05cecf9483e4cca4253e60d4429ffd679d9996a520012aad119878c941e3cf151459873bdfc2a9563472fe0303027a728f9feb3b864260a1babe83925ce794710cfd642ee4ae0e5b9d74cee49e9c67b6cd0ea5dfbb582132195a121356a1513e1bca73e5b80c58c7ccb4164453412f456c47616d616c2054657374204b65792031886204131102002205024dfcb16a021b03060b090807030206150802090a0b0416020301021e01021780000a091033af447ccd759b09fadd00a0b8fd6f5a790bad7e9f2dbb7632046dc4493588db009c087c6a9ba9f7f49fab221587a74788c00db4889ab00200009d0157044dfcb16a1004008dec3f9291205255ccff8c532318133a6840739dd68b03ba942676f9038612071447bf07d00d559c5c0875724ea16a4c774f80d8338b55fca691a0522e530e604215b467bbc9ccfd483a1da99d7bc2648b4318fdbd27766fc8bfad3fddb37c62b8ae7ccfe9577e9b8d1e77c1d417ed2c2ef02d52f4da11600d85d3229607943700030503ff506c94c87c8cab778e963b76cf63770f0a79bf48fb49d3b4e52234620fc9f7657f9f8d56c96a2b7c7826ae6b57ebb2221a3fe154b03b6637cea7e6d98e3e45d87cf8dc432f723d3d71f89c5192ac8d7290684d2c25ce55846a80c9a7823f6acd9bb29fa6cd71f20bc90eccfca20451d0c976e460e672b000df49466408d527affe0303027a728f9feb3b864260abd761730327bca2aaa4ea0525c175e92bf240682a0e83b226f97ecb2e935b62c9a133858ce31b271fa8eb41f6a1b3cd72a63025ce1a75ee4180dcc284884904181102000905024dfcb16a021b0c000a091033af447ccd759b09dd0b009e3c3e7296092c81bee5a19929462caaf2fff3ae26009e218c437a2340e7ea628149af1ec98ec091a43992b00200009501e1044dfcb1be1104009f61faa61aa43df75d128cbe53de528c4aec49ce9360c992e70c77072ad5623de0a3a6212771b66b39a30dad6781799e92608316900518ec01184a85d872365b7d2ba4bacfb5882ea3c2473d3750dc6178cc1cf82147fb58caa28b28e9f12f6d1efcb0534abed644156c91cca4ab78834268495160b2400bc422beb37d237c2300a0cac94911b6d493bda1e1fbc6feeca7cb7421d34b03fe22cec6ccb39675bb7b94a335c2b7be888fd3906a1125f33301d8aa6ec6ee6878f46f73961c8d57a3e9544d8ef2a2cbfd4d52da665b1266928cfe4cb347a58c412815f3b2d2369dec04b41ac9a71cc9547426d5ab941cccf3b18575637ccfb42df1a802df3cfe0a999f9e7109331170e3a221991bf868543960f8c816c28097e503fe319db10fb98049f3a57d7c80c420da66d56f3644371631fad3f0ff4040a19a4fedc2d07727a1b27576f75a4d28c47d8246f27071e12d7a8de62aad216ddbae6aa02efd6b8a3e2818cda48526549791ab277e447b3a36c57cefe9b592f5eab73959743fcc8e83cbefec03a329b55018b53eec196765ae40ef9e20521a603c551efe0303020950d53a146bf9c66034d00c23130cce95576a2ff78016ca471276e8227fb30b1ffbd92e61804fb0c3eff9e30b1a826ee8f3e4730b4d86273ca977b4164453412f456c47616d616c2054657374204b65792032886204131102002205024dfcb1be021b03060b090807030206150802090a0b0416020301021e01021780000a0910a86bf526325b21b22bd9009e34511620415c974750a20df5cb56b182f3b48e6600a0a9466cb1a1305a84953445f77d461593f1d42bc1b00200009d0157044dfcb1be1004009565a951da1ee87119d600c077198f1c1bceb0f7aa54552489298e41ff788fa8f0d43a69871f0f6f77ebdfb14a4260cf9fbeb65d5844b4272a1904dd95136d06c3da745dc46327dd44a0f16f60135914368c8039a34033862261806bb2c5ce1152e2840254697872c85441ccb7321431d75a747a4b
fb1d2c66362b51ce76311700030503fc0ea76601c196768070b7365a200e6ddb09307f262d5f39eec467b5f5784e22abdf1aa49226f59ab37cb49969d8f5230ea65caf56015abda62604544ed526c5c522bf92bed178a078789f6c807b6d34885688024a5bed9e9f8c58d11d4b82487b44c5f470c5606806a0443b79cadb45e0f897a561a53f724e5349b9267c75ca17fe0303020950d53a146bf9c660bc5f4ce8f072465e2d2466434320c1e712272fafc20e342fe7608101580fa1a1a367e60486a7cd1246b7ef5586cf5e10b32762b710a30144f12dd17dd4884904181102000905024dfcb1be021b0c000a0910a86bf526325b21b2904c00a0b2b66b4b39ccffda1d10f3ea8d58f827e30a8b8e009f4255b2d8112a184e40cde43a34e8655ca7809370b0020000" + +const signedMessageHex = "a3019bc0cbccc0c4b8d8b74ee2108fe16ec6d3ca490cbe362d3f8333d3f352531472538b8b13d353b97232f352158c20943157c71c16064626063656269052062e4e01987e9b6fccff4b7df3a34c534b23e679cbec3bc0f8f6e64dfb4b55fe3f8efa9ce110ddb5cd79faf1d753c51aecfa669f7e7aa043436596cccc3359cb7dd6bbe9ecaa69e5989d9e57209571edc0b2fa7f57b9b79a64ee6e99ce1371395fee92fec2796f7b15a77c386ff668ee27f6d38f0baa6c438b561657377bf6acff3c5947befd7bf4c196252f1d6e5c524d0300" + +const signedTextMessageHex = "a3019bc0cbccc8c4b8d8b74ee2108fe16ec6d36a250cbece0c178233d3f352531472538b8b13d35379b97232f352158ca0b4312f57c71c1646462606365626906a062e4e019811591798ff99bf8afee860b0d8a8c2a85c3387e3bcf0bb3b17987f2bbcfab2aa526d930cbfd3d98757184df3995c9f3e7790e36e3e9779f06089d4c64e9e47dd6202cb6e9bc73c5d11bb59fbaf89d22d8dc7cf199ddf17af96e77c5f65f9bbed56f427bd8db7af37f6c9984bf9385efaf5f184f986fb3e6adb0ecfe35bbf92d16a7aa2a344fb0bc52fb7624f0200" + +const signedEncryptedMessageHex = "c18c032a67d68660df41c70103ff5a84c9a72f80e74ef0384c2d6a9ebfe2b09e06a8f298394f6d2abf174e40934ab0ec01fb2d0ddf21211c6fe13eb238563663b017a6b44edca552eb4736c4b7dc6ed907dd9e12a21b51b64b46f902f76fb7aaf805c1db8070574d8d0431a23e324a750f77fb72340a17a42300ee4ca8207301e95a731da229a63ab9c6b44541fbd2c11d016d810b3b3b2b38f15b5b40f0a4910332829c2062f1f7cc61f5b03677d73c54cafa1004ced41f315d46444946faae571d6f426e6dbd45d9780eb466df042005298adabf7ce0ef766dfeb94cd449c7ed0046c880339599c4711af073ce649b1e237c40b50a5536283e03bdbb7afad78bd08707715c67fb43295f905b4c479178809d429a8e167a9a8c6dfd8ab20b4edebdc38d6dec879a3202e1b752690d9bb5b0c07c5a227c79cc200e713a99251a4219d62ad5556900cf69bd384b6c8e726c7be267471d0d23af956da165af4af757246c2ebcc302b39e8ef2fccb4971b234fcda22d759ddb20e27269ee7f7fe67898a9de721bfa02ab0becaa046d00ea16cb1afc4e2eab40d0ac17121c565686e5cbd0cbdfbd9d6db5c70278b9c9db5a83176d04f61fbfbc4471d721340ede2746e5c312ded4f26787985af92b64fae3f253dbdde97f6a5e1996fd4d865599e32ff76325d3e9abe93184c02988ee89a4504356a4ef3b9b7a57cbb9637ca90af34a7676b9ef559325c3cca4e29d69fec1887f5440bb101361d744ad292a8547f22b4f22b419a42aa836169b89190f46d9560824cb2ac6e8771de8223216a5e647e132ab9eebcba89569ab339cb1c3d70fe806b31f4f4c600b4103b8d7583ebff16e43dcda551e6530f975122eb8b29" + +const verifiedSignatureEncryptedMessageHex = "c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const unverifiedSignatureEncryptedMessageHex = 
"c2b304000108000605026048f6d600210910a34d7e18c20c31bb1621045fb74b1d03b1e3cb31bc2f8aa34d7e18c20c31bb9a3b0400a32ddac1af259c1b0abab0041327ea04970944401978fb647dd1cf9aba4f164e43f0d8a9389501886474bdd4a6e77f6aea945c07dfbf87743835b44cc2c39a1f9aeecfa83135abc92e18e50396f2e6a06c44e0188b0081effbfb4160d28f118d4ff73dd199a102e47cffd8c7ff2bacd83ae72b5820c021a486766dd587b5da61" + +const signedEncryptedMessage2Hex = "85010e03cf6a7abcd43e36731003fb057f5495b79db367e277cdbe4ab90d924ddee0c0381494112ff8c1238fb0184af35d1731573b01bc4c55ecacd2aafbe2003d36310487d1ecc9ac994f3fada7f9f7f5c3a64248ab7782906c82c6ff1303b69a84d9a9529c31ecafbcdb9ba87e05439897d87e8a2a3dec55e14df19bba7f7bd316291c002ae2efd24f83f9e3441203fc081c0c23dc3092a454ca8a082b27f631abf73aca341686982e8fbda7e0e7d863941d68f3de4a755c2964407f4b5e0477b3196b8c93d551dd23c8beef7d0f03fbb1b6066f78907faf4bf1677d8fcec72651124080e0b7feae6b476e72ab207d38d90b958759fdedfc3c6c35717c9dbfc979b3cfbbff0a76d24a5e57056bb88acbd2a901ef64bc6e4db02adc05b6250ff378de81dca18c1910ab257dff1b9771b85bb9bbe0a69f5989e6d1710a35e6dfcceb7d8fb5ccea8db3932b3d9ff3fe0d327597c68b3622aec8e3716c83a6c93f497543b459b58ba504ed6bcaa747d37d2ca746fe49ae0a6ce4a8b694234e941b5159ff8bd34b9023da2814076163b86f40eed7c9472f81b551452d5ab87004a373c0172ec87ea6ce42ccfa7dbdad66b745496c4873d8019e8c28d6b3" + +const signatureEncryptedMessage2Hex = "c24604001102000605024dfd0166000a091033af447ccd759b09bae600a096ec5e63ecf0a403085e10f75cc3bab327663282009f51fad9df457ed8d2b70d8a73c76e0443eac0f377" + +const symmetricallyEncryptedCompressedHex = "c32e040903085a357c1a7b5614ed00cc0d1d92f428162058b3f558a0fb0980d221ebac6c97d5eda4e0fe32f6e706e94dd263012d6ca1ef8c4bbd324098225e603a10c85ebf09cbf7b5aeeb5ce46381a52edc51038b76a8454483be74e6dcd1e50d5689a8ae7eceaeefed98a0023d49b22eb1f65c2aa1ef1783bb5e1995713b0457102ec3c3075fe871267ffa4b686ad5d52000d857" + +const dsaTestKeyHex = "9901a2044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const dsaTestKeyPrivateHex = 
"9501bb044d6c49de110400cb5ce438cf9250907ac2ba5bf6547931270b89f7c4b53d9d09f4d0213a5ef2ec1f26806d3d259960f872a4a102ef1581ea3f6d6882d15134f21ef6a84de933cc34c47cc9106efe3bd84c6aec12e78523661e29bc1a61f0aab17fa58a627fd5fd33f5149153fbe8cd70edf3d963bc287ef875270ff14b5bfdd1bca4483793923b00a0fe46d76cb6e4cbdc568435cd5480af3266d610d303fe33ae8273f30a96d4d34f42fa28ce1112d425b2e3bf7ea553d526e2db6b9255e9dc7419045ce817214d1a0056dbc8d5289956a4b1b69f20f1105124096e6a438f41f2e2495923b0f34b70642607d45559595c7fe94d7fa85fc41bf7d68c1fd509ebeaa5f315f6059a446b9369c277597e4f474a9591535354c7e7f4fd98a08aa60400b130c24ff20bdfbf683313f5daebf1c9b34b3bdadfc77f2ddd72ee1fb17e56c473664bc21d66467655dd74b9005e3a2bacce446f1920cd7017231ae447b67036c9b431b8179deacd5120262d894c26bc015bffe3d827ba7087ad9b700d2ca1f6d16cc1786581e5dd065f293c31209300f9b0afcc3f7c08dd26d0a22d87580b4d00009f592e0619d823953577d4503061706843317e4fee083db41054657374204b65792033202844534129886204131102002205024d6c49de021b03060b090807030206150802090a0b0416020301021e01021780000a0910338934250ccc03607e0400a0bdb9193e8a6b96fc2dfc108ae848914b504481f100a09c4dc148cb693293a67af24dd40d2b13a9e36794" + +const p256TestKeyHex = "98520456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b7754b8560456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b6030108078861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const p256TestKeyPrivateHex = "94a50456e5b83813082a8648ce3d030107020304a2072cd6d21321266c758cc5b83fab0510f751cb8d91897cddb7047d8d6f185546e2107111b0a95cb8ef063c33245502af7a65f004d5919d93ee74eb71a66253fe070302f0c2bfb0b6c30f87ee1599472b8636477eab23ced13b271886a4b50ed34c9d8436af5af5b8f88921f0efba6ef8c37c459bbb88bc1c6a13bbd25c4ce9b1e97679569ee77645d469bf4b43de637f5561b424502d3235362054657374204b6579203c696e76616c6964406578616d706c652e636f6d3e8879041313080021050256e5b838021b03050b09080702061508090a0b020416020301021e01021780000a0910d44a2c495918513e54e50100dfa64f97d9b47766fc1943c6314ba3f2b2a103d71ad286dc5b1efb96a345b0c80100dbc8150b54241f559da6ef4baacea6d31902b4f4b1bdc09b34bf0502334b77549ca90456e5b83812082a8648ce3d030107020304bfe3cea9cee13486f8d518aa487fecab451f25467d2bf08e58f63e5fa525d5482133e6a79299c274b068ef0be448152ad65cf11cf764348588ca4f6a0bcf22b603010807fe0703027510012471a603cfee2968dce19f732721ddf03e966fd133b4e3c7a685b788705cbc46fb026dc94724b830c9edbaecd2fb2c662f23169516cacd1fe423f0475c364ecc10abcabcfd4bbbda1a36a1bd8861041813080009050256e5b838021b0c000a0910d44a2c495918513e4a4800ff49d589fa64024ad30be363a032e3a0e0e6f5db56ba4c73db850518bf0121b8f20100fd78e065f4c70ea5be9df319ea67e493b936fc78da834a71828043d3154af56e" + +const armoredPrivateKeyBlock = `-----BEGIN PGP PRIVATE KEY BLOCK----- +Version: GnuPG v1.4.10 (GNU/Linux) + +lQHYBE2rFNoBBADFwqWQIW/DSqcB4yCQqnAFTJ27qS5AnB46ccAdw3u4Greeu3Bp +idpoHdjULy7zSKlwR1EA873dO/k/e11Ml3dlAFUinWeejWaK2ugFP6JjiieSsrKn 
+vWNicdCS4HTWn0X4sjl0ZiAygw6GNhqEQ3cpLeL0g8E9hnYzJKQ0LWJa0QARAQAB +AAP/TB81EIo2VYNmTq0pK1ZXwUpxCrvAAIG3hwKjEzHcbQznsjNvPUihZ+NZQ6+X +0HCfPAdPkGDCLCb6NavcSW+iNnLTrdDnSI6+3BbIONqWWdRDYJhqZCkqmG6zqSfL +IdkJgCw94taUg5BWP/AAeQrhzjChvpMQTVKQL5mnuZbUCeMCAN5qrYMP2S9iKdnk +VANIFj7656ARKt/nf4CBzxcpHTyB8+d2CtPDKCmlJP6vL8t58Jmih+kHJMvC0dzn +gr5f5+sCAOOe5gt9e0am7AvQWhdbHVfJU0TQJx+m2OiCJAqGTB1nvtBLHdJnfdC9 +TnXXQ6ZXibqLyBies/xeY2sCKL5qtTMCAKnX9+9d/5yQxRyrQUHt1NYhaXZnJbHx +q4ytu0eWz+5i68IYUSK69jJ1NWPM0T6SkqpB3KCAIv68VFm9PxqG1KmhSrQIVGVz +dCBLZXmIuAQTAQIAIgUCTasU2gIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AA +CgkQO9o98PRieSoLhgQAkLEZex02Qt7vGhZzMwuN0R22w3VwyYyjBx+fM3JFETy1 +ut4xcLJoJfIaF5ZS38UplgakHG0FQ+b49i8dMij0aZmDqGxrew1m4kBfjXw9B/v+ +eIqpODryb6cOSwyQFH0lQkXC040pjq9YqDsO5w0WYNXYKDnzRV0p4H1pweo2VDid +AdgETasU2gEEAN46UPeWRqKHvA99arOxee38fBt2CI08iiWyI8T3J6ivtFGixSqV +bRcPxYO/qLpVe5l84Nb3X71GfVXlc9hyv7CD6tcowL59hg1E/DC5ydI8K8iEpUmK +/UnHdIY5h8/kqgGxkY/T/hgp5fRQgW1ZoZxLajVlMRZ8W4tFtT0DeA+JABEBAAEA +A/0bE1jaaZKj6ndqcw86jd+QtD1SF+Cf21CWRNeLKnUds4FRRvclzTyUMuWPkUeX +TaNNsUOFqBsf6QQ2oHUBBK4VCHffHCW4ZEX2cd6umz7mpHW6XzN4DECEzOVksXtc +lUC1j4UB91DC/RNQqwX1IV2QLSwssVotPMPqhOi0ZLNY7wIA3n7DWKInxYZZ4K+6 +rQ+POsz6brEoRHwr8x6XlHenq1Oki855pSa1yXIARoTrSJkBtn5oI+f8AzrnN0BN +oyeQAwIA/7E++3HDi5aweWrViiul9cd3rcsS0dEnksPhvS0ozCJiHsq/6GFmy7J8 +QSHZPteedBnZyNp5jR+H7cIfVN3KgwH/Skq4PsuPhDq5TKK6i8Pc1WW8MA6DXTdU +nLkX7RGmMwjC0DBf7KWAlPjFaONAX3a8ndnz//fy1q7u2l9AZwrj1qa1iJ8EGAEC +AAkFAk2rFNoCGwwACgkQO9o98PRieSo2/QP/WTzr4ioINVsvN1akKuekmEMI3LAp +BfHwatufxxP1U+3Si/6YIk7kuPB9Hs+pRqCXzbvPRrI8NHZBmc8qIGthishdCYad +AHcVnXjtxrULkQFGbGvhKURLvS9WnzD/m1K2zzwxzkPTzT9/Yf06O6Mal5AdugPL +VrM0m72/jnpKo04= +=zNCn +-----END PGP PRIVATE KEY BLOCK-----` + +const e2ePublicKey = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Charset: UTF-8 + +xv8AAABSBAAAAAATCCqGSM49AwEHAgME1LRoXSpOxtHXDUdmuvzchyg6005qIBJ4 +sfaSxX7QgH9RV2ONUhC+WiayCNADq+UMzuR/vunSr4aQffXvuGnR383/AAAAFDxk +Z2lsQHlhaG9vLWluYy5jb20+wv8AAACGBBATCAA4/wAAAAWCVGvAG/8AAAACiwn/ +AAAACZC2VkQCOjdvYf8AAAAFlQgJCgv/AAAAA5YBAv8AAAACngEAAE1BAP0X8veD +24IjmI5/C6ZAfVNXxgZZFhTAACFX75jUA3oD6AEAzoSwKf1aqH6oq62qhCN/pekX ++WAsVMBhNwzLpqtCRjLO/wAAAFYEAAAAABIIKoZIzj0DAQcCAwT50ain7vXiIRv8 +B1DO3x3cE/aattZ5sHNixJzRCXi2vQIA5QmOxZ6b5jjUekNbdHG3SZi1a2Ak5mfX +fRxC/5VGAwEIB8L/AAAAZQQYEwgAGP8AAAAFglRrwBz/AAAACZC2VkQCOjdvYQAA +FJAA9isX3xtGyMLYwp2F3nXm7QEdY5bq5VUcD/RJlj792VwA/1wH0pCzVLl4Q9F9 +ex7En5r7rHR5xwX82Msc+Rq9dSyO +=7MrZ +-----END PGP PUBLIC KEY BLOCK-----` + +const dsaKeyWithSHA512 = 
`9901a2044f04b07f110400db244efecc7316553ee08d179972aab87bb1214de7692593fcf5b6feb1c80fba268722dd464748539b85b81d574cd2d7ad0ca2444de4d849b8756bad7768c486c83a824f9bba4af773d11742bdfb4ac3b89ef8cc9452d4aad31a37e4b630d33927bff68e879284a1672659b8b298222fc68f370f3e24dccacc4a862442b9438b00a0ea444a24088dc23e26df7daf8f43cba3bffc4fe703fe3d6cd7fdca199d54ed8ae501c30e3ec7871ea9cdd4cf63cfe6fc82281d70a5b8bb493f922cd99fba5f088935596af087c8d818d5ec4d0b9afa7f070b3d7c1dd32a84fca08d8280b4890c8da1dde334de8e3cad8450eed2a4a4fcc2db7b8e5528b869a74a7f0189e11ef097ef1253582348de072bb07a9fa8ab838e993cef0ee203ff49298723e2d1f549b00559f886cd417a41692ce58d0ac1307dc71d85a8af21b0cf6eaa14baf2922d3a70389bedf17cc514ba0febbd107675a372fe84b90162a9e88b14d4b1c6be855b96b33fb198c46f058568817780435b6936167ebb3724b680f32bf27382ada2e37a879b3d9de2abe0c3f399350afd1ad438883f4791e2e3b4184453412068617368207472756e636174696f6e207465737488620413110a002205024f04b07f021b03060b090807030206150802090a0b0416020301021e01021780000a0910ef20e0cefca131581318009e2bf3bf047a44d75a9bacd00161ee04d435522397009a03a60d51bd8a568c6c021c8d7cf1be8d990d6417b0020003` + +const unknownHashFunctionHex = `8a00000040040001990006050253863c24000a09103b4fe6acc0b21f32ffff01010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101010101` + +const rsaSignatureBadMPIlength = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101010101` + +const missingHashFunctionHex = `8a00000040040001030006050253863c24000a09103b4fe6acc0b21f32ffff0101010101010101010101010101010101010101010101010101010101010101010101` + +const campbellQuine = `a0b001000300fcffa0b001000d00f2ff000300fcffa0b001000d00f2ff8270a01c00000500faff8270a01c00000500faff000500faff001400ebff8270a01c00000500faff000500faff001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400001400ebff428821c400000000ffff000000ffff000b00f4ff428821c400000000ffff000000ffff000b00f4ff0233214c40000100feff000233214c40000100feff0000` + +const keyV4forVerifyingSignedMessageV3 = `-----BEGIN PGP PUBLIC KEY BLOCK----- +Comment: GPGTools - https://gpgtools.org + +mI0EVfxoFQEEAMBIqmbDfYygcvP6Phr1wr1XI41IF7Qixqybs/foBF8qqblD9gIY +BKpXjnBOtbkcVOJ0nljd3/sQIfH4E0vQwK5/4YRQSI59eKOqd6Fx+fWQOLG+uu6z +tewpeCj9LLHvibx/Sc7VWRnrznia6ftrXxJ/wHMezSab3tnGC0YPVdGNABEBAAG0 +JEdvY3J5cHRvIFRlc3QgS2V5IDx0aGVtYXhAZ21haWwuY29tPoi5BBMBCgAjBQJV +/GgVAhsDBwsJCAcDAgEGFQgCCQoLBBYCAwECHgECF4AACgkQeXnQmhdGW9PFVAP+ +K7TU0qX5ArvIONIxh/WAweyOk884c5cE8f+3NOPOOCRGyVy0FId5A7MmD5GOQh4H +JseOZVEVCqlmngEvtHZb3U1VYtVGE5WZ+6rQhGsMcWP5qaT4soYwMBlSYxgYwQcx 
+YhN9qOr292f9j2Y//TTIJmZT4Oa+lMxhWdqTfX+qMgG4jQRV/GgVAQQArhFSiij1
+b+hT3dnapbEU+23Z1yTu1DfF6zsxQ4XQWEV3eR8v+8mEDDNcz8oyyF56k6UQ3rXi
+UMTIwRDg4V6SbZmaFbZYCOwp/EmXJ3rfhm7z7yzXj2OFN22luuqbyVhuL7LRdB0M
+pxgmjXb4tTvfgKd26x34S+QqUJ7W6uprY4sAEQEAAYifBBgBCgAJBQJV/GgVAhsM
+AAoJEHl50JoXRlvT7y8D/02ckx4OMkKBZo7viyrBw0MLG92i+DC2bs35PooHR6zz
+786mitjOp5z2QWNLBvxC70S0qVfCIz8jKupO1J6rq6Z8CcbLF3qjm6h1omUBf8Nd
+EfXKD2/2HV6zMKVknnKzIEzauh+eCKS2CeJUSSSryap/QLVAjRnckaES/OsEWhNB
+=RZia
+-----END PGP PUBLIC KEY BLOCK-----
+`
+
+const signedMessageV3 = `-----BEGIN PGP MESSAGE-----
+Comment: GPGTools - https://gpgtools.org
+
+owGbwMvMwMVYWXlhlrhb9GXG03JJDKF/MtxDMjKLFYAoUaEktbhEITe1uDgxPVWP
+q5NhKjMrWAVcC9evD8z/bF/uWNjqtk/X3y5/38XGRQHm/57rrDRYuGnTw597Xqka
+uM3137/hH3Os+Jf2dc0fXOITKwJvXJvecPVs0ta+Vg7ZO1MLn8w58Xx+6L58mbka
+DGHyU9yTueZE8D+QF/Tz28Y78dqtF56R1VPn9Xw4uJqrWYdd7b3vIZ1V6R4Nh05d
+iT57d/OhWwA=
+=hG7R
+-----END PGP MESSAGE-----
+`
+
+// https://mailarchive.ietf.org/arch/msg/openpgp/9SheW_LENE0Kxf7haNllovPyAdY/
+const v5PrivKey = `-----BEGIN PGP PRIVATE KEY BLOCK-----
+
+lGEFXJH05BYAAAAtCSsGAQQB2kcPAQEHQFhZlVcVVtwf+21xNQPX+ecMJJBL0MPd
+fj75iux+my8QAAAAAAAiAQCHZ1SnSUmWqxEsoI6facIVZQu6mph3cBFzzTvcm5lA
+Ng5ctBhlbW1hLmdvbGRtYW5AZXhhbXBsZS5uZXSIlgUTFggASCIhBRk0e8mHJGQC
+X5nfPsLgAA7ZiEiS4fez6kyUAJFZVptUBQJckfTkAhsDBQsJCAcCAyICAQYVCgkI
+CwIEFgIDAQIeBwIXgAAA9cAA/jiR3yMsZMeEQ40u6uzEoXa6UXeV/S3wwJAXRJy9
+M8s0AP9vuL/7AyTfFXwwzSjDnYmzS0qAhbLDQ643N+MXGBJ2BZxmBVyR9OQSAAAA
+MgorBgEEAZdVAQUBAQdA+nysrzml2UCweAqtpDuncSPlvrcBWKU0yfU0YvYWWAoD
+AQgHAAAAAAAiAP9OdAPppjU1WwpqjIItkxr+VPQRT8Zm/Riw7U3F6v3OiBFHiHoF
+GBYIACwiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVAUCXJH05AIb
+DAAAOSQBAP4BOOIR/sGLNMOfeb5fPs/02QMieoiSjIBnijhob2U5AQC+RtOHCHx7
+TcIYl5/Uyoi+FOvPLcNw4hOv2nwUzSSVAw==
+=IiS2
+-----END PGP PRIVATE KEY BLOCK-----`
+
+// Generated with the above private key
+const v5PrivKeyMsg = `-----BEGIN PGP MESSAGE-----
+Version: OpenPGP.js v4.10.7
+Comment: https://openpgpjs.org
+
+xA0DAQoWGTR7yYckZAIByxF1B21zZy50eHRfbIGSdGVzdMJ3BQEWCgAGBQJf
+bIGSACMiIQUZNHvJhyRkAl+Z3z7C4AAO2YhIkuH3s+pMlACRWVabVDQvAP9G
+y29VPonFXqi2zKkpZrvyvZxg+n5e8Nt9wNbuxeCd3QD/TtO2s+JvjrE4Siwv
+UQdl5MlBka1QSNbMq2Bz7XwNPg4=
+=6lbM
+-----END PGP MESSAGE-----`
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
new file mode 100644
index 0000000000..14f58548bd
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/s2k/s2k.go
@@ -0,0 +1,367 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package s2k implements the various OpenPGP string-to-key transforms as
+// specified in RFC 4880 section 3.7.1.
+package s2k // import "github.com/ProtonMail/go-crypto/openpgp/s2k"
+
+import (
+	"crypto"
+	"hash"
+	"io"
+	"strconv"
+
+	"github.com/ProtonMail/go-crypto/openpgp/errors"
+	"github.com/ProtonMail/go-crypto/openpgp/internal/algorithm"
+)
+
+// Config collects configuration parameters for s2k key-stretching
+// transformations. A nil *Config is valid and results in all default
+// values. Currently, Config is used only by the Serialize function in
+// this package.
+type Config struct {
+	// S2KMode is the mode of the s2k function.
+	// It can be 0 (simple), 1 (salted) or 3 (iterated);
+	// 2 is reserved and 100-110 are private/experimental.
+	S2KMode uint8
+	// Hash is the default hash function to be used. If
+	// unset, SHA256 is used.
+	Hash crypto.Hash
+	// S2KCount is only used for symmetric encryption. It
+	// determines the strength of the passphrase stretching when
+	// the said passphrase is hashed to produce a key. S2KCount
+	// should be between 65536 and 65011712, inclusive. If Config
+	// is nil or S2KCount is 0, the value 16777216 is used. Not all
+	// values in the above range can be represented. S2KCount will
+	// be rounded up to the next representable value if it cannot
+	// be encoded exactly. See RFC 4880 Section 3.7.1.3.
+	S2KCount int
+}
+
+// Params contains all the parameters of the s2k packet
+type Params struct {
+	// mode is the mode of the s2k function.
+	// It can be 0 (simple), 1 (salted) or 3 (iterated);
+	// 2 is reserved and 100-110 are private/experimental.
+	mode uint8
+	// hashId is the ID of the hash function used in any of the modes
+	hashId byte
+	// salt is a byte array to use as a salt in the hashing process
+	salt []byte
+	// countByte is used to determine how many rounds of hashing are to
+	// be performed in s2k mode 3. See RFC 4880 Section 3.7.1.3.
+	countByte byte
+}
+
+func (c *Config) hash() crypto.Hash {
+	if c == nil || uint(c.Hash) == 0 {
+		return crypto.SHA256
+	}
+
+	return c.Hash
+}
+
+// EncodedCount returns the encoded S2K iteration count octet, clamping
+// S2KCount into the representable range.
+func (c *Config) EncodedCount() uint8 {
+	if c == nil || c.S2KCount == 0 {
+		return 224 // The common case, corresponding to a count of 16777216
+	}
+
+	i := c.S2KCount
+
+	switch {
+	case i < 65536:
+		i = 65536
+	case i > 65011712:
+		i = 65011712
+	}
+
+	return encodeCount(i)
+}
+
+// encodeCount converts an iterative "count" in the range 65536 to
+// 65011712, inclusive, to an encoded count. The return value is the
+// octet that is actually stored in the GPG file. encodeCount panics
+// if i is not in the above range (EncodedCount above takes care to
+// pass i in the correct range). See RFC 4880 Section 3.7.1.3.
+func encodeCount(i int) uint8 {
+	if i < 65536 || i > 65011712 {
+		panic("count arg i outside the required range")
+	}
+
+	for encoded := 96; encoded < 256; encoded++ {
+		count := decodeCount(uint8(encoded))
+		if count >= i {
+			return uint8(encoded)
+		}
+	}
+
+	return 255
+}
+
+// decodeCount returns the s2k mode 3 iterative "count" corresponding to
+// the encoded octet c.
+func decodeCount(c uint8) int {
+	return (16 + int(c&15)) << (uint32(c>>4) + 6)
+}
+
+// Simple writes to out the result of computing the Simple S2K function (RFC
+// 4880, section 3.7.1.1) using the given hash and input passphrase.
+func Simple(out []byte, h hash.Hash, in []byte) {
+	Salted(out, h, in, nil)
+}
+
+var zero [1]byte
+
+// Salted writes to out the result of computing the Salted S2K function (RFC
+// 4880, section 3.7.1.2) using the given hash, input passphrase and salt.
+func Salted(out []byte, h hash.Hash, in []byte, salt []byte) {
+	done := 0
+	var digest []byte
+
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		h.Write(salt)
+		h.Write(in)
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Iterated writes to out the result of computing the Iterated and Salted S2K
+// function (RFC 4880, section 3.7.1.3) using the given hash, input passphrase,
+// salt and iteration count.
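+//
+// A minimal sketch of calling it directly (normally the parameters come from
+// Parse/ParseIntoParams; hypothetical values, assumes crypto/sha256 is
+// imported):
+//
+//	key := make([]byte, 32)
+//	salt := []byte{1, 2, 3, 4, 5, 6, 7, 8}
+//	// decodeCount(224) == (16+0) << (14+6) == 16777216 iterations
+//	Iterated(key, sha256.New(), []byte("passphrase"), salt, decodeCount(224))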
+func Iterated(out []byte, h hash.Hash, in []byte, salt []byte, count int) {
+	combined := make([]byte, len(in)+len(salt))
+	copy(combined, salt)
+	copy(combined[len(salt):], in)
+
+	if count < len(combined) {
+		count = len(combined)
+	}
+
+	done := 0
+	var digest []byte
+	for i := 0; done < len(out); i++ {
+		h.Reset()
+		for j := 0; j < i; j++ {
+			h.Write(zero[:])
+		}
+		written := 0
+		for written < count {
+			if written+len(combined) > count {
+				todo := count - written
+				h.Write(combined[:todo])
+				written = count
+			} else {
+				h.Write(combined)
+				written += len(combined)
+			}
+		}
+		digest = h.Sum(digest[:0])
+		n := copy(out[done:], digest)
+		done += n
+	}
+}
+
+// Generate generates valid parameters from the given configuration.
+// It always uses the iterated and salted (mode 3) method.
+func Generate(rand io.Reader, c *Config) (*Params, error) {
+	// Use the configured hash; c.hash() falls back to the documented
+	// SHA256 default when c is nil or c.Hash is unset.
+	hashId, ok := HashToHashId(c.hash())
+	if !ok {
+		return nil, errors.UnsupportedError("no such hash")
+	}
+
+	params := &Params{
+		mode:      3, // Enforce the iterated and salted method
+		hashId:    hashId,
+		salt:      make([]byte, 8),
+		countByte: c.EncodedCount(),
+	}
+
+	if _, err := io.ReadFull(rand, params.salt); err != nil {
+		return nil, err
+	}
+
+	return params, nil
+}
+
+// Parse reads a binary specification for a string-to-key transformation from r
+// and returns a function which performs that transform. If the S2K is a special
+// GNU extension that indicates that the private key is missing, then the error
+// returned is errors.ErrDummyPrivateKey.
+func Parse(r io.Reader) (f func(out, in []byte), err error) {
+	params, err := ParseIntoParams(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return params.Function()
+}
+
+// ParseIntoParams reads a binary specification for a string-to-key
+// transformation from r and returns a struct describing the s2k parameters.
+func ParseIntoParams(r io.Reader) (params *Params, err error) {
+	var buf [9]byte
+
+	_, err = io.ReadFull(r, buf[:2])
+	if err != nil {
+		return
+	}
+
+	params = &Params{
+		mode:   buf[0],
+		hashId: buf[1],
+	}
+
+	switch params.mode {
+	case 0:
+		return params, nil
+	case 1:
+		_, err = io.ReadFull(r, buf[:8])
+		if err != nil {
+			return nil, err
+		}
+
+		params.salt = buf[:8]
+		return params, nil
+	case 3:
+		_, err = io.ReadFull(r, buf[:9])
+		if err != nil {
+			return nil, err
+		}
+
+		params.salt = buf[:8]
+		params.countByte = buf[8]
+		return params, nil
+	case 101:
+		// This is a GNU extension. See
+		// https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=doc/DETAILS;h=fe55ae16ab4e26d8356dc574c9e8bc935e71aef1;hb=23191d7851eae2217ecdac6484349849a24fd94a#l1109
+		if _, err = io.ReadFull(r, buf[:4]); err != nil {
+			return nil, err
+		}
+		if buf[0] == 'G' && buf[1] == 'N' && buf[2] == 'U' && buf[3] == 1 {
+			return params, nil
+		}
+		return nil, errors.UnsupportedError("GNU S2K extension")
+	}
+
+	return nil, errors.UnsupportedError("S2K function")
+}
+
+func (params *Params) Dummy() bool {
+	return params != nil && params.mode == 101
+}
+
+func (params *Params) Function() (f func(out, in []byte), err error) {
+	if params.Dummy() {
+		return nil, errors.ErrDummyPrivateKey("dummy key found")
+	}
+	hashObj, ok := HashIdToHash(params.hashId)
+	if !ok {
+		return nil, errors.UnsupportedError("hash for S2K function: " + strconv.Itoa(int(params.hashId)))
+	}
+	if !hashObj.Available() {
+		return nil, errors.UnsupportedError("hash not available: " + strconv.Itoa(int(hashObj)))
+	}
+
+	switch params.mode {
+	case 0:
+		f := func(out, in []byte) {
+			Simple(out, hashObj.New(), in)
+		}
+
+		return f, nil
+	case 1:
+		f := func(out, in []byte) {
+			Salted(out, hashObj.New(), in, params.salt)
+		}
+
+		return f, nil
+	case 3:
+		f := func(out, in []byte) {
+			Iterated(out, hashObj.New(), in, params.salt, decodeCount(params.countByte))
+		}
+
+		return f, nil
+	}
+
+	return nil, errors.UnsupportedError("S2K function")
+}
+
+func (params *Params) Serialize(w io.Writer) (err error) {
+	if _, err = w.Write([]byte{params.mode}); err != nil {
+		return
+	}
+	if _, err = w.Write([]byte{params.hashId}); err != nil {
+		return
+	}
+	if params.Dummy() {
+		_, err = w.Write(append([]byte("GNU"), 1))
+		return
+	}
+	if params.mode > 0 {
+		if _, err = w.Write(params.salt); err != nil {
+			return
+		}
+		if params.mode == 3 {
+			_, err = w.Write([]byte{params.countByte})
+		}
+	}
+	return
+}
+
+// Serialize salts and stretches the given passphrase and writes the
+// resulting key into key. It also serializes an S2K descriptor to
+// w. The key stretching can be configured with c, which may be
+// nil. In that case, sensible defaults will be used.
+func Serialize(w io.Writer, key []byte, rand io.Reader, passphrase []byte, c *Config) error {
+	params, err := Generate(rand, c)
+	if err != nil {
+		return err
+	}
+	err = params.Serialize(w)
+	if err != nil {
+		return err
+	}
+
+	f, err := params.Function()
+	if err != nil {
+		return err
+	}
+	f(key, passphrase)
+	return nil
+}
+
+// HashIdToHash returns a crypto.Hash which corresponds to the given OpenPGP
+// hash id.
+func HashIdToHash(id byte) (h crypto.Hash, ok bool) {
+	if hash, ok := algorithm.HashById[id]; ok {
+		return hash.HashFunc(), true
+	}
+	return 0, false
+}
+
+// HashIdToString returns the name of the hash function corresponding to the
+// given OpenPGP hash id.
+func HashIdToString(id byte) (name string, ok bool) {
+	if hash, ok := algorithm.HashById[id]; ok {
+		return hash.String(), true
+	}
+	return "", false
+}
+
+// HashToHashId returns an OpenPGP hash id which corresponds to the given
+// crypto.Hash.
+func HashToHashId(h crypto.Hash) (id byte, ok bool) {
+	for id, hash := range algorithm.HashById {
+		if hash.HashFunc() == h {
+			return id, true
+		}
+	}
+	return 0, false
+}
diff --git a/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
new file mode 100644
index 0000000000..bb74b13872
--- /dev/null
+++ b/vendor/github.com/ProtonMail/go-crypto/openpgp/write.go
@@ -0,0 +1,559 @@
+// Copyright 2011 The Go Authors.
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package openpgp + +import ( + "crypto" + "hash" + "io" + "strconv" + "time" + + "github.com/ProtonMail/go-crypto/openpgp/armor" + "github.com/ProtonMail/go-crypto/openpgp/errors" + "github.com/ProtonMail/go-crypto/openpgp/packet" + "github.com/ProtonMail/go-crypto/openpgp/s2k" +) + +// DetachSign signs message with the private key from signer (which must +// already have been decrypted) and writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// ArmoredDetachSign signs message with the private key from signer (which +// must already have been decrypted) and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSign(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) (err error) { + return armoredDetachSign(w, signer, message, packet.SigTypeBinary, config) +} + +// DetachSignText signs message (after canonicalising the line endings) with +// the private key from signer (which must already have been decrypted) and +// writes the signature to w. +// If config is nil, sensible defaults will be used. +func DetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return detachSign(w, signer, message, packet.SigTypeText, config) +} + +// ArmoredDetachSignText signs message (after canonicalising the line endings) +// with the private key from signer (which must already have been decrypted) +// and writes an armored signature to w. +// If config is nil, sensible defaults will be used. +func ArmoredDetachSignText(w io.Writer, signer *Entity, message io.Reader, config *packet.Config) error { + return armoredDetachSign(w, signer, message, packet.SigTypeText, config) +} + +func armoredDetachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + out, err := armor.Encode(w, SignatureType, nil) + if err != nil { + return + } + err = detachSign(out, signer, message, sigType, config) + if err != nil { + return + } + return out.Close() +} + +func detachSign(w io.Writer, signer *Entity, message io.Reader, sigType packet.SignatureType, config *packet.Config) (err error) { + signingKey, ok := signer.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return errors.InvalidArgumentError("no valid signing keys") + } + if signingKey.PrivateKey == nil { + return errors.InvalidArgumentError("signing key doesn't have a private key") + } + if signingKey.PrivateKey.Encrypted { + return errors.InvalidArgumentError("signing key is encrypted") + } + + sig := new(packet.Signature) + sig.SigType = sigType + sig.PubKeyAlgo = signingKey.PrivateKey.PubKeyAlgo + sig.Hash = config.Hash() + sig.CreationTime = config.Now() + sigLifetimeSecs := config.SigLifetime() + sig.SigLifetimeSecs = &sigLifetimeSecs + sig.IssuerKeyId = &signingKey.PrivateKey.KeyId + + h, wrappedHash, err := hashForSignature(sig.Hash, sig.SigType) + if err != nil { + return + } + if _, err = io.Copy(wrappedHash, message); err != nil { + return err + } + + err = sig.Sign(h, signingKey.PrivateKey, config) + if err != nil { + return + } + + return sig.Serialize(w) +} + +// FileHints contains metadata about encrypted files. 
This metadata is, itself, +// encrypted. +type FileHints struct { + // IsBinary can be set to hint that the contents are binary data. + IsBinary bool + // FileName hints at the name of the file that should be written. It's + // truncated to 255 bytes if longer. It may be empty to suggest that the + // file should not be written to disk. It may be equal to "_CONSOLE" to + // suggest the data should not be written to disk. + FileName string + // ModTime contains the modification time of the file, or the zero time if not applicable. + ModTime time.Time +} + +// SymmetricallyEncrypt acts like gpg -c: it encrypts a file with a passphrase. +// The resulting WriteCloser must be closed after the contents of the file have +// been written. +// If config is nil, sensible defaults will be used. +func SymmetricallyEncrypt(ciphertext io.Writer, passphrase []byte, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + if hints == nil { + hints = &FileHints{} + } + + key, err := packet.SerializeSymmetricKeyEncrypted(ciphertext, passphrase, config) + if err != nil { + return + } + + var w io.WriteCloser + if config.AEAD() != nil { + w, err = packet.SerializeAEADEncrypted(ciphertext, key, config.Cipher(), config.AEAD().Mode(), config) + if err != nil { + return + } + } else { + w, err = packet.SerializeSymmetricallyEncrypted(ciphertext, config.Cipher(), key, config) + if err != nil { + return + } + } + + literalData := w + if algo := config.Compression(); algo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + literalData, err = packet.SerializeCompressed(w, algo, compConfig) + if err != nil { + return + } + } + + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + return packet.SerializeLiteral(literalData, hints.IsBinary, hints.FileName, epochSeconds) +} + +// intersectPreferences mutates and returns a prefix of a that contains only +// the values in the intersection of a and b. The order of a is preserved. +func intersectPreferences(a []uint8, b []uint8) (intersection []uint8) { + var j int + for _, v := range a { + for _, v2 := range b { + if v == v2 { + a[j] = v + j++ + break + } + } + } + + return a[:j] +} + +func hashToHashId(h crypto.Hash) uint8 { + v, ok := s2k.HashToHashId(h) + if !ok { + panic("tried to convert unknown hash") + } + return v +} + +// EncryptText encrypts a message to a number of recipients and, optionally, +// signs it. Optional information is contained in 'hints', also encrypted, that +// aids the recipients in processing the message. The resulting WriteCloser +// must be closed after the contents of the file have been written. If config +// is nil, sensible defaults will be used. The signing is done in text mode. +func EncryptText(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeText, config) +} + +// Encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. 
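+//
+// As a minimal illustrative sketch (recipient is a hypothetical *Entity
+// loaded elsewhere; error handling elided), a call looks like:
+//
+//	pt, err := Encrypt(ciphertext, []*Entity{recipient}, nil, nil, nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	io.WriteString(pt, "secret message")
+//	pt.Close() // finishes the OpenPGP message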
+func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(ciphertext, ciphertext, to, signed, hints, packet.SigTypeBinary, config) +} + +// EncryptSplit encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func EncryptSplit(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, config *packet.Config) (plaintext io.WriteCloser, err error) { + return encrypt(keyWriter, dataWriter, to, signed, hints, packet.SigTypeBinary, config) +} + +// writeAndSign writes the data as a payload package and, optionally, signs +// it. hints contains optional information, that is also encrypted, +// that aids the recipients in processing the message. The resulting +// WriteCloser must be closed after the contents of the file have been +// written. If config is nil, sensible defaults will be used. +func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + var signer *packet.PrivateKey + if signed != nil { + signKey, ok := signed.SigningKeyById(config.Now(), config.SigningKey()) + if !ok { + return nil, errors.InvalidArgumentError("no valid signing keys") + } + signer = signKey.PrivateKey + if signer == nil { + return nil, errors.InvalidArgumentError("no private key in signing key") + } + if signer.Encrypted { + return nil, errors.InvalidArgumentError("signing key must be decrypted") + } + } + + var hash crypto.Hash + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h.Available() { + hash = h + break + } + } + + // If the hash specified by config is a candidate, we'll use that. + if configuredHash := config.Hash(); configuredHash.Available() { + for _, hashId := range candidateHashes { + if h, ok := s2k.HashIdToHash(hashId); ok && h == configuredHash { + hash = h + break + } + } + } + + if hash == 0 { + hashId := candidateHashes[0] + name, ok := s2k.HashIdToString(hashId) + if !ok { + name = "#" + strconv.Itoa(int(hashId)) + } + return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)") + } + + if signer != nil { + ops := &packet.OnePassSignature{ + SigType: sigType, + Hash: hash, + PubKeyAlgo: signer.PubKeyAlgo, + KeyId: signer.KeyId, + IsLast: true, + } + if err := ops.Serialize(payload); err != nil { + return nil, err + } + } + + if hints == nil { + hints = &FileHints{} + } + + w := payload + if signer != nil { + // If we need to write a signature packet after the literal + // data then we need to stop literalData from closing + // encryptedData. 
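+ // Wrapping in noOpCloser means that closing literalData below only
+ // finishes the literal-data packet; payload itself is closed later by
+ // signatureWriter.Close, after the signature packet has been written.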
+ w = noOpCloser{w} + + } + var epochSeconds uint32 + if !hints.ModTime.IsZero() { + epochSeconds = uint32(hints.ModTime.Unix()) + } + literalData, err := packet.SerializeLiteral(w, hints.IsBinary, hints.FileName, epochSeconds) + if err != nil { + return nil, err + } + + if signer != nil { + h, wrappedHash, err := hashForSignature(hash, sigType) + if err != nil { + return nil, err + } + metadata := &packet.LiteralData{ + Format: 't', + FileName: hints.FileName, + Time: epochSeconds, + } + if hints.IsBinary { + metadata.Format = 'b' + } + return signatureWriter{payload, literalData, hash, wrappedHash, h, signer, sigType, config, metadata}, nil + } + return literalData, nil +} + +// encrypt encrypts a message to a number of recipients and, optionally, signs +// it. hints contains optional information, that is also encrypted, that aids +// the recipients in processing the message. The resulting WriteCloser must +// be closed after the contents of the file have been written. +// If config is nil, sensible defaults will be used. +func encrypt(keyWriter io.Writer, dataWriter io.Writer, to []*Entity, signed *Entity, hints *FileHints, sigType packet.SignatureType, config *packet.Config) (plaintext io.WriteCloser, err error) { + if len(to) == 0 { + return nil, errors.InvalidArgumentError("no encryption recipient provided") + } + + // These are the possible ciphers that we'll use for the message. + candidateCiphers := []uint8{ + uint8(packet.CipherAES128), + uint8(packet.CipherAES256), + uint8(packet.CipherCAST5), + } + // These are the possible hash functions that we'll use for the signature. + candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + candidateAeadModes := []uint8{ + uint8(packet.AEADModeEAX), + uint8(packet.AEADModeOCB), + uint8(packet.AEADModeExperimentalGCM), + } + candidateCompression := []uint8{ + uint8(packet.CompressionNone), + uint8(packet.CompressionZIP), + uint8(packet.CompressionZLIB), + } + // In the event that a recipient doesn't specify any supported ciphers + // or hash functions, these are the ones that we assume that every + // implementation supports. + defaultCiphers := candidateCiphers[0:1] + defaultHashes := candidateHashes[0:1] + defaultAeadModes := candidateAeadModes[0:1] + defaultCompression := candidateCompression[0:1] + + encryptKeys := make([]Key, len(to)) + // AEAD is used only if every key supports it. 
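+ // (Each recipient's primary self-signature is checked below; a single
+ // recipient without the AEAD flag disables AEAD for the whole message.)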
+ aeadSupported := true + + for i := range to { + var ok bool + encryptKeys[i], ok = to[i].EncryptionKey(config.Now()) + if !ok { + return nil, errors.InvalidArgumentError("cannot encrypt a message to key id " + strconv.FormatUint(to[i].PrimaryKey.KeyId, 16) + " because it has no encryption keys") + } + + sig := to[i].PrimaryIdentity().SelfSignature + if sig.AEAD == false { + aeadSupported = false + } + + preferredSymmetric := sig.PreferredSymmetric + if len(preferredSymmetric) == 0 { + preferredSymmetric = defaultCiphers + } + preferredHashes := sig.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + preferredAeadModes := sig.PreferredAEAD + if len(preferredAeadModes) == 0 { + preferredAeadModes = defaultAeadModes + } + preferredCompression := sig.PreferredCompression + if len(preferredCompression) == 0 { + preferredCompression = defaultCompression + } + candidateCiphers = intersectPreferences(candidateCiphers, preferredSymmetric) + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + candidateAeadModes = intersectPreferences(candidateAeadModes, preferredAeadModes) + candidateCompression = intersectPreferences(candidateCompression, preferredCompression) + } + + if len(candidateCiphers) == 0 || len(candidateHashes) == 0 || len(candidateAeadModes) == 0 { + return nil, errors.InvalidArgumentError("cannot encrypt because recipient set shares no common algorithms") + } + + cipher := packet.CipherFunction(candidateCiphers[0]) + mode := packet.AEADMode(candidateAeadModes[0]) + // If the cipher specified by config is a candidate, we'll use that. + configuredCipher := config.Cipher() + for _, c := range candidateCiphers { + cipherFunc := packet.CipherFunction(c) + if cipherFunc == configuredCipher { + cipher = cipherFunc + break + } + } + + symKey := make([]byte, cipher.KeySize()) + if _, err := io.ReadFull(config.Random(), symKey); err != nil { + return nil, err + } + + for _, key := range encryptKeys { + if err := packet.SerializeEncryptedKey(keyWriter, key.PublicKey, cipher, symKey, config); err != nil { + return nil, err + } + } + + var payload io.WriteCloser + if aeadSupported { + payload, err = packet.SerializeAEADEncrypted(dataWriter, symKey, cipher, mode, config) + if err != nil { + return + } + } else { + payload, err = packet.SerializeSymmetricallyEncrypted(dataWriter, cipher, symKey, config) + if err != nil { + return + } + } + payload, err = handleCompression(payload, candidateCompression, config) + if err != nil { + return nil, err + } + + return writeAndSign(payload, candidateHashes, signed, hints, sigType, config) +} + +// Sign signs a message. The resulting WriteCloser must be closed after the +// contents of the file have been written. hints contains optional information +// that aids the recipients in processing the message. +// If config is nil, sensible defaults will be used. +func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Config) (input io.WriteCloser, err error) { + if signed == nil { + return nil, errors.InvalidArgumentError("no signer provided") + } + + // These are the possible hash functions that we'll use for the signature. 
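+ // Listed in preference order; the list is intersected with the signer's
+ // advertised preferred hashes below, and signing fails if the
+ // intersection is empty.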
+ candidateHashes := []uint8{ + hashToHashId(crypto.SHA256), + hashToHashId(crypto.SHA384), + hashToHashId(crypto.SHA512), + hashToHashId(crypto.SHA1), + hashToHashId(crypto.RIPEMD160), + } + defaultHashes := candidateHashes[0:1] + preferredHashes := signed.PrimaryIdentity().SelfSignature.PreferredHash + if len(preferredHashes) == 0 { + preferredHashes = defaultHashes + } + candidateHashes = intersectPreferences(candidateHashes, preferredHashes) + if len(candidateHashes) == 0 { + return nil, errors.InvalidArgumentError("cannot sign because signing key shares no common algorithms with candidate hashes") + } + + return writeAndSign(noOpCloser{output}, candidateHashes, signed, hints, packet.SigTypeBinary, config) +} + +// signatureWriter hashes the contents of a message while passing it along to +// literalData. When closed, it closes literalData, writes a signature packet +// to encryptedData and then also closes encryptedData. +type signatureWriter struct { + encryptedData io.WriteCloser + literalData io.WriteCloser + hashType crypto.Hash + wrappedHash hash.Hash + h hash.Hash + signer *packet.PrivateKey + sigType packet.SignatureType + config *packet.Config + metadata *packet.LiteralData // V5 signatures protect document metadata +} + +func (s signatureWriter) Write(data []byte) (int, error) { + s.wrappedHash.Write(data) + switch s.sigType { + case packet.SigTypeBinary: + return s.literalData.Write(data) + case packet.SigTypeText: + flag := 0 + return writeCanonical(s.literalData, data, &flag) + } + return 0, errors.UnsupportedError("unsupported signature type: " + strconv.Itoa(int(s.sigType))) +} + +func (s signatureWriter) Close() error { + sig := &packet.Signature{ + Version: s.signer.Version, + SigType: s.sigType, + PubKeyAlgo: s.signer.PubKeyAlgo, + Hash: s.hashType, + CreationTime: s.config.Now(), + IssuerKeyId: &s.signer.KeyId, + Metadata: s.metadata, + } + + if err := sig.Sign(s.h, s.signer, s.config); err != nil { + return err + } + if err := s.literalData.Close(); err != nil { + return err + } + if err := sig.Serialize(s.encryptedData); err != nil { + return err + } + return s.encryptedData.Close() +} + +// noOpCloser is like an ioutil.NopCloser, but for an io.Writer. +// TODO: we have two of these in OpenPGP packages alone. This probably needs +// to be promoted somewhere more common. 
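+// (The standard library's io.NopCloser, added in Go 1.16, covers only the
+// reader side.)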
+type noOpCloser struct { + w io.Writer +} + +func (c noOpCloser) Write(data []byte) (n int, err error) { + return c.w.Write(data) +} + +func (c noOpCloser) Close() error { + return nil +} + +func handleCompression(compressed io.WriteCloser, candidateCompression []uint8, config *packet.Config) (data io.WriteCloser, err error) { + data = compressed + confAlgo := config.Compression() + if confAlgo == packet.CompressionNone { + return + } + finalAlgo := packet.CompressionNone + // if compression specified by config available we will use it + for _, c := range candidateCompression { + if uint8(confAlgo) == c { + finalAlgo = confAlgo + break + } + } + + if finalAlgo != packet.CompressionNone { + var compConfig *packet.CompressionConfig + if config != nil { + compConfig = config.CompressionConfig + } + data, err = packet.SerializeCompressed(compressed, finalAlgo, compConfig) + if err != nil { + return + } + } + return data, nil +} diff --git a/vendor/github.com/acomagu/bufpipe/README.md b/vendor/github.com/acomagu/bufpipe/README.md new file mode 100644 index 0000000000..19df083142 --- /dev/null +++ b/vendor/github.com/acomagu/bufpipe/README.md @@ -0,0 +1,42 @@ +# bufpipe: Buffered Pipe + +[![CircleCI](https://img.shields.io/circleci/build/github/acomagu/bufpipe.svg?style=flat-square)](https://circleci.com/gh/acomagu/bufpipe) [![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/acomagu/bufpipe) + +The buffered version of io.Pipe. It's safe for concurrent use. + +## How does it differ from io.Pipe? + +Writes never block because the pipe has variable-sized buffer. + +```Go +r, w := bufpipe.New(nil) +io.WriteString(w, "abc") // No blocking. +io.WriteString(w, "def") // No blocking, too. +w.Close() +io.Copy(os.Stdout, r) +// Output: abcdef +``` + +[Playground](https://play.golang.org/p/PdyBAS3pVob) + +## How does it differ from bytes.Buffer? + +Reads block if the internal buffer is empty until the writer is closed. + +```Go +r, w := bufpipe.New(nil) + +done := make(chan struct{}) +go func() { + io.Copy(os.Stdout, r) // The reads block until the writer is closed. + done <- struct{}{} +}() + +io.WriteString(w, "abc") +io.WriteString(w, "def") +w.Close() +<-done +// Output: abcdef +``` + +[Playground](https://play.golang.org/p/UppmyLeRgX6) diff --git a/vendor/github.com/acomagu/bufpipe/bufpipe.go b/vendor/github.com/acomagu/bufpipe/bufpipe.go new file mode 100644 index 0000000000..846dbcc290 --- /dev/null +++ b/vendor/github.com/acomagu/bufpipe/bufpipe.go @@ -0,0 +1,128 @@ +package bufpipe + +import ( + "bytes" + "errors" + "io" + "sync" +) + +// ErrClosedPipe is the error used for read or write operations on a closed pipe. +var ErrClosedPipe = errors.New("bufpipe: read/write on closed pipe") + +type pipe struct { + cond *sync.Cond + buf *bytes.Buffer + rerr, werr error +} + +// A PipeReader is the read half of a pipe. +type PipeReader struct { + *pipe +} + +// A PipeWriter is the write half of a pipe. +type PipeWriter struct { + *pipe +} + +// New creates a synchronous pipe using buf as its initial contents. It can be +// used to connect code expecting an io.Reader with code expecting an io.Writer. +// +// Unlike io.Pipe, writes never block because the internal buffer has variable +// size. Reads block only when the buffer is empty. +// +// It is safe to call Read and Write in parallel with each other or with Close. +// Parallel calls to Read and parallel calls to Write are also safe: the +// individual calls will be gated sequentially. 
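+//
+// As an illustrative sketch, a reader can be pre-seeded with existing data
+// (error handling elided):
+//
+//	r, w := bufpipe.New([]byte("seed"))
+//	w.Close()
+//	b, _ := ioutil.ReadAll(r) // b == []byte("seed")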
+//
+// The new pipe takes ownership of buf, and the caller should not use buf after
+// this call. New is intended to prepare a PipeReader to read existing data. It
+// can also be used to set the initial size of the internal buffer for writing.
+// To do that, buf should have the desired capacity but a length of zero.
+func New(buf []byte) (*PipeReader, *PipeWriter) {
+ p := &pipe{
+ buf: bytes.NewBuffer(buf),
+ cond: sync.NewCond(new(sync.Mutex)),
+ }
+ return &PipeReader{
+ pipe: p,
+ }, &PipeWriter{
+ pipe: p,
+ }
+}
+
+// Read implements the standard Read interface: it reads data from the pipe's
+// internal buffer, blocking until data is written or the write end is closed.
+// If the write end is closed with an error, that error is returned as err;
+// otherwise err is io.EOF.
+func (r *PipeReader) Read(data []byte) (int, error) {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+RETRY:
+ n, err := r.buf.Read(data)
+ // If not closed and nothing was read, wait for a write.
+ if err == io.EOF && r.rerr == nil && n == 0 {
+ r.cond.Wait()
+ goto RETRY
+ }
+ if err == io.EOF {
+ return n, r.rerr
+ }
+ return n, err
+}
+
+// Close closes the reader; subsequent writes to the write half of the pipe
+// will return the error ErrClosedPipe.
+func (r *PipeReader) Close() error {
+ return r.CloseWithError(nil)
+}
+
+// CloseWithError closes the reader; subsequent writes to the write half of the
+// pipe will return the error err.
+func (r *PipeReader) CloseWithError(err error) error {
+ r.cond.L.Lock()
+ defer r.cond.L.Unlock()
+
+ if err == nil {
+ err = ErrClosedPipe
+ }
+ r.werr = err
+ return nil
+}
+
+// Write implements the standard Write interface: it writes data to the
+// internal buffer. If the read half is closed with an error, that error is
+// returned; if it was closed without one, ErrClosedPipe is returned.
+func (w *PipeWriter) Write(data []byte) (int, error) {
+ w.cond.L.Lock()
+ defer w.cond.L.Unlock()
+
+ if w.werr != nil {
+ return 0, w.werr
+ }
+
+ n, err := w.buf.Write(data)
+ w.cond.Signal()
+ return n, err
+}
+
+// Close closes the writer; subsequent reads from the read half of the pipe will
+// return io.EOF once the internal buffer is empty.
+func (w *PipeWriter) Close() error {
+ return w.CloseWithError(nil)
+}
+
+// CloseWithError closes the writer; subsequent reads from the read half of the
+// pipe will return err once the internal buffer is empty.
+func (w *PipeWriter) CloseWithError(err error) error {
+ w.cond.L.Lock()
+ defer w.cond.L.Unlock()
+
+ if err == nil {
+ err = io.EOF
+ }
+ w.rerr = err
+ w.cond.Broadcast() // wake any reader blocked in Read waiting for data
+ return nil
+}
diff --git a/vendor/github.com/acomagu/bufpipe/doc.go b/vendor/github.com/acomagu/bufpipe/doc.go
new file mode 100644
index 0000000000..16a3948001
--- /dev/null
+++ b/vendor/github.com/acomagu/bufpipe/doc.go
@@ -0,0 +1,2 @@
+// Package bufpipe provides an IO pipe with a variable-sized buffer.
+package bufpipe diff --git a/vendor/github.com/acomagu/bufpipe/go.mod b/vendor/github.com/acomagu/bufpipe/go.mod new file mode 100644 index 0000000000..bab097a2f5 --- /dev/null +++ b/vendor/github.com/acomagu/bufpipe/go.mod @@ -0,0 +1,5 @@ +module github.com/acomagu/bufpipe + +go 1.12 + +require github.com/matryer/is v1.2.0 diff --git a/vendor/github.com/acomagu/bufpipe/go.sum b/vendor/github.com/acomagu/bufpipe/go.sum new file mode 100644 index 0000000000..6378c9bd6f --- /dev/null +++ b/vendor/github.com/acomagu/bufpipe/go.sum @@ -0,0 +1,2 @@ +github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= diff --git a/vendor/github.com/antlr/antlr4/LICENSE.txt b/vendor/github.com/antlr/antlr4/LICENSE.txt new file mode 100644 index 0000000000..2042d1bda6 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/LICENSE.txt @@ -0,0 +1,52 @@ +[The "BSD 3-clause license"] +Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holder nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES +OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT +NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF +THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +===== + +MIT License for codepointat.js from https://git.io/codepointat +MIT License for fromcodepoint.js from https://git.io/vDW1m + +Copyright Mathias Bynens + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go new file mode 100644 index 0000000000..1592212e14 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNInvalidAltNumber int + +type ATN struct { + // DecisionToState is the decision points for all rules, subrules, optional + // blocks, ()+, ()*, etc. Used to build DFA predictors for them. + DecisionToState []DecisionState + + // grammarType is the ATN type and is used for deserializing ATNs from strings. + grammarType int + + // lexerActions is referenced by action transitions in the ATN for lexer ATNs. + lexerActions []LexerAction + + // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN. + maxTokenType int + + modeNameToStartState map[string]*TokensStartState + + modeToStartState []*TokensStartState + + // ruleToStartState maps from rule index to starting state number. + ruleToStartState []*RuleStartState + + // ruleToStopState maps from rule index to stop state number. + ruleToStopState []*RuleStopState + + // ruleToTokenType maps the rule index to the resulting token type for lexer + // ATNs. For parser ATNs, it maps the rule index to the generated bypass token + // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was + // specified, and otherwise is nil. + ruleToTokenType []int + + states []ATNState +} + +func NewATN(grammarType int, maxTokenType int) *ATN { + return &ATN{ + grammarType: grammarType, + maxTokenType: maxTokenType, + modeNameToStartState: make(map[string]*TokensStartState), + } +} + +// NextTokensInContext computes the set of valid tokens that can occur starting +// in state s. If ctx is nil, the set of tokens will not include what can follow +// the rule surrounding s. In other words, the set will be restricted to tokens +// reachable staying within the rule of s. +func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet { + return NewLL1Analyzer(a).Look(s, nil, ctx) +} + +// NextTokensNoContext computes the set of valid tokens that can occur starting +// in s and staying in same rule. Token.EPSILON is in set if we reach end of +// rule. 
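+// The computed set is cached on the state via SetNextTokenWithinRule and
+// marked read-only, so repeated calls for the same state return the cached
+// set.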
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet { + if s.GetNextTokenWithinRule() != nil { + return s.GetNextTokenWithinRule() + } + + s.SetNextTokenWithinRule(a.NextTokensInContext(s, nil)) + s.GetNextTokenWithinRule().readOnly = true + + return s.GetNextTokenWithinRule() +} + +func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet { + if ctx == nil { + return a.NextTokensNoContext(s) + } + + return a.NextTokensInContext(s, ctx) +} + +func (a *ATN) addState(state ATNState) { + if state != nil { + state.SetATN(a) + state.SetStateNumber(len(a.states)) + } + + a.states = append(a.states, state) +} + +func (a *ATN) removeState(state ATNState) { + a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice +} + +func (a *ATN) defineDecisionState(s DecisionState) int { + a.DecisionToState = append(a.DecisionToState, s) + s.setDecision(len(a.DecisionToState) - 1) + + return s.getDecision() +} + +func (a *ATN) getDecisionState(decision int) DecisionState { + if len(a.DecisionToState) == 0 { + return nil + } + + return a.DecisionToState[decision] +} + +// getExpectedTokens computes the set of input symbols which could follow ATN +// state number stateNumber in the specified full parse context ctx and returns +// the set of potentially valid input symbols which could follow the specified +// state in the specified context. This method considers the complete parser +// context, but does not evaluate semantic predicates (i.e. all predicates +// encountered during the calculation are assumed true). If a path in the ATN +// exists from the starting state to the RuleStopState of the outermost context +// without Matching any symbols, Token.EOF is added to the returned set. +// +// A nil ctx defaults to ParserRuleContext.EMPTY. +// +// It panics if the ATN does not contain state stateNumber. +func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet { + if stateNumber < 0 || stateNumber >= len(a.states) { + panic("Invalid state number.") + } + + s := a.states[stateNumber] + following := a.NextTokens(s, nil) + + if !following.contains(TokenEpsilon) { + return following + } + + expected := NewIntervalSet() + + expected.addSet(following) + expected.removeOne(TokenEpsilon) + + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := a.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + + following = a.NextTokens(rt.(*RuleTransition).followState, nil) + expected.addSet(following) + expected.removeOne(TokenEpsilon) + ctx = ctx.GetParent().(RuleContext) + } + + if following.contains(TokenEpsilon) { + expected.addOne(TokenEOF) + } + + return expected +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go new file mode 100644 index 0000000000..0535d5246c --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go @@ -0,0 +1,295 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +type comparable interface { + equals(other interface{}) bool +} + +// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic +// context). 
The syntactic context is a graph-structured stack node whose +// path(s) to the root is the rule invocation(s) chain used to arrive at the +// state. The semantic context is the tree of semantic predicates encountered +// before reaching an ATN state. +type ATNConfig interface { + comparable + + hash() int + + GetState() ATNState + GetAlt() int + GetSemanticContext() SemanticContext + + GetContext() PredictionContext + SetContext(PredictionContext) + + GetReachesIntoOuterContext() int + SetReachesIntoOuterContext(int) + + String() string + + getPrecedenceFilterSuppressed() bool + setPrecedenceFilterSuppressed(bool) +} + +type BaseATNConfig struct { + precedenceFilterSuppressed bool + state ATNState + alt int + context PredictionContext + semanticContext SemanticContext + reachesIntoOuterContext int +} + +func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup + return &BaseATNConfig{ + state: old.state, + alt: old.alt, + context: old.context, + semanticContext: old.semanticContext, + reachesIntoOuterContext: old.reachesIntoOuterContext, + } +} + +func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig5(state, alt, context, SemanticContextNone) +} + +func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") // TODO: Necessary? + } + + return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext} +} + +func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()) +} + +func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, c.GetContext(), semanticContext) +} + +func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig { + return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext) +} + +func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig { + return NewBaseATNConfig(c, state, context, c.GetSemanticContext()) +} + +func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig { + if semanticContext == nil { + panic("semanticContext cannot be nil") + } + + return &BaseATNConfig{ + state: state, + alt: c.GetAlt(), + context: context, + semanticContext: semanticContext, + reachesIntoOuterContext: c.GetReachesIntoOuterContext(), + precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(), + } +} + +func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool { + return b.precedenceFilterSuppressed +} + +func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) { + b.precedenceFilterSuppressed = v +} + +func (b *BaseATNConfig) GetState() ATNState { + return b.state +} + +func (b *BaseATNConfig) GetAlt() int { + return b.alt +} + +func (b *BaseATNConfig) SetContext(v PredictionContext) { + b.context = v +} +func (b *BaseATNConfig) GetContext() PredictionContext { + return b.context +} + +func (b *BaseATNConfig) GetSemanticContext() SemanticContext { + return b.semanticContext +} + +func (b *BaseATNConfig) GetReachesIntoOuterContext() int { + return b.reachesIntoOuterContext +} + +func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) { + b.reachesIntoOuterContext = v +} + +// An ATN configuration is equal to another if both have the 
same state, they +// predict the same alternative, and syntactic/semantic contexts are the same. +func (b *BaseATNConfig) equals(o interface{}) bool { + if b == o { + return true + } + + var other, ok = o.(*BaseATNConfig) + + if !ok { + return false + } + + var equal bool + + if b.context == nil { + equal = other.context == nil + } else { + equal = b.context.equals(other.context) + } + + var ( + nums = b.state.GetStateNumber() == other.state.GetStateNumber() + alts = b.alt == other.alt + cons = b.semanticContext.equals(other.semanticContext) + sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed + ) + + return nums && alts && cons && sups && equal +} + +func (b *BaseATNConfig) hash() int { + var c int + if b.context != nil { + c = b.context.hash() + } + + h := murmurInit(7) + h = murmurUpdate(h, b.state.GetStateNumber()) + h = murmurUpdate(h, b.alt) + h = murmurUpdate(h, c) + h = murmurUpdate(h, b.semanticContext.hash()) + return murmurFinish(h, 4) +} + +func (b *BaseATNConfig) String() string { + var s1, s2, s3 string + + if b.context != nil { + s1 = ",[" + fmt.Sprint(b.context) + "]" + } + + if b.semanticContext != SemanticContextNone { + s2 = "," + fmt.Sprint(b.semanticContext) + } + + if b.reachesIntoOuterContext > 0 { + s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext) + } + + return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3) +} + +type LexerATNConfig struct { + *BaseATNConfig + lexerActionExecutor *LexerActionExecutor + passedThroughNonGreedyDecision bool +} + +func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone), + lexerActionExecutor: lexerActionExecutor, + } +} + +func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()), + lexerActionExecutor: lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{ + BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()), + lexerActionExecutor: c.lexerActionExecutor, + passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state), + } +} + +func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig { + return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)} +} + +func (l *LexerATNConfig) hash() int { + var f int + if l.passedThroughNonGreedyDecision { + f = 1 + } else { + f = 0 + } + h := murmurInit(7) + h = murmurUpdate(h, l.state.hash()) + h = murmurUpdate(h, l.alt) + h = murmurUpdate(h, l.context.hash()) + h = murmurUpdate(h, l.semanticContext.hash()) + h = murmurUpdate(h, f) + h = 
murmurUpdate(h, l.lexerActionExecutor.hash()) + h = murmurFinish(h, 6) + return h +} + +func (l *LexerATNConfig) equals(other interface{}) bool { + var othert, ok = other.(*LexerATNConfig) + + if l == other { + return true + } else if !ok { + return false + } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision { + return false + } + + var b bool + + if l.lexerActionExecutor != nil { + b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor) + } else { + b = othert.lexerActionExecutor != nil + } + + if b { + return false + } + + return l.BaseATNConfig.equals(othert.BaseATNConfig) +} + + +func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool { + var ds, ok = target.(DecisionState) + + return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy()) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go new file mode 100644 index 0000000000..d9f74755fa --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go @@ -0,0 +1,387 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +type ATNConfigSet interface { + hash() int + Add(ATNConfig, *DoubleDict) bool + AddAll([]ATNConfig) bool + + GetStates() *Set + GetPredicates() []SemanticContext + GetItems() []ATNConfig + + OptimizeConfigs(interpreter *BaseATNSimulator) + + Equals(other interface{}) bool + + Length() int + IsEmpty() bool + Contains(ATNConfig) bool + ContainsFast(ATNConfig) bool + Clear() + String() string + + HasSemanticContext() bool + SetHasSemanticContext(v bool) + + ReadOnly() bool + SetReadOnly(bool) + + GetConflictingAlts() *BitSet + SetConflictingAlts(*BitSet) + + FullContext() bool + + GetUniqueAlt() int + SetUniqueAlt(int) + + GetDipsIntoOuterContext() bool + SetDipsIntoOuterContext(bool) +} + +// BaseATNConfigSet is a specialized set of ATNConfig that tracks information +// about its elements and can combine similar configurations using a +// graph-structured stack. +type BaseATNConfigSet struct { + cachedHash int + + // configLookup is used to determine whether two BaseATNConfigSets are equal. We + // need all configurations with the same (s, i, _, semctx) to be equal. A key + // effectively doubles the number of objects associated with ATNConfigs. All + // keys are hashed by (s, i, _, pi), not including the context. Wiped out when + // read-only because a set becomes a DFA state. + configLookup *Set + + // configs is the added elements. + configs []ATNConfig + + // TODO: These fields make me pretty uncomfortable, but it is nice to pack up + // info together because it saves recomputation. Can we track conflicts as they + // are added to save scanning configs later? + conflictingAlts *BitSet + + // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates + // we hit a pred while computing a closure operation. Do not make a DFA state + // from the BaseATNConfigSet in this case. TODO: How is this used by parsers? + dipsIntoOuterContext bool + + // fullCtx is whether it is part of a full context LL prediction. Used to + // determine how to merge $. It is a wildcard with SLL, but not for an LL + // context merge. + fullCtx bool + + // Used in parser and lexer. In lexer, it indicates we hit a pred + // while computing a closure operation. 
Don't make a DFA state from this set.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. It does not protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: NewSet(nil, equalATNConfigs),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing := b.configLookup.add(config).(ATNConfig)
+
+ if existing == config {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
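+ // Keep the deepest outer-context reach seen for this (s, i, pi) key.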
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext())) + + // Preserve the precedence filter suppression during the merge + if config.getPrecedenceFilterSuppressed() { + existing.setPrecedenceFilterSuppressed(true) + } + + // Replace the context because there is no need to do alt mapping + existing.SetContext(merged) + + return true +} + +func (b *BaseATNConfigSet) GetStates() *Set { + states := NewSet(nil, nil) + + for i := 0; i < len(b.configs); i++ { + states.add(b.configs[i].GetState()) + } + + return states +} + +func (b *BaseATNConfigSet) HasSemanticContext() bool { + return b.hasSemanticContext +} + +func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) { + b.hasSemanticContext = v +} + +func (b *BaseATNConfigSet) GetPredicates() []SemanticContext { + preds := make([]SemanticContext, 0) + + for i := 0; i < len(b.configs); i++ { + c := b.configs[i].GetSemanticContext() + + if c != SemanticContextNone { + preds = append(preds, c) + } + } + + return preds +} + +func (b *BaseATNConfigSet) GetItems() []ATNConfig { + return b.configs +} + +func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) { + if b.readOnly { + panic("set is read-only") + } + + if b.configLookup.length() == 0 { + return + } + + for i := 0; i < len(b.configs); i++ { + config := b.configs[i] + + config.SetContext(interpreter.getCachedContext(config.GetContext())) + } +} + +func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool { + for i := 0; i < len(coll); i++ { + b.Add(coll[i], nil) + } + + return false +} + +func (b *BaseATNConfigSet) Equals(other interface{}) bool { + if b == other { + return true + } else if _, ok := other.(*BaseATNConfigSet); !ok { + return false + } + + other2 := other.(*BaseATNConfigSet) + + return b.configs != nil && + // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary? 
+ b.fullCtx == other2.fullCtx && + b.uniqueAlt == other2.uniqueAlt && + b.conflictingAlts == other2.conflictingAlts && + b.hasSemanticContext == other2.hasSemanticContext && + b.dipsIntoOuterContext == other2.dipsIntoOuterContext +} + +func (b *BaseATNConfigSet) hash() int { + if b.readOnly { + if b.cachedHash == -1 { + b.cachedHash = b.hashCodeConfigs() + } + + return b.cachedHash + } + + return b.hashCodeConfigs() +} + +func (b *BaseATNConfigSet) hashCodeConfigs() int { + h := murmurInit(1) + for _, c := range b.configs { + if c != nil { + h = murmurUpdate(h, c.hash()) + } + } + return murmurFinish(h, len(b.configs)) +} + +func (b *BaseATNConfigSet) Length() int { + return len(b.configs) +} + +func (b *BaseATNConfigSet) IsEmpty() bool { + return len(b.configs) == 0 +} + +func (b *BaseATNConfigSet) Contains(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.contains(item) +} + +func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool { + if b.configLookup == nil { + panic("not implemented for read-only sets") + } + + return b.configLookup.contains(item) // TODO: containsFast is not implemented for Set +} + +func (b *BaseATNConfigSet) Clear() { + if b.readOnly { + panic("set is read-only") + } + + b.configs = make([]ATNConfig, 0) + b.cachedHash = -1 + b.configLookup = NewSet(nil, equalATNConfigs) +} + +func (b *BaseATNConfigSet) FullContext() bool { + return b.fullCtx +} + +func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool { + return b.dipsIntoOuterContext +} + +func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) { + b.dipsIntoOuterContext = v +} + +func (b *BaseATNConfigSet) GetUniqueAlt() int { + return b.uniqueAlt +} + +func (b *BaseATNConfigSet) SetUniqueAlt(v int) { + b.uniqueAlt = v +} + +func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet { + return b.conflictingAlts +} + +func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) { + b.conflictingAlts = v +} + +func (b *BaseATNConfigSet) ReadOnly() bool { + return b.readOnly +} + +func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) { + b.readOnly = readOnly + + if readOnly { + b.configLookup = nil // Read only, so no need for the lookup cache + } +} + +func (b *BaseATNConfigSet) String() string { + s := "[" + + for i, c := range b.configs { + s += c.String() + + if i != len(b.configs)-1 { + s += ", " + } + } + + s += "]" + + if b.hasSemanticContext { + s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext) + } + + if b.uniqueAlt != ATNInvalidAltNumber { + s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt) + } + + if b.conflictingAlts != nil { + s += ",conflictingAlts=" + b.conflictingAlts.String() + } + + if b.dipsIntoOuterContext { + s += ",dipsIntoOuterContext" + } + + return s +} + +type OrderedATNConfigSet struct { + *BaseATNConfigSet +} + +func NewOrderedATNConfigSet() *OrderedATNConfigSet { + b := NewBaseATNConfigSet(false) + + b.configLookup = NewSet(nil, nil) + + return &OrderedATNConfigSet{BaseATNConfigSet: b} +} + +func equalATNConfigs(a, b interface{}) bool { + if a == nil || b == nil { + return false + } + + if a == b { + return true + } + + var ai, ok = a.(ATNConfig) + var bi, ok1 = b.(ATNConfig) + + if !ok || !ok1 { + return false + } + + nums := ai.GetState().GetStateNumber() == bi.GetState().GetStateNumber() + alts := ai.GetAlt() == bi.GetAlt() + cons := ai.GetSemanticContext().equals(bi.GetSemanticContext()) + + return nums && alts && cons +} diff --git 
a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go new file mode 100644 index 0000000000..18b89efafb --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go @@ -0,0 +1,25 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNDeserializationOptionsdefaultOptions = &ATNDeserializationOptions{true, false, false} + +type ATNDeserializationOptions struct { + readOnly bool + verifyATN bool + generateRuleBypassTransitions bool +} + +func NewATNDeserializationOptions(CopyFrom *ATNDeserializationOptions) *ATNDeserializationOptions { + o := new(ATNDeserializationOptions) + + if CopyFrom != nil { + o.readOnly = CopyFrom.readOnly + o.verifyATN = CopyFrom.verifyATN + o.generateRuleBypassTransitions = CopyFrom.generateRuleBypassTransitions + } + + return o +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go new file mode 100644 index 0000000000..884d39cf7c --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go @@ -0,0 +1,828 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "encoding/hex" + "fmt" + "strconv" + "strings" + "unicode/utf16" +) + +// This is the earliest supported serialized UUID. +// stick to serialized version for now, we don't need a UUID instance +var BaseSerializedUUID = "AADB8D7E-AEEF-4415-AD2B-8204D6CF042E" +var AddedUnicodeSMP = "59627784-3BE5-417A-B9EB-8131A7286089" + +// This list contains all of the currently supported UUIDs, ordered by when +// the feature first appeared in this branch. +var SupportedUUIDs = []string{BaseSerializedUUID, AddedUnicodeSMP} + +var SerializedVersion = 3 + +// This is the current serialized UUID. +var SerializedUUID = AddedUnicodeSMP + +type LoopEndStateIntPair struct { + item0 *LoopEndState + item1 int +} + +type BlockStartStateIntPair struct { + item0 BlockStartState + item1 int +} + +type ATNDeserializer struct { + deserializationOptions *ATNDeserializationOptions + data []rune + pos int + uuid string +} + +func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer { + if options == nil { + options = ATNDeserializationOptionsdefaultOptions + } + + return &ATNDeserializer{deserializationOptions: options} +} + +func stringInSlice(a string, list []string) int { + for i, b := range list { + if b == a { + return i + } + } + + return -1 +} + +// isFeatureSupported determines if a particular serialized representation of an +// ATN supports a particular feature, identified by the UUID used for +// serializing the ATN at the time the feature was first introduced. Feature is +// the UUID marking the first time the feature was supported in the serialized +// ATN. ActualUuid is the UUID of the actual serialized ATN which is currently +// being deserialized. It returns true if actualUuid represents a serialized ATN +// at or after the feature identified by feature was introduced, and otherwise +// false. 
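+// Because SupportedUUIDs is ordered chronologically by feature introduction,
+// the check reduces to comparing the indices of the two UUIDs in that list.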
+func (a *ATNDeserializer) isFeatureSupported(feature, actualUUID string) bool { + idx1 := stringInSlice(feature, SupportedUUIDs) + + if idx1 < 0 { + return false + } + + idx2 := stringInSlice(actualUUID, SupportedUUIDs) + + return idx2 >= idx1 +} + +func (a *ATNDeserializer) DeserializeFromUInt16(data []uint16) *ATN { + a.reset(utf16.Decode(data)) + a.checkVersion() + a.checkUUID() + + atn := a.readATN() + + a.readStates(atn) + a.readRules(atn) + a.readModes(atn) + + sets := make([]*IntervalSet, 0) + + // First, deserialize sets with 16-bit arguments <= U+FFFF. + sets = a.readSets(atn, sets, a.readInt) + // Next, if the ATN was serialized with the Unicode SMP feature, + // deserialize sets with 32-bit arguments <= U+10FFFF. + if (a.isFeatureSupported(AddedUnicodeSMP, a.uuid)) { + sets = a.readSets(atn, sets, a.readInt32) + } + + a.readEdges(atn, sets) + a.readDecisions(atn) + a.readLexerActions(atn) + a.markPrecedenceDecisions(atn) + a.verifyATN(atn) + + if a.deserializationOptions.generateRuleBypassTransitions && atn.grammarType == ATNTypeParser { + a.generateRuleBypassTransitions(atn) + // Re-verify after modification + a.verifyATN(atn) + } + + return atn + +} + +func (a *ATNDeserializer) reset(data []rune) { + temp := make([]rune, len(data)) + + for i, c := range data { + // Don't adjust the first value since that's the version number + if i == 0 { + temp[i] = c + } else if c > 1 { + temp[i] = c - 2 + } else { + temp[i] = c + 65533 + } + } + + a.data = temp + a.pos = 0 +} + +func (a *ATNDeserializer) checkVersion() { + version := a.readInt() + + if version != SerializedVersion { + panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(SerializedVersion) + ").") + } +} + +func (a *ATNDeserializer) checkUUID() { + uuid := a.readUUID() + + if stringInSlice(uuid, SupportedUUIDs) < 0 { + panic("Could not deserialize ATN with UUID: " + uuid + " (expected " + SerializedUUID + " or a legacy UUID).") + } + + a.uuid = uuid +} + +func (a *ATNDeserializer) readATN() *ATN { + grammarType := a.readInt() + maxTokenType := a.readInt() + + return NewATN(grammarType, maxTokenType) +} + +func (a *ATNDeserializer) readStates(atn *ATN) { + loopBackStateNumbers := make([]LoopEndStateIntPair, 0) + endStateNumbers := make([]BlockStartStateIntPair, 0) + + nstates := a.readInt() + + for i := 0; i < nstates; i++ { + stype := a.readInt() + + // Ignore bad types of states + if stype == ATNStateInvalidType { + atn.addState(nil) + + continue + } + + ruleIndex := a.readInt() + + if ruleIndex == 0xFFFF { + ruleIndex = -1 + } + + s := a.stateFactory(stype, ruleIndex) + + if stype == ATNStateLoopEnd { + loopBackStateNumber := a.readInt() + + loopBackStateNumbers = append(loopBackStateNumbers, LoopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber}) + } else if s2, ok := s.(BlockStartState); ok { + endStateNumber := a.readInt() + + endStateNumbers = append(endStateNumbers, BlockStartStateIntPair{s2, endStateNumber}) + } + + atn.addState(s) + } + + // Delay the assignment of loop back and end states until we know all the state + // instances have been initialized + for j := 0; j < len(loopBackStateNumbers); j++ { + pair := loopBackStateNumbers[j] + + pair.item0.loopBackState = atn.states[pair.item1] + } + + for j := 0; j < len(endStateNumbers); j++ { + pair := endStateNumbers[j] + + pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState)) + } + + numNonGreedyStates := a.readInt() + + for j := 0; j < numNonGreedyStates; j++ { + stateNumber := a.readInt() + + 
atn.states[stateNumber].(DecisionState).setNonGreedy(true) + } + + numPrecedenceStates := a.readInt() + + for j := 0; j < numPrecedenceStates; j++ { + stateNumber := a.readInt() + + atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true + } +} + +func (a *ATNDeserializer) readRules(atn *ATN) { + nrules := a.readInt() + + if atn.grammarType == ATNTypeLexer { + atn.ruleToTokenType = make([]int, nrules) // TODO: initIntArray(nrules, 0) + } + + atn.ruleToStartState = make([]*RuleStartState, nrules) // TODO: initIntArray(nrules, 0) + + for i := 0; i < nrules; i++ { + s := a.readInt() + startState := atn.states[s].(*RuleStartState) + + atn.ruleToStartState[i] = startState + + if atn.grammarType == ATNTypeLexer { + tokenType := a.readInt() + + if tokenType == 0xFFFF { + tokenType = TokenEOF + } + + atn.ruleToTokenType[i] = tokenType + } + } + + atn.ruleToStopState = make([]*RuleStopState, nrules) //initIntArray(nrules, 0) + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if s2, ok := state.(*RuleStopState); ok { + atn.ruleToStopState[s2.ruleIndex] = s2 + atn.ruleToStartState[s2.ruleIndex].stopState = s2 + } + } +} + +func (a *ATNDeserializer) readModes(atn *ATN) { + nmodes := a.readInt() + + for i := 0; i < nmodes; i++ { + s := a.readInt() + + atn.modeToStartState = append(atn.modeToStartState, atn.states[s].(*TokensStartState)) + } +} + +func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet, readUnicode func() int) []*IntervalSet { + m := a.readInt() + + for i := 0; i < m; i++ { + iset := NewIntervalSet() + + sets = append(sets, iset) + + n := a.readInt() + containsEOF := a.readInt() + + if containsEOF != 0 { + iset.addOne(-1) + } + + for j := 0; j < n; j++ { + i1 := readUnicode() + i2 := readUnicode() + + iset.addRange(i1, i2) + } + } + + return sets +} + +func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) { + nedges := a.readInt() + + for i := 0; i < nedges; i++ { + var ( + src = a.readInt() + trg = a.readInt() + ttype = a.readInt() + arg1 = a.readInt() + arg2 = a.readInt() + arg3 = a.readInt() + trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets) + srcState = atn.states[src] + ) + + srcState.AddTransition(trans, -1) + } + + // Edges for rule stop states can be derived, so they are not serialized + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + for j := 0; j < len(state.GetTransitions()); j++ { + var t, ok = state.GetTransitions()[j].(*RuleTransition) + + if !ok { + continue + } + + outermostPrecedenceReturn := -1 + + if atn.ruleToStartState[t.getTarget().GetRuleIndex()].isPrecedenceRule { + if t.precedence == 0 { + outermostPrecedenceReturn = t.getTarget().GetRuleIndex() + } + } + + trans := NewEpsilonTransition(t.followState, outermostPrecedenceReturn) + + atn.ruleToStopState[t.getTarget().GetRuleIndex()].AddTransition(trans, -1) + } + } + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if s2, ok := state.(*BaseBlockStartState); ok { + // We need to know the end state to set its start state + if s2.endState == nil { + panic("IllegalState") + } + + // Block end states can only be associated to a single block start state + if s2.endState.startState != nil { + panic("IllegalState") + } + + s2.endState.startState = state + } + + if s2, ok := state.(*PlusLoopbackState); ok { + for j := 0; j < len(s2.GetTransitions()); j++ { + target := s2.GetTransitions()[j].getTarget() + + if t2, ok := target.(*PlusBlockStartState); ok { + t2.loopBackState = state + } + } + } else if s2, ok := 
state.(*StarLoopbackState); ok { + for j := 0; j < len(s2.GetTransitions()); j++ { + target := s2.GetTransitions()[j].getTarget() + + if t2, ok := target.(*StarLoopEntryState); ok { + t2.loopBackState = state + } + } + } + } +} + +func (a *ATNDeserializer) readDecisions(atn *ATN) { + ndecisions := a.readInt() + + for i := 0; i < ndecisions; i++ { + s := a.readInt() + decState := atn.states[s].(DecisionState) + + atn.DecisionToState = append(atn.DecisionToState, decState) + decState.setDecision(i) + } +} + +func (a *ATNDeserializer) readLexerActions(atn *ATN) { + if atn.grammarType == ATNTypeLexer { + count := a.readInt() + + atn.lexerActions = make([]LexerAction, count) // initIntArray(count, nil) + + for i := 0; i < count; i++ { + actionType := a.readInt() + data1 := a.readInt() + + if data1 == 0xFFFF { + data1 = -1 + } + + data2 := a.readInt() + + if data2 == 0xFFFF { + data2 = -1 + } + + lexerAction := a.lexerActionFactory(actionType, data1, data2) + + atn.lexerActions[i] = lexerAction + } + } +} + +func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) { + count := len(atn.ruleToStartState) + + for i := 0; i < count; i++ { + atn.ruleToTokenType[i] = atn.maxTokenType + i + 1 + } + + for i := 0; i < count; i++ { + a.generateRuleBypassTransition(atn, i) + } +} + +func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) { + bypassStart := NewBasicBlockStartState() + + bypassStart.ruleIndex = idx + atn.addState(bypassStart) + + bypassStop := NewBlockEndState() + + bypassStop.ruleIndex = idx + atn.addState(bypassStop) + + bypassStart.endState = bypassStop + + atn.defineDecisionState(bypassStart.BaseDecisionState) + + bypassStop.startState = bypassStart + + var excludeTransition Transition + var endState ATNState + + if atn.ruleToStartState[idx].isPrecedenceRule { + // Wrap from the beginning of the rule to the StarLoopEntryState + endState = nil + + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if a.stateIsEndStateFor(state, idx) != nil { + endState = state + excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0] + + break + } + } + + if excludeTransition == nil { + panic("Couldn't identify final state of the precedence rule prefix section.") + } + } else { + endState = atn.ruleToStopState[idx] + } + + // All non-excluded transitions that currently target end state need to target + // blockEnd instead + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + for j := 0; j < len(state.GetTransitions()); j++ { + transition := state.GetTransitions()[j] + + if transition == excludeTransition { + continue + } + + if transition.getTarget() == endState { + transition.setTarget(bypassStop) + } + } + } + + // All transitions leaving the rule start state need to leave blockStart instead + ruleToStartState := atn.ruleToStartState[idx] + count := len(ruleToStartState.GetTransitions()) + + for count > 0 { + bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1) + ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]}) + } + + // Link the new states + atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1) + bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1) + + MatchState := NewBasicState() + + atn.addState(MatchState) + MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1) + bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1) +} + +func (a 
*ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState { + if state.GetRuleIndex() != idx { + return nil + } + + if _, ok := state.(*StarLoopEntryState); !ok { + return nil + } + + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if _, ok := maybeLoopEndState.(*LoopEndState); !ok { + return nil + } + + var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok { + return state + } + + return nil +} + +// markPrecedenceDecisions analyzes the StarLoopEntryState states in the +// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to +// the correct value. +func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) { + for _, state := range atn.states { + if _, ok := state.(*StarLoopEntryState); !ok { + continue + } + + // We analyze the ATN to determine if a ATN decision state is the + // decision for the closure block that determines whether a + // precedence rule should continue or complete. + if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule { + maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget() + + if s3, ok := maybeLoopEndState.(*LoopEndState); ok { + var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState) + + if s3.epsilonOnlyTransitions && ok2 { + state.(*StarLoopEntryState).precedenceRuleDecision = true + } + } + } + } +} + +func (a *ATNDeserializer) verifyATN(atn *ATN) { + if !a.deserializationOptions.verifyATN { + return + } + + // Verify assumptions + for i := 0; i < len(atn.states); i++ { + state := atn.states[i] + + if state == nil { + continue + } + + a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "") + + switch s2 := state.(type) { + case *PlusBlockStartState: + a.checkCondition(s2.loopBackState != nil, "") + + case *StarLoopEntryState: + a.checkCondition(s2.loopBackState != nil, "") + a.checkCondition(len(s2.GetTransitions()) == 2, "") + + switch s2 := state.(type) { + case *StarBlockStartState: + var _, ok2 = s2.GetTransitions()[1].getTarget().(*LoopEndState) + + a.checkCondition(ok2, "") + a.checkCondition(!s2.nonGreedy, "") + + case *LoopEndState: + var s3, ok2 = s2.GetTransitions()[1].getTarget().(*StarBlockStartState) + + a.checkCondition(ok2, "") + a.checkCondition(s3.nonGreedy, "") + + default: + panic("IllegalState") + } + + case *StarLoopbackState: + a.checkCondition(len(state.GetTransitions()) == 1, "") + + var _, ok2 = state.GetTransitions()[0].getTarget().(*StarLoopEntryState) + + a.checkCondition(ok2, "") + + case *LoopEndState: + a.checkCondition(s2.loopBackState != nil, "") + + case *RuleStartState: + a.checkCondition(s2.stopState != nil, "") + + case *BaseBlockStartState: + a.checkCondition(s2.endState != nil, "") + + case *BlockEndState: + a.checkCondition(s2.startState != nil, "") + + case DecisionState: + a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "") + + default: + var _, ok = s2.(*RuleStopState) + + a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "") + } + } +} + +func (a *ATNDeserializer) checkCondition(condition bool, message string) { + if !condition { + if message == "" { + message = "IllegalState" + } + + panic(message) + } +} + +func (a *ATNDeserializer) readInt() int { + v := a.data[a.pos] + + a.pos++ + + return int(v) +} + +func (a *ATNDeserializer) readInt32() int { + var low = a.readInt() + var high = a.readInt() + return low | 
(high << 16) +} + +//TODO +//func (a *ATNDeserializer) readLong() int64 { +// panic("Not implemented") +// var low = a.readInt32() +// var high = a.readInt32() +// return (low & 0x00000000FFFFFFFF) | (high << int32) +//} + +func createByteToHex() []string { + bth := make([]string, 256) + + for i := 0; i < 256; i++ { + bth[i] = strings.ToUpper(hex.EncodeToString([]byte{byte(i)})) + } + + return bth +} + +var byteToHex = createByteToHex() + +func (a *ATNDeserializer) readUUID() string { + bb := make([]int, 16) + + for i := 7; i >= 0; i-- { + integer := a.readInt() + + bb[(2*i)+1] = integer & 0xFF + bb[2*i] = (integer >> 8) & 0xFF + } + + return byteToHex[bb[0]] + byteToHex[bb[1]] + + byteToHex[bb[2]] + byteToHex[bb[3]] + "-" + + byteToHex[bb[4]] + byteToHex[bb[5]] + "-" + + byteToHex[bb[6]] + byteToHex[bb[7]] + "-" + + byteToHex[bb[8]] + byteToHex[bb[9]] + "-" + + byteToHex[bb[10]] + byteToHex[bb[11]] + + byteToHex[bb[12]] + byteToHex[bb[13]] + + byteToHex[bb[14]] + byteToHex[bb[15]] +} + +func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition { + target := atn.states[trg] + + switch typeIndex { + case TransitionEPSILON: + return NewEpsilonTransition(target, -1) + + case TransitionRANGE: + if arg3 != 0 { + return NewRangeTransition(target, TokenEOF, arg2) + } + + return NewRangeTransition(target, arg1, arg2) + + case TransitionRULE: + return NewRuleTransition(atn.states[arg1], arg2, arg3, target) + + case TransitionPREDICATE: + return NewPredicateTransition(target, arg1, arg2, arg3 != 0) + + case TransitionPRECEDENCE: + return NewPrecedencePredicateTransition(target, arg1) + + case TransitionATOM: + if arg3 != 0 { + return NewAtomTransition(target, TokenEOF) + } + + return NewAtomTransition(target, arg1) + + case TransitionACTION: + return NewActionTransition(target, arg1, arg2, arg3 != 0) + + case TransitionSET: + return NewSetTransition(target, sets[arg1]) + + case TransitionNOTSET: + return NewNotSetTransition(target, sets[arg1]) + + case TransitionWILDCARD: + return NewWildcardTransition(target) + } + + panic("The specified transition type is not valid.") +} + +func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState { + var s ATNState + + switch typeIndex { + case ATNStateInvalidType: + return nil + + case ATNStateBasic: + s = NewBasicState() + + case ATNStateRuleStart: + s = NewRuleStartState() + + case ATNStateBlockStart: + s = NewBasicBlockStartState() + + case ATNStatePlusBlockStart: + s = NewPlusBlockStartState() + + case ATNStateStarBlockStart: + s = NewStarBlockStartState() + + case ATNStateTokenStart: + s = NewTokensStartState() + + case ATNStateRuleStop: + s = NewRuleStopState() + + case ATNStateBlockEnd: + s = NewBlockEndState() + + case ATNStateStarLoopBack: + s = NewStarLoopbackState() + + case ATNStateStarLoopEntry: + s = NewStarLoopEntryState() + + case ATNStatePlusLoopBack: + s = NewPlusLoopbackState() + + case ATNStateLoopEnd: + s = NewLoopEndState() + + default: + panic(fmt.Sprintf("state type %d is invalid", typeIndex)) + } + + s.SetRuleIndex(ruleIndex) + + return s +} + +func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction { + switch typeIndex { + case LexerActionTypeChannel: + return NewLexerChannelAction(data1) + + case LexerActionTypeCustom: + return NewLexerCustomAction(data1, data2) + + case LexerActionTypeMode: + return NewLexerModeAction(data1) + + case LexerActionTypeMore: + return LexerMoreActionINSTANCE + + case LexerActionTypePopMode: + 
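+		// Pop-mode, more, and skip actions carry no arguments, so shared
+		// singleton instances are returned rather than new allocations.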
return LexerPopModeActionINSTANCE + + case LexerActionTypePushMode: + return NewLexerPushModeAction(data1) + + case LexerActionTypeSkip: + return LexerSkipActionINSTANCE + + case LexerActionTypeType: + return NewLexerTypeAction(data1) + + default: + panic(fmt.Sprintf("lexer action %d is invalid", typeIndex)) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go new file mode 100644 index 0000000000..d5454d6d5d --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go @@ -0,0 +1,50 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false)) + +type IATNSimulator interface { + SharedContextCache() *PredictionContextCache + ATN() *ATN + DecisionToDFA() []*DFA +} + +type BaseATNSimulator struct { + atn *ATN + sharedContextCache *PredictionContextCache + decisionToDFA []*DFA +} + +func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator { + b := new(BaseATNSimulator) + + b.atn = atn + b.sharedContextCache = sharedContextCache + + return b +} + +func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext { + if b.sharedContextCache == nil { + return context + } + + visited := make(map[PredictionContext]PredictionContext) + + return getCachedBasePredictionContext(context, b.sharedContextCache, visited) +} + +func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache { + return b.sharedContextCache +} + +func (b *BaseATNSimulator) ATN() *ATN { + return b.atn +} + +func (b *BaseATNSimulator) DecisionToDFA() []*DFA { + return b.decisionToDFA +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go new file mode 100644 index 0000000000..563d5db38d --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go @@ -0,0 +1,386 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "strconv" + +// Constants for serialization. +const ( + ATNStateInvalidType = 0 + ATNStateBasic = 1 + ATNStateRuleStart = 2 + ATNStateBlockStart = 3 + ATNStatePlusBlockStart = 4 + ATNStateStarBlockStart = 5 + ATNStateTokenStart = 6 + ATNStateRuleStop = 7 + ATNStateBlockEnd = 8 + ATNStateStarLoopBack = 9 + ATNStateStarLoopEntry = 10 + ATNStatePlusLoopBack = 11 + ATNStateLoopEnd = 12 + + ATNStateInvalidStateNumber = -1 +) + +var ATNStateInitialNumTransitions = 4 + +type ATNState interface { + GetEpsilonOnlyTransitions() bool + + GetRuleIndex() int + SetRuleIndex(int) + + GetNextTokenWithinRule() *IntervalSet + SetNextTokenWithinRule(*IntervalSet) + + GetATN() *ATN + SetATN(*ATN) + + GetStateType() int + + GetStateNumber() int + SetStateNumber(int) + + GetTransitions() []Transition + SetTransitions([]Transition) + AddTransition(Transition, int) + + String() string + hash() int +} + +type BaseATNState struct { + // NextTokenWithinRule caches lookahead during parsing. Not used during construction. + NextTokenWithinRule *IntervalSet + + // atn is the current ATN. 
+ atn *ATN + + epsilonOnlyTransitions bool + + // ruleIndex tracks the Rule index because there are no Rule objects at runtime. + ruleIndex int + + stateNumber int + + stateType int + + // Track the transitions emanating from this ATN state. + transitions []Transition +} + +func NewBaseATNState() *BaseATNState { + return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType} +} + +func (as *BaseATNState) GetRuleIndex() int { + return as.ruleIndex +} + +func (as *BaseATNState) SetRuleIndex(v int) { + as.ruleIndex = v +} +func (as *BaseATNState) GetEpsilonOnlyTransitions() bool { + return as.epsilonOnlyTransitions +} + +func (as *BaseATNState) GetATN() *ATN { + return as.atn +} + +func (as *BaseATNState) SetATN(atn *ATN) { + as.atn = atn +} + +func (as *BaseATNState) GetTransitions() []Transition { + return as.transitions +} + +func (as *BaseATNState) SetTransitions(t []Transition) { + as.transitions = t +} + +func (as *BaseATNState) GetStateType() int { + return as.stateType +} + +func (as *BaseATNState) GetStateNumber() int { + return as.stateNumber +} + +func (as *BaseATNState) SetStateNumber(stateNumber int) { + as.stateNumber = stateNumber +} + +func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet { + return as.NextTokenWithinRule +} + +func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) { + as.NextTokenWithinRule = v +} + +func (as *BaseATNState) hash() int { + return as.stateNumber +} + +func (as *BaseATNState) String() string { + return strconv.Itoa(as.stateNumber) +} + +func (as *BaseATNState) equals(other interface{}) bool { + if ot, ok := other.(ATNState); ok { + return as.stateNumber == ot.GetStateNumber() + } + + return false +} + +func (as *BaseATNState) isNonGreedyExitState() bool { + return false +} + +func (as *BaseATNState) AddTransition(trans Transition, index int) { + if len(as.transitions) == 0 { + as.epsilonOnlyTransitions = trans.getIsEpsilon() + } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() { + as.epsilonOnlyTransitions = false + } + + if index == -1 { + as.transitions = append(as.transitions, trans) + } else { + as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...) + // TODO: as.transitions.splice(index, 1, trans) + } +} + +type BasicState struct { + *BaseATNState +} + +func NewBasicState() *BasicState { + b := NewBaseATNState() + + b.stateType = ATNStateBasic + + return &BasicState{BaseATNState: b} +} + +type DecisionState interface { + ATNState + + getDecision() int + setDecision(int) + + getNonGreedy() bool + setNonGreedy(bool) +} + +type BaseDecisionState struct { + *BaseATNState + decision int + nonGreedy bool +} + +func NewBaseDecisionState() *BaseDecisionState { + return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1} +} + +func (s *BaseDecisionState) getDecision() int { + return s.decision +} + +func (s *BaseDecisionState) setDecision(b int) { + s.decision = b +} + +func (s *BaseDecisionState) getNonGreedy() bool { + return s.nonGreedy +} + +func (s *BaseDecisionState) setNonGreedy(b bool) { + s.nonGreedy = b +} + +type BlockStartState interface { + DecisionState + + getEndState() *BlockEndState + setEndState(*BlockEndState) +} + +// BaseBlockStartState is the start of a regular (...) block. 
+type BaseBlockStartState struct { + *BaseDecisionState + endState *BlockEndState +} + +func NewBlockStartState() *BaseBlockStartState { + return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()} +} + +func (s *BaseBlockStartState) getEndState() *BlockEndState { + return s.endState +} + +func (s *BaseBlockStartState) setEndState(b *BlockEndState) { + s.endState = b +} + +type BasicBlockStartState struct { + *BaseBlockStartState +} + +func NewBasicBlockStartState() *BasicBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateBlockStart + + return &BasicBlockStartState{BaseBlockStartState: b} +} + +// BlockEndState is a terminal node of a simple (a|b|c) block. +type BlockEndState struct { + *BaseATNState + startState ATNState +} + +func NewBlockEndState() *BlockEndState { + b := NewBaseATNState() + + b.stateType = ATNStateBlockEnd + + return &BlockEndState{BaseATNState: b} +} + +// RuleStopState is the last node in the ATN for a rule, unless that rule is the +// start symbol. In that case, there is one transition to EOF. Later, we might +// encode references to all calls to this rule to compute FOLLOW sets for error +// handling. +type RuleStopState struct { + *BaseATNState +} + +func NewRuleStopState() *RuleStopState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStop + + return &RuleStopState{BaseATNState: b} +} + +type RuleStartState struct { + *BaseATNState + stopState ATNState + isPrecedenceRule bool +} + +func NewRuleStartState() *RuleStartState { + b := NewBaseATNState() + + b.stateType = ATNStateRuleStart + + return &RuleStartState{BaseATNState: b} +} + +// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two +// transitions: one to the loop back to start of the block, and one to exit. +type PlusLoopbackState struct { + *BaseDecisionState +} + +func NewPlusLoopbackState() *PlusLoopbackState { + b := NewBaseDecisionState() + + b.stateType = ATNStatePlusLoopBack + + return &PlusLoopbackState{BaseDecisionState: b} +} + +// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a +// decision state; we don't use it for code generation. Somebody might need it, +// it is included for completeness. In reality, PlusLoopbackState is the real +// decision-making node for A+. +type PlusBlockStartState struct { + *BaseBlockStartState + loopBackState ATNState +} + +func NewPlusBlockStartState() *PlusBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStatePlusBlockStart + + return &PlusBlockStartState{BaseBlockStartState: b} +} + +// StarBlockStartState is the block that begins a closure loop. +type StarBlockStartState struct { + *BaseBlockStartState +} + +func NewStarBlockStartState() *StarBlockStartState { + b := NewBlockStartState() + + b.stateType = ATNStateStarBlockStart + + return &StarBlockStartState{BaseBlockStartState: b} +} + +type StarLoopbackState struct { + *BaseATNState +} + +func NewStarLoopbackState() *StarLoopbackState { + b := NewBaseATNState() + + b.stateType = ATNStateStarLoopBack + + return &StarLoopbackState{BaseATNState: b} +} + +type StarLoopEntryState struct { + *BaseDecisionState + loopBackState ATNState + precedenceRuleDecision bool +} + +func NewStarLoopEntryState() *StarLoopEntryState { + b := NewBaseDecisionState() + + b.stateType = ATNStateStarLoopEntry + + // False precedenceRuleDecision indicates whether s state can benefit from a precedence DFA during SLL decision making. + return &StarLoopEntryState{BaseDecisionState: b} +} + +// LoopEndState marks the end of a * or + loop. 
+type LoopEndState struct { + *BaseATNState + loopBackState ATNState +} + +func NewLoopEndState() *LoopEndState { + b := NewBaseATNState() + + b.stateType = ATNStateLoopEnd + + return &LoopEndState{BaseATNState: b} +} + +// TokensStartState is the Tokens rule start state linking to each lexer rule start state. +type TokensStartState struct { + *BaseDecisionState +} + +func NewTokensStartState() *TokensStartState { + b := NewBaseDecisionState() + + b.stateType = ATNStateTokenStart + + return &TokensStartState{BaseDecisionState: b} +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go new file mode 100644 index 0000000000..a7b48976b3 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go @@ -0,0 +1,11 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represent the type of recognizer an ATN applies to. +const ( + ATNTypeLexer = 0 + ATNTypeParser = 1 +) diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go new file mode 100644 index 0000000000..70c1207f7f --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go @@ -0,0 +1,12 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type CharStream interface { + IntStream + GetText(int, int) string + GetTextFromTokens(start, end Token) string + GetTextFromInterval(*Interval) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go new file mode 100644 index 0000000000..330ff8f31f --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go @@ -0,0 +1,56 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// TokenFactory creates CommonToken objects. +type TokenFactory interface { + Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token +} + +// CommonTokenFactory is the default TokenFactory implementation. +type CommonTokenFactory struct { + // copyText indicates whether CommonToken.setText should be called after + // constructing tokens to explicitly set the text. This is useful for cases + // where the input stream might not be able to provide arbitrary substrings of + // text from the input after the lexer creates a token (e.g. the + // implementation of CharStream.GetText in UnbufferedCharStream panics an + // UnsupportedOperationException). Explicitly setting the token text allows + // Token.GetText to be called at any time regardless of the input stream + // implementation. + // + // The default value is false to avoid the performance and memory overhead of + // copying text for every token unless explicitly requested. + copyText bool +} + +func NewCommonTokenFactory(copyText bool) *CommonTokenFactory { + return &CommonTokenFactory{copyText: copyText} +} + +// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. 
+// It does not explicitly copy token text when constructing tokens.
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+	t := NewCommonToken(source, ttype, channel, start, stop)
+
+	t.line = line
+	t.column = column
+
+	if text != "" {
+		t.SetText(text)
+	} else if c.copyText && source.charStream != nil {
+		t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+	}
+
+	return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+	t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+	t.SetText(text)
+
+	return t
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
new file mode 100644
index 0000000000..c90e9b8904
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens to
+// only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+	channel int
+
+	// fetchedEOF indicates whether the Token.EOF token has been fetched from
+	// tokenSource and added to tokens. This field improves performance for the
+	// following cases:
+	//
+	// consume: The lookahead check in consume to prevent consuming the EOF symbol
+	// is optimized by checking the values of fetchedEOF and p instead of calling LA.
+	//
+	// fetch: The check to prevent adding multiple EOF symbols into tokens is
+	// trivial with this field.
+	fetchedEOF bool
+
+	// index indexes into tokens for the current token (the next token to consume).
+	// tokens[p] should be LT(1). It is set to -1 when the stream is first
+	// constructed or when SetTokenSource is called, indicating that the first token
+	// has not yet been fetched from the token source. For additional information,
+	// see the documentation of IntStream for a description of initializing methods.
+	index int
+
+	// tokenSource is the TokenSource from which tokens for this stream are
+	// fetched.
+	tokenSource TokenSource
+
+	// tokens is all tokens fetched from the token source. The list is considered a
+	// complete view of the input once fetchedEOF is set to true.
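+	// Once fetchedEOF is true, fetch becomes a no-op, so this slice can no
+	// longer grow.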
+ tokens []Token +} + +func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream { + return &CommonTokenStream{ + channel: channel, + index: -1, + tokenSource: lexer, + tokens: make([]Token, 0), + } +} + +func (c *CommonTokenStream) GetAllTokens() []Token { + return c.tokens +} + +func (c *CommonTokenStream) Mark() int { + return 0 +} + +func (c *CommonTokenStream) Release(marker int) {} + +func (c *CommonTokenStream) reset() { + c.Seek(0) +} + +func (c *CommonTokenStream) Seek(index int) { + c.lazyInit() + c.index = c.adjustSeekIndex(index) +} + +func (c *CommonTokenStream) Get(index int) Token { + c.lazyInit() + + return c.tokens[index] +} + +func (c *CommonTokenStream) Consume() { + SkipEOFCheck := false + + if c.index >= 0 { + if c.fetchedEOF { + // The last token in tokens is EOF. Skip the check if p indexes any fetched. + // token except the last. + SkipEOFCheck = c.index < len(c.tokens)-1 + } else { + // No EOF token in tokens. Skip the check if p indexes a fetched token. + SkipEOFCheck = c.index < len(c.tokens) + } + } else { + // Not yet initialized + SkipEOFCheck = false + } + + if !SkipEOFCheck && c.LA(1) == TokenEOF { + panic("cannot consume EOF") + } + + if c.Sync(c.index + 1) { + c.index = c.adjustSeekIndex(c.index + 1) + } +} + +// Sync makes sure index i in tokens has a token and returns true if a token is +// located at index i and otherwise false. +func (c *CommonTokenStream) Sync(i int) bool { + n := i - len(c.tokens) + 1 // TODO: How many more elements do we need? + + if n > 0 { + fetched := c.fetch(n) + return fetched >= n + } + + return true +} + +// fetch adds n elements to buffer and returns the actual number of elements +// added to the buffer. +func (c *CommonTokenStream) fetch(n int) int { + if c.fetchedEOF { + return 0 + } + + for i := 0; i < n; i++ { + t := c.tokenSource.NextToken() + + t.SetTokenIndex(len(c.tokens)) + c.tokens = append(c.tokens, t) + + if t.GetTokenType() == TokenEOF { + c.fetchedEOF = true + + return i + 1 + } + } + + return n +} + +// GetTokens gets all tokens from start to stop inclusive. +func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token { + if start < 0 || stop < 0 { + return nil + } + + c.lazyInit() + + subset := make([]Token, 0) + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + for i := start; i < stop; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + if types == nil || types.contains(t.GetTokenType()) { + subset = append(subset, t) + } + } + + return subset +} + +func (c *CommonTokenStream) LA(i int) int { + return c.LT(i).GetTokenType() +} + +func (c *CommonTokenStream) lazyInit() { + if c.index == -1 { + c.setup() + } +} + +func (c *CommonTokenStream) setup() { + c.Sync(0) + c.index = c.adjustSeekIndex(0) +} + +func (c *CommonTokenStream) GetTokenSource() TokenSource { + return c.tokenSource +} + +// SetTokenSource resets the c token stream by setting its token source. +func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) { + c.tokenSource = tokenSource + c.tokens = make([]Token, 0) + c.index = -1 +} + +// NextTokenOnChannel returns the index of the next token on channel given a +// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are +// no tokens on channel between i and EOF. 
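Before the channel-navigation helpers that follow, a typical end-to-end use of the stream; MyLexer is a hypothetical stand-in for any ANTLR-generated lexer and is not part of this patch:

package main

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

func main() {
	// NewMyLexer is hypothetical; substitute any generated lexer constructor.
	input := antlr.NewInputStream("a + b")
	lexer := NewMyLexer(input)
	tokens := antlr.NewCommonTokenStream(lexer, antlr.TokenDefaultChannel)
	tokens.Fill() // eagerly fetch every token through EOF
	for _, t := range tokens.GetAllTokens() {
		fmt.Println(t.GetText())
	}
}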
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int { + c.Sync(i) + + if i >= len(c.tokens) { + return -1 + } + + token := c.tokens[i] + + for token.GetChannel() != c.channel { + if token.GetTokenType() == TokenEOF { + return -1 + } + + i++ + c.Sync(i) + token = c.tokens[i] + } + + return i +} + +// previousTokenOnChannel returns the index of the previous token on channel +// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if +// there are no tokens on channel between i and 0. +func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int { + for i >= 0 && c.tokens[i].GetChannel() != channel { + i-- + } + + return i +} + +// GetHiddenTokensToRight collects all tokens on a specified channel to the +// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL +// or EOF. If channel is -1, it finds any non-default channel token. +func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1)) + } + + nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel) + from := tokenIndex + 1 + + // If no onchannel to the right, then nextOnChannel == -1, so set to to last token + var to int + + if nextOnChannel == -1 { + to = len(c.tokens) - 1 + } else { + to = nextOnChannel + } + + return c.filterForChannel(from, to, channel) +} + +// GetHiddenTokensToLeft collects all tokens on channel to the left of the +// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is +// -1, it finds any non default channel token. +func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token { + c.lazyInit() + + if tokenIndex < 0 || tokenIndex >= len(c.tokens) { + panic(strconv.Itoa(tokenIndex) + " not in 0.." 
+ strconv.Itoa(len(c.tokens)-1)) + } + + prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel) + + if prevOnChannel == tokenIndex-1 { + return nil + } + + // If there are none on channel to the left and prevOnChannel == -1 then from = 0 + from := prevOnChannel + 1 + to := tokenIndex - 1 + + return c.filterForChannel(from, to, channel) +} + +func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token { + hidden := make([]Token, 0) + + for i := left; i < right+1; i++ { + t := c.tokens[i] + + if channel == -1 { + if t.GetChannel() != LexerDefaultTokenChannel { + hidden = append(hidden, t) + } + } else if t.GetChannel() == channel { + hidden = append(hidden, t) + } + } + + if len(hidden) == 0 { + return nil + } + + return hidden +} + +func (c *CommonTokenStream) GetSourceName() string { + return c.tokenSource.GetSourceName() +} + +func (c *CommonTokenStream) Size() int { + return len(c.tokens) +} + +func (c *CommonTokenStream) Index() int { + return c.index +} + +func (c *CommonTokenStream) GetAllText() string { + return c.GetTextFromInterval(nil) +} + +func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string { + if start == nil || end == nil { + return "" + } + + return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex())) +} + +func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string { + return c.GetTextFromInterval(interval.GetSourceInterval()) +} + +func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string { + c.lazyInit() + c.Fill() + + if interval == nil { + interval = NewInterval(0, len(c.tokens)-1) + } + + start := interval.Start + stop := interval.Stop + + if start < 0 || stop < 0 { + return "" + } + + if stop >= len(c.tokens) { + stop = len(c.tokens) - 1 + } + + s := "" + + for i := start; i < stop+1; i++ { + t := c.tokens[i] + + if t.GetTokenType() == TokenEOF { + break + } + + s += t.GetText() + } + + return s +} + +// Fill gets all tokens from the lexer until EOF. +func (c *CommonTokenStream) Fill() { + c.lazyInit() + + for c.fetch(1000) == 1000 { + continue + } +} + +func (c *CommonTokenStream) adjustSeekIndex(i int) int { + return c.NextTokenOnChannel(i, c.channel) +} + +func (c *CommonTokenStream) LB(k int) Token { + if k == 0 || c.index-k < 0 { + return nil + } + + i := c.index + n := 1 + + // Find k good tokens looking backward + for n <= k { + // Skip off-channel tokens + i = c.previousTokenOnChannel(i-1, c.channel) + n++ + } + + if i < 0 { + return nil + } + + return c.tokens[i] +} + +func (c *CommonTokenStream) LT(k int) Token { + c.lazyInit() + + if k == 0 { + return nil + } + + if k < 0 { + return c.LB(-k) + } + + i := c.index + n := 1 // We know tokens[n] is valid + + // Find k good tokens + for n < k { + // Skip off-channel tokens, but make sure to not look past EOF + if c.Sync(i + 1) { + i = c.NextTokenOnChannel(i+1, c.channel) + } + + n++ + } + + return c.tokens[i] +} + +// getNumberOfOnChannelTokens counts EOF once. 
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int { + var n int + + c.Fill() + + for i := 0; i < len(c.tokens); i++ { + t := c.tokens[i] + + if t.GetChannel() == c.channel { + n++ + } + + if t.GetTokenType() == TokenEOF { + break + } + } + + return n +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go new file mode 100644 index 0000000000..d6079aa203 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go @@ -0,0 +1,171 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "sort" + "sync" +) + +type DFA struct { + // atnStartState is the ATN state in which this was created + atnStartState DecisionState + + decision int + + // states is all the DFA states. Use Map to get the old state back; Set can only + // indicate whether it is there. + states map[int]*DFAState + statesMu sync.RWMutex + + s0 *DFAState + s0Mu sync.RWMutex + + // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa. + // True if the DFA is for a precedence decision and false otherwise. + precedenceDfa bool +} + +func NewDFA(atnStartState DecisionState, decision int) *DFA { + return &DFA{ + atnStartState: atnStartState, + decision: decision, + states: make(map[int]*DFAState), + } +} + +// getPrecedenceStartState gets the start state for the current precedence and +// returns the start state corresponding to the specified precedence if a start +// state exists for the specified precedence and nil otherwise. d must be a +// precedence DFA. See also isPrecedenceDfa. +func (d *DFA) getPrecedenceStartState(precedence int) *DFAState { + if !d.precedenceDfa { + panic("only precedence DFAs may contain a precedence start state") + } + + d.s0Mu.RLock() + defer d.s0Mu.RUnlock() + + // s0.edges is never nil for a precedence DFA + if precedence < 0 || precedence >= len(d.s0.edges) { + return nil + } + + return d.s0.edges[precedence] +} + +// setPrecedenceStartState sets the start state for the current precedence. d +// must be a precedence DFA. See also isPrecedenceDfa. +func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) { + if !d.precedenceDfa { + panic("only precedence DFAs may contain a precedence start state") + } + + if precedence < 0 { + return + } + + d.s0Mu.Lock() + defer d.s0Mu.Unlock() + + // Synchronization on s0 here is ok. When the DFA is turned into a + // precedence DFA, s0 will be initialized once and not updated again. s0.edges + // is never nil for a precedence DFA. + if precedence >= len(d.s0.edges) { + d.s0.edges = append(d.s0.edges, make([]*DFAState, precedence+1-len(d.s0.edges))...) + } + + d.s0.edges[precedence] = startState +} + +// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs +// from the current DFA configuration, then d.states is cleared, the initial +// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to +// store the start states for individual precedence values if precedenceDfa is +// true or nil otherwise, and d.precedenceDfa is updated. 
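Before setPrecedenceDfa below, a note on the locking discipline: states and s0 are guarded by RWMutexes because one DFA may be shared by concurrent parses; lookups take the read lock and mutations the write lock. The same pattern in miniature (illustrative sketch only, standard library sync assumed):

// rwCache mirrors the locking discipline of getState/setState below.
type rwCache struct {
	mu sync.RWMutex
	m  map[int]*DFAState
}

func (c *rwCache) get(k int) (*DFAState, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.m[k]
	return v, ok
}

func (c *rwCache) put(k int, v *DFAState) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[k] = v
}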
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) { + if d.precedenceDfa != precedenceDfa { + d.states = make(map[int]*DFAState) + + if precedenceDfa { + precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false)) + + precedenceState.edges = make([]*DFAState, 0) + precedenceState.isAcceptState = false + precedenceState.requiresFullContext = false + d.s0 = precedenceState + } else { + d.s0 = nil + } + + d.precedenceDfa = precedenceDfa + } +} + +func (d *DFA) getS0() *DFAState { + d.s0Mu.RLock() + defer d.s0Mu.RUnlock() + return d.s0 +} + +func (d *DFA) setS0(s *DFAState) { + d.s0Mu.Lock() + defer d.s0Mu.Unlock() + d.s0 = s +} + +func (d *DFA) getState(hash int) (*DFAState, bool) { + d.statesMu.RLock() + defer d.statesMu.RUnlock() + s, ok := d.states[hash] + return s, ok +} + +func (d *DFA) setState(hash int, state *DFAState) { + d.statesMu.Lock() + defer d.statesMu.Unlock() + d.states[hash] = state +} + +func (d *DFA) numStates() int { + d.statesMu.RLock() + defer d.statesMu.RUnlock() + return len(d.states) +} + +type dfaStateList []*DFAState + +func (d dfaStateList) Len() int { return len(d) } +func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber } +func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] } + +// sortedStates returns the states in d sorted by their state number. +func (d *DFA) sortedStates() []*DFAState { + vs := make([]*DFAState, 0, len(d.states)) + + for _, v := range d.states { + vs = append(vs, v) + } + + sort.Sort(dfaStateList(vs)) + + return vs +} + +func (d *DFA) String(literalNames []string, symbolicNames []string) string { + if d.s0 == nil { + return "" + } + + return NewDFASerializer(d, literalNames, symbolicNames).String() +} + +func (d *DFA) ToLexerString() string { + if d.s0 == nil { + return "" + } + + return NewLexerDFASerializer(d).String() +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go new file mode 100644 index 0000000000..4c0f690229 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go @@ -0,0 +1,152 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// DFASerializer is a DFA walker that knows how to dump them to serialized +// strings. 
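Before the serializer type below, a note on sortedStates above: the named dfaStateList type exists only to satisfy sort.Interface; since Go 1.8 the same ordering can be written inline with sort.Slice:

// Equivalent to sort.Sort(dfaStateList(vs)).
sort.Slice(vs, func(i, j int) bool { return vs[i].stateNumber < vs[j].stateNumber })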
+type DFASerializer struct { + dfa *DFA + literalNames []string + symbolicNames []string +} + +func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer { + if literalNames == nil { + literalNames = make([]string, 0) + } + + if symbolicNames == nil { + symbolicNames = make([]string, 0) + } + + return &DFASerializer{ + dfa: dfa, + literalNames: literalNames, + symbolicNames: symbolicNames, + } +} + +func (d *DFASerializer) String() string { + if d.dfa.s0 == nil { + return "" + } + + buf := "" + states := d.dfa.sortedStates() + + for _, s := range states { + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += d.GetStateString(s) + buf += "-" + buf += d.getEdgeLabel(j) + buf += "->" + buf += d.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} + +func (d *DFASerializer) getEdgeLabel(i int) string { + if i == 0 { + return "EOF" + } else if d.literalNames != nil && i-1 < len(d.literalNames) { + return d.literalNames[i-1] + } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) { + return d.symbolicNames[i-1] + } + + return strconv.Itoa(i - 1) +} + +func (d *DFASerializer) GetStateString(s *DFAState) string { + var a, b string + + if s.isAcceptState { + a = ":" + } + + if s.requiresFullContext { + b = "^" + } + + baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b + + if s.isAcceptState { + if s.predicates != nil { + return baseStateStr + "=>" + fmt.Sprint(s.predicates) + } + + return baseStateStr + "=>" + fmt.Sprint(s.prediction) + } + + return baseStateStr +} + +type LexerDFASerializer struct { + *DFASerializer +} + +func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer { + return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)} +} + +func (l *LexerDFASerializer) getEdgeLabel(i int) string { + return "'" + string(i) + "'" +} + +func (l *LexerDFASerializer) String() string { + if l.dfa.s0 == nil { + return "" + } + + buf := "" + states := l.dfa.sortedStates() + + for i := 0; i < len(states); i++ { + s := states[i] + + if s.edges != nil { + n := len(s.edges) + + for j := 0; j < n; j++ { + t := s.edges[j] + + if t != nil && t.stateNumber != 0x7FFFFFFF { + buf += l.GetStateString(s) + buf += "-" + buf += l.getEdgeLabel(j) + buf += "->" + buf += l.GetStateString(t) + buf += "\n" + } + } + } + } + + if len(buf) == 0 { + return "" + } + + return buf +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go new file mode 100644 index 0000000000..38e918ad91 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go @@ -0,0 +1,166 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" +) + +// PredPrediction maps a predicate to a predicted alternative. +type PredPrediction struct { + alt int + pred SemanticContext +} + +func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction { + return &PredPrediction{alt: alt, pred: pred} +} + +func (p *PredPrediction) String() string { + return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")" +} + +// DFAState represents a set of possible ATN configurations. As Aho, Sethi, +// Ullman p. 
117 says: "The DFA uses its state to keep track of all possible +// states the ATN can be in after reading each input symbol. That is to say, +// after reading input a1a2..an, the DFA is in a state that represents the +// subset T of the states of the ATN that are reachable from the ATN's start +// state along some path labeled a1a2..an." In conventional NFA-to-DFA +// conversion, therefore, the subset T would be a bitset representing the set of +// states the ATN could be in. We need to track the alt predicted by each state +// as well, however. More importantly, we need to maintain a stack of states, +// tracking the closure operations as they jump from rule to rule, emulating +// rule invocations (method calls). I have to add a stack to simulate the proper +// lookahead sequences for the underlying LL grammar from which the ATN was +// derived. +// +// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a +// state (ala normal conversion) and a RuleContext describing the chain of rules +// (if any) followed to arrive at that state. +// +// A DFAState may have multiple references to a particular state, but with +// different ATN contexts (with same or different alts) meaning that state was +// reached via a different set of rule invocations. +type DFAState struct { + stateNumber int + configs ATNConfigSet + + // edges elements point to the target of the symbol. Shift up by 1 so (-1) + // Token.EOF maps to the first element. + edges []*DFAState + + isAcceptState bool + + // prediction is the ttype we match or alt we predict if the state is accept. + // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or + // requiresFullContext. + prediction int + + lexerActionExecutor *LexerActionExecutor + + // requiresFullContext indicates it was created during an SLL prediction that + // discovered a conflict between the configurations in the state. Future + // ParserATNSimulator.execATN invocations immediately jump doing + // full context prediction if true. + requiresFullContext bool + + // predicates is the predicates associated with the ATN configurations of the + // DFA state during SLL parsing. When we have predicates, requiresFullContext + // is false, since full context prediction evaluates predicates on-the-fly. If + // d is + // not nil, then prediction is ATN.INVALID_ALT_NUMBER. + // + // We only use these for non-requiresFullContext but conflicting states. That + // means we know from the context (it's $ or we don't dip into outer context) + // that it's an ambiguity not a conflict. + // + // This list is computed by + // ParserATNSimulator.predicateDFAState. + predicates []*PredPrediction +} + +func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState { + if configs == nil { + configs = NewBaseATNConfigSet(false) + } + + return &DFAState{configs: configs, stateNumber: stateNumber} +} + +// GetAltSet gets the set of all alts mentioned by all ATN configurations in d. +func (d *DFAState) GetAltSet() *Set { + alts := NewSet(nil, nil) + + if d.configs != nil { + for _, c := range d.configs.GetItems() { + alts.add(c.GetAlt()) + } + } + + if alts.length() == 0 { + return nil + } + + return alts +} + +func (d *DFAState) setPrediction(v int) { + d.prediction = v +} + +// equals returns whether d equals other. Two DFAStates are equal if their ATN +// configuration sets are the same. This method is used to see if a state +// already exists. 
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has this exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) equals(other interface{}) bool {
+	if d == other {
+		return true
+	} else if _, ok := other.(*DFAState); !ok {
+		return false
+	}
+
+	return d.configs.Equals(other.(*DFAState).configs)
+}
+
+func (d *DFAState) String() string {
+	var s string
+	if d.isAcceptState {
+		if d.predicates != nil {
+			s = "=>" + fmt.Sprint(d.predicates)
+		} else {
+			s = "=>" + fmt.Sprint(d.prediction)
+		}
+	}
+
+	return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+func (d *DFAState) hash() int {
+	h := murmurInit(11)
+
+	c := 1
+	if d.isAcceptState {
+		if d.predicates != nil {
+			for _, p := range d.predicates {
+				h = murmurUpdate(h, p.alt)
+				h = murmurUpdate(h, p.pred.hash())
+				c += 2
+			}
+		} else {
+			h = murmurUpdate(h, d.prediction)
+			c += 1
+		}
+	}
+
+	h = murmurUpdate(h, d.configs.hash())
+	return murmurFinish(h, c)
+}
\ No newline at end of file
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
new file mode 100644
index 0000000000..1fec43d9dc
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"strconv"
+)
+
+// This implementation of {@link ANTLRErrorListener} can be used to identify
+// certain potential correctness and performance problems in grammars. "reports"
+// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
+// message.
+//
+//   - Ambiguities: These are cases where more than one path through the
+//     grammar can Match the input.
+//
+//   - Weak context sensitivity: These are cases where full-context
+//     prediction resolved an SLL conflict to a unique alternative which equaled the
+//     minimum alternative of the SLL conflict.
+//
+//   - Strong (forced) context sensitivity: These are cases where the
+//     full-context prediction resolved an SLL conflict to a unique alternative,
+//     and the minimum alternative of the SLL conflict was found to not be
+//     a truly viable alternative. Two-stage parsing cannot be used for inputs where
+//     this situation occurs.
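A parser opts in to these diagnostics by registering the listener; a hedged wiring sketch, where p stands for any generated parser and is not defined in this patch:

// Hypothetical wiring; p is any generated parser embedding BaseParser.
p.RemoveErrorListeners()
p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) // exact ambiguities only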
+ +type DiagnosticErrorListener struct { + *DefaultErrorListener + + exactOnly bool +} + +func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener { + + n := new(DiagnosticErrorListener) + + // whether all ambiguities or only exact ambiguities are Reported. + n.exactOnly = exactOnly + return n +} + +func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if d.exactOnly && !exact { + return + } + msg := "reportAmbiguity d=" + + d.getDecisionDescription(recognizer, dfa) + + ": ambigAlts=" + + d.getConflictingAlts(ambigAlts, configs).String() + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + + msg := "reportAttemptingFullContext d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + msg := "reportContextSensitivity d=" + + d.getDecisionDescription(recognizer, dfa) + + ", input='" + + recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'" + recognizer.NotifyErrorListeners(msg, nil, nil) +} + +func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string { + decision := dfa.decision + ruleIndex := dfa.atnStartState.GetRuleIndex() + + ruleNames := recognizer.GetRuleNames() + if ruleIndex < 0 || ruleIndex >= len(ruleNames) { + return strconv.Itoa(decision) + } + ruleName := ruleNames[ruleIndex] + if ruleName == "" { + return strconv.Itoa(decision) + } + return strconv.Itoa(decision) + " (" + ruleName + ")" +} + +// +// Computes the set of conflicting or ambiguous alternatives from a +// configuration set, if that information was not already provided by the +// parser. +// +// @param ReportedAlts The set of conflicting or ambiguous alternatives, as +// Reported by the parser. +// @param configs The conflicting or ambiguous configuration set. +// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise +// returns the set of alternatives represented in {@code configs}. +// +func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet { + if ReportedAlts != nil { + return ReportedAlts + } + result := NewBitSet() + for _, c := range set.GetItems() { + result.add(c.GetAlt()) + } + + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go new file mode 100644 index 0000000000..028e1a9d7f --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go @@ -0,0 +1,108 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "os" + "strconv" +) + +// Provides an empty default implementation of {@link ANTLRErrorListener}. 
The +// default implementation of each method does nothing, but can be overridden as +// necessary. + +type ErrorListener interface { + SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) + ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) + ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) + ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) +} + +type DefaultErrorListener struct { +} + +func NewDefaultErrorListener() *DefaultErrorListener { + return new(DefaultErrorListener) +} + +func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { +} + +func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { +} + +func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { +} + +type ConsoleErrorListener struct { + *DefaultErrorListener +} + +func NewConsoleErrorListener() *ConsoleErrorListener { + return new(ConsoleErrorListener) +} + +// +// Provides a default instance of {@link ConsoleErrorListener}. +// +var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener() + +// +// {@inheritDoc} +// +//
+// This implementation prints messages to os.Stderr containing the
+// values of line, column, and msg using the following format:
+//
+//	line <line>:<column> <msg>
+// +func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg) +} + +type ProxyErrorListener struct { + *DefaultErrorListener + delegates []ErrorListener +} + +func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener { + if delegates == nil { + panic("delegates is not provided") + } + l := new(ProxyErrorListener) + l.delegates = delegates + return l +} + +func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) { + for _, d := range p.delegates { + d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e) + } +} + +func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) { + for _, d := range p.delegates { + d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go new file mode 100644 index 0000000000..977a6e4549 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go @@ -0,0 +1,758 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "reflect" + "strconv" + "strings" +) + +type ErrorStrategy interface { + reset(Parser) + RecoverInline(Parser) Token + Recover(Parser, RecognitionException) + Sync(Parser) + inErrorRecoveryMode(Parser) bool + ReportError(Parser, RecognitionException) + ReportMatch(Parser) +} + +// This is the default implementation of {@link ANTLRErrorStrategy} used for +// error Reporting and recovery in ANTLR parsers. +// +type DefaultErrorStrategy struct { + errorRecoveryMode bool + lastErrorIndex int + lastErrorStates *IntervalSet +} + +var _ ErrorStrategy = &DefaultErrorStrategy{} + +func NewDefaultErrorStrategy() *DefaultErrorStrategy { + + d := new(DefaultErrorStrategy) + + // Indicates whether the error strategy is currently "recovering from an + // error". This is used to suppress Reporting multiple error messages while + // attempting to recover from a detected syntax error. + // + // @see //inErrorRecoveryMode + // + d.errorRecoveryMode = false + + // The index into the input stream where the last error occurred. + // This is used to prevent infinite loops where an error is found + // but no token is consumed during recovery...another error is found, + // ad nauseum. This is a failsafe mechanism to guarantee that at least + // one token/tree node is consumed for two errors. 
+	//
+	d.lastErrorIndex = -1
+	d.lastErrorStates = nil
+	return d
+}
+
+// The default implementation simply calls {@link //endErrorCondition} to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// +// This method is called to enter error recovery mode when a recognition +// exception is Reported. +// +// @param recognizer the parser instance +// +func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) { + d.errorRecoveryMode = true +} + +func (d *DefaultErrorStrategy) inErrorRecoveryMode(recognizer Parser) bool { + return d.errorRecoveryMode +} + +// +// This method is called to leave error recovery mode after recovering from +// a recognition exception. +// +// @param recognizer +// +func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) { + d.errorRecoveryMode = false + d.lastErrorStates = nil + d.lastErrorIndex = -1 +} + +// +// {@inheritDoc} +// +//
+// The default implementation simply calls {@link //endErrorCondition}.
+// +func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) { + d.endErrorCondition(recognizer) +} + +// +// {@inheritDoc} +// +//
+// The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following table:
+//
+//   - {@link NoViableAltException}: Dispatches the call to
+//     {@link //ReportNoViableAlternative}
+//   - {@link InputMisMatchException}: Dispatches the call to
+//     {@link //ReportInputMisMatch}
+//   - {@link FailedPredicateException}: Dispatches the call to
+//     {@link //ReportFailedPredicate}
+//   - All other types: calls {@link Parser//NotifyErrorListeners} to Report
+//     the exception
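+//
+// For example (illustrative only), a {@link NoViableAltException} ultimately
+// surfaces to the listeners as a message like:
+//
+//	no viable alternative at input 'int ;'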
+// +func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) { + // if we've already Reported an error and have not Matched a token + // yet successfully, don't Report any errors. + if d.inErrorRecoveryMode(recognizer) { + return // don't Report spurious errors + } + d.beginErrorCondition(recognizer) + + switch t := e.(type) { + default: + fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name()) + // fmt.Println(e.stack) + recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e) + case *NoViableAltException: + d.ReportNoViableAlternative(recognizer, t) + case *InputMisMatchException: + d.ReportInputMisMatch(recognizer, t) + case *FailedPredicateException: + d.ReportFailedPredicate(recognizer, t) + } +} + +// {@inheritDoc} +// +//
+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set -- loosely the set of tokens
+// that can follow the current rule.
+//
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+
+	if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+		d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+		// uh oh, another error at same token index and previously-Visited
+		// state in ATN must be a case where LT(1) is in the recovery
+		// token set so nothing got consumed. Consume a single token
+		// at least to prevent an infinite loop; this is a failsafe.
+		recognizer.Consume()
+	}
+	d.lastErrorIndex = recognizer.GetInputStream().Index()
+	if d.lastErrorStates == nil {
+		d.lastErrorStates = NewIntervalSet()
+	}
+	d.lastErrorStates.addOne(recognizer.GetState())
+	followSet := d.getErrorRecoverySet(recognizer)
+	d.consumeUntil(recognizer, followSet)
+}
+
+// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
+// that the current lookahead symbol is consistent with what we were expecting
+// at this point in the ATN. You can call this anytime but ANTLR only
+// generates code to check before subrules/loops and each iteration.
+//
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
+//
+//	a : Sync ( stuff Sync )*
+//	Sync : {consume to what can follow Sync}
+//
+// At the start of a sub rule upon error, {@link //Sync} performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or a block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// ORIGINS
+//
+// Previous versions of ANTLR did a poor job of their recovery within loops.
+// A single mismatched or missing token would force the parser to bail
+// out of the entire rules surrounding the loop. So, for rule
+//
+//	classfunc : 'class' ID '{' member* '}'
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality costs a little effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this
+// functionality by simply overriding this method as a blank { }.
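+//
+// For illustration (an example input, not from the runtime itself), given the
+// classfunc rule above and the slightly corrupt input below, Sync would Report
+// the stray 'x' and resume at the next member rather than abandoning the whole
+// class body:
+//
+//	class T { int a; x int b; }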
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+	// If already recovering, don't try to Sync
+	if d.inErrorRecoveryMode(recognizer) {
+		return
+	}
+
+	s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+	la := recognizer.GetTokenStream().LA(1)
+
+	// try cheaper subset first; might get lucky. seems to shave a wee bit off
+	nextTokens := recognizer.GetATN().NextTokens(s, nil)
+	if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+		return
+	}
+
+	switch s.GetStateType() {
+	case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+		// Report error and recover if possible
+		if d.SingleTokenDeletion(recognizer) != nil {
+			return
+		}
+		panic(NewInputMisMatchException(recognizer))
+	case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+		d.ReportUnwantedToken(recognizer)
+		expecting := NewIntervalSet()
+		expecting.addSet(recognizer.GetExpectedTokens())
+		whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
+		d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+	default:
+		// do nothing if we can't identify the exact kind of ATN state
+	}
+}
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link NoViableAltException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+	tokens := recognizer.GetTokenStream()
+	var input string
+	if tokens != nil {
+		if e.startToken.GetTokenType() == TokenEOF {
+			input = "<EOF>"
+		} else {
+			input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+		}
+	} else {
+		input = "<unknown input>"
+	}
+	msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This is called by {@link //ReportError} when the exception is an
+// {@link InputMisMatchException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (this *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+	msg := "mismatched input " + this.GetTokenErrorDisplay(e.offendingToken) +
+		" expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link FailedPredicateException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+	ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+	msg := "rule " + ruleName + " " + e.message
+	recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This method is called to Report a syntax error which requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
+// removed from the input stream. When this method returns,
+// {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenDeletion} identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
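+// For example (illustrative only), an extra ';' inside a call like
+// {@code f(x;)} might be Reported as:
+//
+//	extraneous input ';' expecting ')'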
+// @param recognizer the parser instance
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+	if d.inErrorRecoveryMode(recognizer) {
+		return
+	}
+	d.beginErrorCondition(recognizer)
+	t := recognizer.GetCurrentToken()
+	tokenName := d.GetTokenErrorDisplay(t)
+	expecting := d.GetExpectedTokens(recognizer)
+	msg := "extraneous input " + tokenName + " expecting " +
+		expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+	recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// This method is called to Report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenInsertion} identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
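+// For example (illustrative only), a dropped ')' in {@code f(x;} might be
+// Reported as:
+//
+//	missing ')' at ';'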
+// +// @param recognizer the parser instance +// +func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) { + if d.inErrorRecoveryMode(recognizer) { + return + } + d.beginErrorCondition(recognizer) + t := recognizer.GetCurrentToken() + expecting := d.GetExpectedTokens(recognizer) + msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) + + " at " + d.GetTokenErrorDisplay(t) + recognizer.NotifyErrorListeners(msg, t, nil) +} + +//
+// The default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with an
+// {@link InputMisMatchException}.
+//
+// EXTRA TOKEN (single token deletion)
+//
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the {@code LA(2)} token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by {@link //singleTokenDeletion}.
+//
+// MISSING TOKEN (single token insertion)
+//
+// If the current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by {@link //singleTokenInsertion}.
+//
+// EXAMPLE
+//
+// For example, Input {@code i=(3} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// call chain:
+//
+//	stat -> expr -> atom
+//
+// and it will be trying to Match the {@code ')'} at this point in the
+// derivation:
+//
+//	=> ID '=' '(' INT ')' ('+' atom)* <EOF>
+//	                    ^
+//
+// The attempt to Match {@code ')'} will fail when it sees {@code <EOF>} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==<EOF>}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+	// SINGLE TOKEN DELETION
+	MatchedSymbol := d.SingleTokenDeletion(recognizer)
+	if MatchedSymbol != nil {
+		// we have deleted the extra token.
+		// now, move past ttype token as if all were ok
+		recognizer.Consume()
+		return MatchedSymbol
+	}
+	// SINGLE TOKEN INSERTION
+	if d.SingleTokenInsertion(recognizer) {
+		return d.GetMissingSymbol(recognizer)
+	}
+	// even that didn't work; must panic the exception
+	panic(NewInputMisMatchException(recognizer))
+}
+
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns {@code true}, {@code recognizer} will be in error recovery
+// mode.
+//
+// This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If this method returns
+// {@code true}, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
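+// For illustration, with a hypothetical rule {@code stat : ID '=' expr ';'}
+// and input {@code x 5;}, the missing {@code '='} can be conjured up because
+// {@code LA(1)} (the {@code 5}) can start what follows the {@code '='}.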
+// +// @param recognizer the parser instance +// @return {@code true} if single-token insertion is a viable recovery +// strategy for the current mismatched input, otherwise {@code false} +// +func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool { + currentSymbolType := recognizer.GetTokenStream().LA(1) + // if current token is consistent with what could come after current + // ATN state, then we know we're missing a token error recovery + // is free to conjure up and insert the missing token + atn := recognizer.GetInterpreter().atn + currentState := atn.states[recognizer.GetState()] + next := currentState.GetTransitions()[0].getTarget() + expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext()) + if expectingAtLL2.contains(currentSymbolType) { + d.ReportMissingToken(recognizer) + return true + } + + return false +} + +// This method implements the single-token deletion inline error recovery +// strategy. It is called by {@link //recoverInline} to attempt to recover +// from mismatched input. If this method returns nil, the parser and error +// handler state will not have changed. If this method returns non-nil, +// {@code recognizer} will not be in error recovery mode since the +// returned token was a successful Match. +// +//
+// If the single-token deletion is successful, this method calls
+// {@link //ReportUnwantedToken} to Report the error, followed by
+// {@link Parser//consume} to actually "delete" the extraneous token. Then,
+// before returning, {@link //ReportMatch} is called to signal a successful
+// Match.
+//
+// @param recognizer the parser instance
+// @return the successfully Matched {@link Token} instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise
+// {@code nil}
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+	NextTokenType := recognizer.GetTokenStream().LA(2)
+	expecting := d.GetExpectedTokens(recognizer)
+	if expecting.contains(NextTokenType) {
+		d.ReportUnwantedToken(recognizer)
+		// print("recoverFromMisMatchedToken deleting " \
+		// + str(recognizer.GetTokenStream().LT(1)) \
+		// + " since " + str(recognizer.GetTokenStream().LT(2)) \
+		// + " is what we want", file=sys.stderr)
+		recognizer.Consume() // simply delete extra token
+		// we want to return the token we're actually Matching
+		MatchedSymbol := recognizer.GetCurrentToken()
+		d.ReportMatch(recognizer) // we know current token is correct
+		return MatchedSymbol
+	}
+
+	return nil
+}
+
+// Conjure up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example, x=ID {f($x)}. The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+	currentSymbol := recognizer.GetCurrentToken()
+	expecting := d.GetExpectedTokens(recognizer)
+	expectedTokenType := expecting.first()
+	var tokenText string
+
+	if expectedTokenType == TokenEOF {
+		tokenText = "<missing EOF>"
+	} else {
+		ln := recognizer.GetLiteralNames()
+		if expectedTokenType > 0 && expectedTokenType < len(ln) {
+			tokenText = "<missing " + ln[expectedTokenType] + ">"
+		} else {
+			tokenText = "<missing undefined>" // TODO matches the JS impl
+		}
+	}
+	current := currentSymbol
+	lookback := recognizer.GetTokenStream().LT(-1)
+	if current.GetTokenType() == TokenEOF && lookback != nil {
+		current = lookback
+	}
+
+	tf := recognizer.GetTokenFactory()
+
+	return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+	return recognizer.GetExpectedTokens()
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new token type.
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+	if t == nil {
+		return "<no token>"
+	}
+	s := t.GetText()
+	if s == "" {
+		if t.GetTokenType() == TokenEOF {
+			s = "<EOF>"
+		} else {
+			s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+		}
+	}
+	return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+	s = strings.Replace(s, "\t", "\\t", -1)
+	s = strings.Replace(s, "\n", "\\n", -1)
+	s = strings.Replace(s, "\r", "\\r", -1)
+	return "'" + s + "'"
+}
+
+// Compute the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// EXAMPLE
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r *or* any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token -- you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider grammar:
+//
+//	a : '[' b ']'
+//	  | '(' b ')'
+//
+//	b : c '^' INT
+//	c : ID
+//	  | INT
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+//	FOLLOW(b1_in_a) = FIRST(']') = ']'
+//	FOLLOW(b2_in_a) = FIRST(')') = ')'
+//	FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input "[]", the call chain is
+//
+//	a -> b -> c
+//
+// and, hence, the follow context stack is:
+//
+//	depth  follow set  start of rule execution
+//	0      <EOF>       a (from main())
+//	1      ']'         b
+//	2      '^'         c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets -- the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics with an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+// +// ANTLR's error recovery mechanism is based upon original ideas: +// +// "Algorithms + Data Structures = Programs" by Niklaus Wirth +// +// and +// +// "A note on error recovery in recursive descent parsers": +// http://portal.acm.org/citation.cfm?id=947902.947905 +// +// Later, Josef Grosch had some good ideas: +// +// "Efficient and Comfortable Error Recovery in Recursive Descent +// Parsers": +// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip +// +// Like Grosch I implement context-sensitive FOLLOW sets that are combined +// at run-time upon error to avoid overhead during parsing. +// +func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet { + atn := recognizer.GetInterpreter().atn + ctx := recognizer.GetParserRuleContext() + recoverSet := NewIntervalSet() + for ctx != nil && ctx.GetInvokingState() >= 0 { + // compute what follows who invoked us + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + follow := atn.NextTokens(rt.(*RuleTransition).followState, nil) + recoverSet.addSet(follow) + ctx = ctx.GetParent().(ParserRuleContext) + } + recoverSet.removeOne(TokenEpsilon) + return recoverSet +} + +// Consume tokens until one Matches the given token set.// +func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) { + ttype := recognizer.GetTokenStream().LA(1) + for ttype != TokenEOF && !set.contains(ttype) { + recognizer.Consume() + ttype = recognizer.GetTokenStream().LA(1) + } +} + +// +// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors +// by immediately canceling the parse operation with a +// {@link ParseCancellationException}. The implementation ensures that the +// {@link ParserRuleContext//exception} field is set for all parse tree nodes +// that were not completed prior to encountering the error. +// +//
+// This error strategy is useful in the following scenarios.
+//
+//   - Two-stage parsing: This error strategy allows the first
+//     stage of two-stage parsing to immediately terminate if an error is
+//     encountered, and immediately fall back to the second stage. In addition to
+//     avoiding wasted work by attempting to recover from errors here, the empty
+//     implementation of {@link BailErrorStrategy//Sync} improves the performance of
+//     the first stage.
+//   - Silent validation: When syntax errors are not being
+//     Reported or logged, and the parse result is simply ignored if errors occur,
+//     the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+//     when the result will be ignored either way.
+//
+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
+//
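+// For illustration, a hypothetical two-stage parse might look like the sketch
+// below (the parser value p and its rule method Prog are examples produced by
+// the ANTLR tool, not part of this runtime; a real caller would also reset any
+// other parser state before the second pass):
+//
+//	p.SetErrorHandler(NewBailErrorStrategy())
+//	defer func() {
+//		if r := recover(); r != nil {
+//			// stage 1 bailed out; rewind and re-parse with full recovery
+//			p.GetTokenStream().Seek(0)
+//			p.SetErrorHandler(NewDefaultErrorStrategy())
+//			p.Prog()
+//		}
+//	}()
+//	p.Prog()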
+// +// @see Parser//setErrorHandler(ANTLRErrorStrategy) + +type BailErrorStrategy struct { + *DefaultErrorStrategy +} + +var _ ErrorStrategy = &BailErrorStrategy{} + +func NewBailErrorStrategy() *BailErrorStrategy { + + b := new(BailErrorStrategy) + + b.DefaultErrorStrategy = NewDefaultErrorStrategy() + + return b +} + +// Instead of recovering from exception {@code e}, re-panic it wrapped +// in a {@link ParseCancellationException} so it is not caught by the +// rule func catches. Use {@link Exception//getCause()} to get the +// original {@link RecognitionException}. +// +func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) { + context := recognizer.GetParserRuleContext() + for context != nil { + context.SetException(e) + context = context.GetParent().(ParserRuleContext) + } + panic(NewParseCancellationException()) // TODO we don't emit e properly +} + +// Make sure we don't attempt to recover inline if the parser +// successfully recovers, it won't panic an exception. +// +func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token { + b.Recover(recognizer, NewInputMisMatchException(recognizer)) + + return nil +} + +// Make sure we don't attempt to recover from problems in subrules.// +func (b *BailErrorStrategy) Sync(recognizer Parser) { + // pass +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go new file mode 100644 index 0000000000..2ef74926ec --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go @@ -0,0 +1,241 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just +// 3 kinds of errors: prediction errors, failed predicate errors, and +// mismatched input errors. In each case, the parser knows where it is +// in the input, where it is in the ATN, the rule invocation stack, +// and what kind of problem occurred. + +type RecognitionException interface { + GetOffendingToken() Token + GetMessage() string + GetInputStream() IntStream +} + +type BaseRecognitionException struct { + message string + recognizer Recognizer + offendingToken Token + offendingState int + ctx RuleContext + input IntStream +} + +func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException { + + // todo + // Error.call(this) + // + // if (!!Error.captureStackTrace) { + // Error.captureStackTrace(this, RecognitionException) + // } else { + // stack := NewError().stack + // } + // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int + + t := new(BaseRecognitionException) + + t.message = message + t.recognizer = recognizer + t.input = input + t.ctx = ctx + // The current {@link Token} when an error occurred. Since not all streams + // support accessing symbols by index, we have to track the {@link Token} + // instance itself. + t.offendingToken = nil + // Get the ATN state number the parser was in at the time the error + // occurred. For {@link NoViableAltException} and + // {@link LexerNoViableAltException} exceptions, this is the + // {@link DecisionState} number. For others, it is the state whose outgoing + // edge we couldn't Match. 
+ t.offendingState = -1 + if t.recognizer != nil { + t.offendingState = t.recognizer.GetState() + } + + return t +} + +func (b *BaseRecognitionException) GetMessage() string { + return b.message +} + +func (b *BaseRecognitionException) GetOffendingToken() Token { + return b.offendingToken +} + +func (b *BaseRecognitionException) GetInputStream() IntStream { + return b.input +} + +//
+// If the state number is not known, this method returns -1.
+
+// Gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns {@code nil}.
+// +// @return The set of token types that could potentially follow the current +// state in the ATN, or {@code nil} if the information is not available. +// / +func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet { + if b.recognizer != nil { + return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx) + } + + return nil +} + +func (b *BaseRecognitionException) String() string { + return b.message +} + +type LexerNoViableAltException struct { + *BaseRecognitionException + + startIndex int + deadEndConfigs ATNConfigSet +} + +func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException { + + l := new(LexerNoViableAltException) + + l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil) + + l.startIndex = startIndex + l.deadEndConfigs = deadEndConfigs + + return l +} + +func (l *LexerNoViableAltException) String() string { + symbol := "" + if l.startIndex >= 0 && l.startIndex < l.input.Size() { + symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex)) + } + return "LexerNoViableAltException" + symbol +} + +type NoViableAltException struct { + *BaseRecognitionException + + startToken Token + offendingToken Token + ctx ParserRuleContext + deadEndConfigs ATNConfigSet +} + +// Indicates that the parser could not decide which of two or more paths +// to take based upon the remaining input. It tracks the starting token +// of the offending input and also knows where the parser was +// in the various paths when the error. Reported by ReportNoViableAlternative() +// +func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException { + + if ctx == nil { + ctx = recognizer.GetParserRuleContext() + } + + if offendingToken == nil { + offendingToken = recognizer.GetCurrentToken() + } + + if startToken == nil { + startToken = recognizer.GetCurrentToken() + } + + if input == nil { + input = recognizer.GetInputStream().(TokenStream) + } + + n := new(NoViableAltException) + n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx) + + // Which configurations did we try at input.Index() that couldn't Match + // input.LT(1)?// + n.deadEndConfigs = deadEndConfigs + // The token object at the start index the input stream might + // not be buffering tokens so get a reference to it. (At the + // time the error occurred, of course the stream needs to keep a + // buffer all of the tokens but later we might not have access to those.) + n.startToken = startToken + n.offendingToken = offendingToken + + return n +} + +type InputMisMatchException struct { + *BaseRecognitionException +} + +// This signifies any kind of mismatched input exceptions such as +// when the current input does not Match the expected token. +// +func NewInputMisMatchException(recognizer Parser) *InputMisMatchException { + + i := new(InputMisMatchException) + i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + i.offendingToken = recognizer.GetCurrentToken() + + return i + +} + +// A semantic predicate failed during validation. Validation of predicates +// occurs when normally parsing the alternative just like Matching a token. +// Disambiguating predicate evaluation occurs when we test a predicate during +// prediction. 
+ +type FailedPredicateException struct { + *BaseRecognitionException + + ruleIndex int + predicateIndex int + predicate string +} + +func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException { + + f := new(FailedPredicateException) + + f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext()) + + s := recognizer.GetInterpreter().atn.states[recognizer.GetState()] + trans := s.GetTransitions()[0] + if trans2, ok := trans.(*PredicateTransition); ok { + f.ruleIndex = trans2.ruleIndex + f.predicateIndex = trans2.predIndex + } else { + f.ruleIndex = 0 + f.predicateIndex = 0 + } + f.predicate = predicate + f.offendingToken = recognizer.GetCurrentToken() + + return f +} + +func (f *FailedPredicateException) formatMessage(predicate, message string) string { + if message != "" { + return message + } + + return "failed predicate: {" + predicate + "}?" +} + +type ParseCancellationException struct { +} + +func NewParseCancellationException() *ParseCancellationException { + // Error.call(this) + // Error.captureStackTrace(this, ParseCancellationException) + return new(ParseCancellationException) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go new file mode 100644 index 0000000000..842170c086 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go @@ -0,0 +1,49 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "bytes" + "io" + "os" +) + +// This is an InputStream that is loaded from a file all at once +// when you construct the object. + +type FileStream struct { + *InputStream + + filename string +} + +func NewFileStream(fileName string) (*FileStream, error) { + + buf := bytes.NewBuffer(nil) + + f, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer f.Close() + _, err = io.Copy(buf, f) + if err != nil { + return nil, err + } + + fs := new(FileStream) + + fs.filename = fileName + s := string(buf.Bytes()) + + fs.InputStream = NewInputStream(s) + + return fs, nil + +} + +func (f *FileStream) GetSourceName() string { + return f.filename +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go new file mode 100644 index 0000000000..5ff270f536 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go @@ -0,0 +1,113 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+
+package antlr
+
+type InputStream struct {
+	name  string
+	index int
+	data  []rune
+	size  int
+}
+
+func NewInputStream(data string) *InputStream {
+
+	is := new(InputStream)
+
+	is.name = "<empty>"
+	is.index = 0
+	is.data = []rune(data)
+	is.size = len(is.data) // number of runes
+
+	return is
+}
+
+func (is *InputStream) reset() {
+	is.index = 0
+}
+
+func (is *InputStream) Consume() {
+	if is.index >= is.size {
+		// assert is.LA(1) == TokenEOF
+		panic("cannot consume EOF")
+	}
+	is.index++
+}
+
+func (is *InputStream) LA(offset int) int {
+
+	if offset == 0 {
+		return 0 // nil
+	}
+	if offset < 0 {
+		offset++ // e.g., translate LA(-1) to use offset=0
+	}
+	pos := is.index + offset - 1
+
+	if pos < 0 || pos >= is.size { // invalid
+		return TokenEOF
+	}
+
+	return int(is.data[pos])
+}
+
+func (is *InputStream) LT(offset int) int {
+	return is.LA(offset)
+}
+
+func (is *InputStream) Index() int {
+	return is.index
+}
+
+func (is *InputStream) Size() int {
+	return is.size
+}
+
+// mark/release do nothing; we have the entire buffer
+func (is *InputStream) Mark() int {
+	return -1
+}
+
+func (is *InputStream) Release(marker int) {
+}
+
+func (is *InputStream) Seek(index int) {
+	if index <= is.index {
+		is.index = index // just jump; don't update stream state (line,...)
+		return
+	}
+	// seek forward
+	is.index = intMin(index, is.size)
+}
+
+func (is *InputStream) GetText(start int, stop int) string {
+	if stop >= is.size {
+		stop = is.size - 1
+	}
+	if start >= is.size {
+		return ""
+	}
+
+	return string(is.data[start : stop+1])
+}
+
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+	if start != nil && stop != nil {
+		return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+	}
+
+	return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i *Interval) string {
+	return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+	return "Obtained from string"
+}
+
+func (is *InputStream) String() string {
+	return string(is.data)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
new file mode 100644
index 0000000000..438e0ea6e7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type IntStream interface {
+	Consume()
+	LA(int) int
+	Mark() int
+	Release(marker int)
+	Index() int
+	Seek(index int)
+	Size() int
+	GetSourceName() string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
new file mode 100644
index 0000000000..510d909114
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
@@ -0,0 +1,296 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+	"strconv"
+	"strings"
+)
+
+type Interval struct {
+	Start int
+	Stop  int
+}
+
+/* stop is not included!
*/ +func NewInterval(start, stop int) *Interval { + i := new(Interval) + + i.Start = start + i.Stop = stop + return i +} + +func (i *Interval) Contains(item int) bool { + return item >= i.Start && item < i.Stop +} + +func (i *Interval) String() string { + if i.Start == i.Stop-1 { + return strconv.Itoa(i.Start) + } + + return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1) +} + +func (i *Interval) length() int { + return i.Stop - i.Start +} + +type IntervalSet struct { + intervals []*Interval + readOnly bool +} + +func NewIntervalSet() *IntervalSet { + + i := new(IntervalSet) + + i.intervals = nil + i.readOnly = false + + return i +} + +func (i *IntervalSet) first() int { + if len(i.intervals) == 0 { + return TokenInvalidType + } + + return i.intervals[0].Start +} + +func (i *IntervalSet) addOne(v int) { + i.addInterval(NewInterval(v, v+1)) +} + +func (i *IntervalSet) addRange(l, h int) { + i.addInterval(NewInterval(l, h+1)) +} + +func (i *IntervalSet) addInterval(v *Interval) { + if i.intervals == nil { + i.intervals = make([]*Interval, 0) + i.intervals = append(i.intervals, v) + } else { + // find insert pos + for k, interval := range i.intervals { + // distinct range -> insert + if v.Stop < interval.Start { + i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...) + return + } else if v.Stop == interval.Start { + i.intervals[k].Start = v.Start + return + } else if v.Start <= interval.Stop { + i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop)) + + // if not applying to end, merge potential overlaps + if k < len(i.intervals)-1 { + l := i.intervals[k] + r := i.intervals[k+1] + // if r contained in l + if l.Stop >= r.Stop { + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } else if l.Stop >= r.Start { // partial overlap + i.intervals[k] = NewInterval(l.Start, r.Stop) + i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...) + } + } + return + } + } + // greater than any exiting + i.intervals = append(i.intervals, v) + } +} + +func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet { + if other.intervals != nil { + for k := 0; k < len(other.intervals); k++ { + i2 := other.intervals[k] + i.addInterval(NewInterval(i2.Start, i2.Stop)) + } + } + return i +} + +func (i *IntervalSet) complement(start int, stop int) *IntervalSet { + result := NewIntervalSet() + result.addInterval(NewInterval(start, stop+1)) + for j := 0; j < len(i.intervals); j++ { + result.removeRange(i.intervals[j]) + } + return result +} + +func (i *IntervalSet) contains(item int) bool { + if i.intervals == nil { + return false + } + for k := 0; k < len(i.intervals); k++ { + if i.intervals[k].Contains(item) { + return true + } + } + return false +} + +func (i *IntervalSet) length() int { + len := 0 + + for _, v := range i.intervals { + len += v.length() + } + + return len +} + +func (i *IntervalSet) removeRange(v *Interval) { + if v.Start == v.Stop-1 { + i.removeOne(v.Start) + } else if i.intervals != nil { + k := 0 + for n := 0; n < len(i.intervals); n++ { + ni := i.intervals[k] + // intervals are ordered + if v.Stop <= ni.Start { + return + } else if v.Start > ni.Start && v.Stop < ni.Stop { + i.intervals[k] = NewInterval(ni.Start, v.Start) + x := NewInterval(v.Stop, ni.Stop) + // i.intervals.splice(k, 0, x) + i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...) 
+				return
+			} else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+				// i.intervals.splice(k, 1)
+				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+				k = k - 1 // need another pass
+			} else if v.Start < ni.Stop {
+				i.intervals[k] = NewInterval(ni.Start, v.Start)
+			} else if v.Stop < ni.Stop {
+				i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+			}
+			k++
+		}
+	}
+}
+
+func (i *IntervalSet) removeOne(v int) {
+	if i.intervals != nil {
+		for k := 0; k < len(i.intervals); k++ {
+			ki := i.intervals[k]
+			// intervals are ordered
+			if v < ki.Start {
+				return
+			} else if v == ki.Start && v == ki.Stop-1 {
+				// i.intervals.splice(k, 1)
+				i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+				return
+			} else if v == ki.Start {
+				i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+				return
+			} else if v == ki.Stop-1 {
+				i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+				return
+			} else if v < ki.Stop-1 {
+				x := NewInterval(ki.Start, v)
+				ki.Start = v + 1
+				// i.intervals.splice(k, 0, x)
+				i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+				return
+			}
+		}
+	}
+}
+
+func (i *IntervalSet) String() string {
+	return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+	if i.intervals == nil {
+		return "{}"
+	} else if literalNames != nil || symbolicNames != nil {
+		return i.toTokenString(literalNames, symbolicNames)
+	} else if elemsAreChar {
+		return i.toCharString()
+	}
+
+	return i.toIndexString()
+}
+
+func (i *IntervalSet) toCharString() string {
+	names := make([]string, len(i.intervals))
+
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				names = append(names, ("'" + string(v.Start) + "'"))
+			}
+		} else {
+			names = append(names, "'"+string(v.Start)+"'..'"+string(v.Stop-1)+"'")
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+	names := make([]string, 0)
+	for j := 0; j < len(i.intervals); j++ {
+		v := i.intervals[j]
+		if v.Stop == v.Start+1 {
+			if v.Start == TokenEOF {
+				names = append(names, "<EOF>")
+			} else {
+				names = append(names, strconv.Itoa(v.Start))
+			}
+		} else {
+			names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+	names := make([]string, 0)
+	for _, v := range i.intervals {
+		for j := v.Start; j < v.Stop; j++ {
+			names = append(names, i.elementName(literalNames, symbolicNames, j))
+		}
+	}
+	if len(names) > 1 {
+		return "{" + strings.Join(names, ", ") + "}"
+	}
+
+	return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+	if a == TokenEOF {
+		return "<EOF>"
+	} else if a == TokenEpsilon {
+		return "<EPSILON>"
+	} else {
+		if a < len(literalNames) && literalNames[a] != "" {
+			return literalNames[a]
+		}
+
+		return symbolicNames[a]
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
new file mode 100644
index 0000000000..b04f04572f
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
@@ -0,0 +1,418 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A lexer is recognizer that draws input symbols from a character stream. +// lexer grammars result in a subclass of this object. A Lexer object +// uses simplified Match() and error recovery mechanisms in the interest +// of speed. +/// + +type Lexer interface { + TokenSource + Recognizer + + Emit() Token + + SetChannel(int) + PushMode(int) + PopMode() int + SetType(int) + SetMode(int) +} + +type BaseLexer struct { + *BaseRecognizer + + Interpreter ILexerATNSimulator + TokenStartCharIndex int + TokenStartLine int + TokenStartColumn int + ActionType int + Virt Lexer // The most derived lexer implementation. Allows virtual method calls. + + input CharStream + factory TokenFactory + tokenFactorySourcePair *TokenSourceCharStreamPair + token Token + hitEOF bool + channel int + thetype int + modeStack IntStack + mode int + text string +} + +func NewBaseLexer(input CharStream) *BaseLexer { + + lexer := new(BaseLexer) + + lexer.BaseRecognizer = NewBaseRecognizer() + + lexer.input = input + lexer.factory = CommonTokenFactoryDEFAULT + lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input} + + lexer.Virt = lexer + + lexer.Interpreter = nil // child classes must populate it + + // The goal of all lexer rules/methods is to create a token object. + // l is an instance variable as multiple rules may collaborate to + // create a single token. NextToken will return l object after + // Matching lexer rule(s). If you subclass to allow multiple token + // emissions, then set l to the last token to be Matched or + // something nonnil so that the auto token emit mechanism will not + // emit another token. + lexer.token = nil + + // What character index in the stream did the current token start at? + // Needed, for example, to get the text for current token. Set at + // the start of NextToken. + lexer.TokenStartCharIndex = -1 + + // The line on which the first character of the token resides/// + lexer.TokenStartLine = -1 + + // The character position of first character within the line/// + lexer.TokenStartColumn = -1 + + // Once we see EOF on char stream, next token will be EOF. + // If you have DONE : EOF then you see DONE EOF. + lexer.hitEOF = false + + // The channel number for the current token/// + lexer.channel = TokenDefaultChannel + + // The token type for the current token/// + lexer.thetype = TokenInvalidType + + lexer.modeStack = make([]int, 0) + lexer.mode = LexerDefaultMode + + // You can set the text for the current token to override what is in + // the input char buffer. Use setText() or can set l instance var. 
+ // / + lexer.text = "" + + return lexer +} + +const ( + LexerDefaultMode = 0 + LexerMore = -2 + LexerSkip = -3 +) + +const ( + LexerDefaultTokenChannel = TokenDefaultChannel + LexerHidden = TokenHiddenChannel + LexerMinCharValue = 0x0000 + LexerMaxCharValue = 0x10FFFF +) + +func (b *BaseLexer) reset() { + // wack Lexer state variables + if b.input != nil { + b.input.Seek(0) // rewind the input + } + b.token = nil + b.thetype = TokenInvalidType + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = -1 + b.TokenStartColumn = -1 + b.TokenStartLine = -1 + b.text = "" + + b.hitEOF = false + b.mode = LexerDefaultMode + b.modeStack = make([]int, 0) + + b.Interpreter.reset() +} + +func (b *BaseLexer) GetInterpreter() ILexerATNSimulator { + return b.Interpreter +} + +func (b *BaseLexer) GetInputStream() CharStream { + return b.input +} + +func (b *BaseLexer) GetSourceName() string { + return b.GrammarFileName +} + +func (b *BaseLexer) SetChannel(v int) { + b.channel = v +} + +func (b *BaseLexer) GetTokenFactory() TokenFactory { + return b.factory +} + +func (b *BaseLexer) setTokenFactory(f TokenFactory) { + b.factory = f +} + +func (b *BaseLexer) safeMatch() (ret int) { + defer func() { + if e := recover(); e != nil { + if re, ok := e.(RecognitionException); ok { + b.notifyListeners(re) // Report error + b.Recover(re) + ret = LexerSkip // default + } + } + }() + + return b.Interpreter.Match(b.input, b.mode) +} + +// Return a token from l source i.e., Match a token on the char stream. +func (b *BaseLexer) NextToken() Token { + if b.input == nil { + panic("NextToken requires a non-nil input stream.") + } + + tokenStartMarker := b.input.Mark() + + // previously in finally block + defer func() { + // make sure we release marker after Match or + // unbuffered char stream will keep buffering + b.input.Release(tokenStartMarker) + }() + + for { + if b.hitEOF { + b.EmitEOF() + return b.token + } + b.token = nil + b.channel = TokenDefaultChannel + b.TokenStartCharIndex = b.input.Index() + b.TokenStartColumn = b.Interpreter.GetCharPositionInLine() + b.TokenStartLine = b.Interpreter.GetLine() + b.text = "" + continueOuter := false + for { + b.thetype = TokenInvalidType + ttype := LexerSkip + + ttype = b.safeMatch() + + if b.input.LA(1) == TokenEOF { + b.hitEOF = true + } + if b.thetype == TokenInvalidType { + b.thetype = ttype + } + if b.thetype == LexerSkip { + continueOuter = true + break + } + if b.thetype != LexerMore { + break + } + } + + if continueOuter { + continue + } + if b.token == nil { + b.Virt.Emit() + } + return b.token + } + + return nil +} + +// Instruct the lexer to Skip creating a token for current lexer rule +// and look for another token. NextToken() knows to keep looking when +// a lexer rule finishes with token set to SKIPTOKEN. Recall that +// if token==nil at end of any token rule, it creates one for you +// and emits it. 
+// / +func (b *BaseLexer) Skip() { + b.thetype = LexerSkip +} + +func (b *BaseLexer) More() { + b.thetype = LexerMore +} + +func (b *BaseLexer) SetMode(m int) { + b.mode = m +} + +func (b *BaseLexer) PushMode(m int) { + if LexerATNSimulatorDebug { + fmt.Println("pushMode " + strconv.Itoa(m)) + } + b.modeStack.Push(b.mode) + b.mode = m +} + +func (b *BaseLexer) PopMode() int { + if len(b.modeStack) == 0 { + panic("Empty Stack") + } + if LexerATNSimulatorDebug { + fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1])) + } + i, _ := b.modeStack.Pop() + b.mode = i + return b.mode +} + +func (b *BaseLexer) inputStream() CharStream { + return b.input +} + +// SetInputStream resets the lexer input stream and associated lexer state. +func (b *BaseLexer) SetInputStream(input CharStream) { + b.input = nil + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} + b.reset() + b.input = input + b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input} +} + +func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair { + return b.tokenFactorySourcePair +} + +// By default does not support multiple emits per NextToken invocation +// for efficiency reasons. Subclass and override l method, NextToken, +// and GetToken (to push tokens into a list and pull from that list +// rather than a single variable as l implementation does). +// / +func (b *BaseLexer) EmitToken(token Token) { + b.token = token +} + +// The standard method called to automatically emit a token at the +// outermost lexical rule. The token object should point into the +// char buffer start..stop. If there is a text override in 'text', +// use that to set the token's text. Override l method to emit +// custom Token objects or provide a Newfactory. +// / +func (b *BaseLexer) Emit() Token { + t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn) + b.EmitToken(t) + return t +} + +func (b *BaseLexer) EmitEOF() Token { + cpos := b.GetCharPositionInLine() + lpos := b.GetLine() + eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos) + b.EmitToken(eof) + return eof +} + +func (b *BaseLexer) GetCharPositionInLine() int { + return b.Interpreter.GetCharPositionInLine() +} + +func (b *BaseLexer) GetLine() int { + return b.Interpreter.GetLine() +} + +func (b *BaseLexer) GetType() int { + return b.thetype +} + +func (b *BaseLexer) SetType(t int) { + b.thetype = t +} + +// What is the index of the current character of lookahead?/// +func (b *BaseLexer) GetCharIndex() int { + return b.input.Index() +} + +// Return the text Matched so far for the current token or any text override. +//Set the complete text of l token it wipes any previous changes to the text. +func (b *BaseLexer) GetText() string { + if b.text != "" { + return b.text + } + + return b.Interpreter.GetText(b.input) +} + +func (b *BaseLexer) SetText(text string) { + b.text = text +} + +func (b *BaseLexer) GetATN() *ATN { + return b.Interpreter.ATN() +} + +// Return a list of all Token objects in input char stream. +// Forces load of all tokens. Does not include EOF token. 
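+//
+// For illustration (the lexer name is hypothetical, produced by the ANTLR
+// tool; NewInputStream is from this runtime):
+//
+//	input := NewInputStream("1 + 2")
+//	lexer := NewMyLexer(input)
+//	for _, t := range lexer.GetAllTokens() {
+//		fmt.Println(t.GetText())
+//	}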
+// /
+func (b *BaseLexer) GetAllTokens() []Token {
+	vl := b.Virt
+	tokens := make([]Token, 0)
+	t := vl.NextToken()
+	for t.GetTokenType() != TokenEOF {
+		tokens = append(tokens, t)
+		t = vl.NextToken()
+	}
+	return tokens
+}
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+	start := b.TokenStartCharIndex
+	stop := b.input.Index()
+	text := b.input.GetTextFromInterval(NewInterval(start, stop))
+	msg := "token recognition error at: '" + text + "'"
+	listener := b.GetErrorListenerDispatch()
+	listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+	if c == TokenEOF {
+		return "<EOF>"
+	} else if c == '\n' {
+		return "\\n"
+	} else if c == '\t' {
+		return "\\t"
+	} else if c == '\r' {
+		return "\\r"
+	} else {
+		return string(c)
+	}
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+	return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Lexers can normally Match any char in its vocabulary after Matching
+// a token, so do the easy thing and just kill a character and hope
+// it all works out. You can instead use the rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+// /
+func (b *BaseLexer) Recover(re RecognitionException) {
+	if b.input.LA(1) != TokenEOF {
+		if _, ok := re.(*LexerNoViableAltException); ok {
+			// Skip a char and try again
+			b.Interpreter.Consume(b.input)
+		} else {
+			// TODO: Do we lose character or line position information?
+			b.input.Consume()
+		}
+	}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
new file mode 100644
index 0000000000..20df84f943
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
@@ -0,0 +1,431 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+	LexerActionTypeChannel  = 0 // The type of a {@link LexerChannelAction} action.
+	LexerActionTypeCustom   = 1 // The type of a {@link LexerCustomAction} action.
+	LexerActionTypeMode     = 2 // The type of a {@link LexerModeAction} action.
+	LexerActionTypeMore     = 3 // The type of a {@link LexerMoreAction} action.
+	LexerActionTypePopMode  = 4 // The type of a {@link LexerPopModeAction} action.
+	LexerActionTypePushMode = 5 // The type of a {@link LexerPushModeAction} action.
+	LexerActionTypeSkip     = 6 // The type of a {@link LexerSkipAction} action.
+	LexerActionTypeType     = 7 // The type of a {@link LexerTypeAction} action.
+) + +type LexerAction interface { + getActionType() int + getIsPositionDependent() bool + execute(lexer Lexer) + hash() int + equals(other LexerAction) bool +} + +type BaseLexerAction struct { + actionType int + isPositionDependent bool +} + +func NewBaseLexerAction(action int) *BaseLexerAction { + la := new(BaseLexerAction) + + la.actionType = action + la.isPositionDependent = false + + return la +} + +func (b *BaseLexerAction) execute(lexer Lexer) { + panic("Not implemented") +} + +func (b *BaseLexerAction) getActionType() int { + return b.actionType +} + +func (b *BaseLexerAction) getIsPositionDependent() bool { + return b.isPositionDependent +} + +func (b *BaseLexerAction) hash() int { + return b.actionType +} + +func (b *BaseLexerAction) equals(other LexerAction) bool { + return b == other +} + +// +// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}. +// +//

+// The {@code Skip} command does not have any parameters, so this action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
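Since skip (like popMode and more) carries no per-instance state, one shared value can serve every lexer. A minimal standalone sketch of that singleton shape, using illustrative names that are not part of the vendored API:

package main

import "fmt"

// skipAction mirrors LexerSkipAction: no fields, so a single package-level
// value can be shared safely by every lexer instance.
type skipAction struct{}

func (skipAction) execute() { fmt.Println("skip: do not emit the current token") }

// One shared instance, analogous to LexerSkipActionINSTANCE.
var skipInstance = skipAction{}

func main() {
	skipInstance.execute()
}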
+type LexerSkipAction struct { + *BaseLexerAction +} + +func NewLexerSkipAction() *LexerSkipAction { + la := new(LexerSkipAction) + la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip) + return la +} + +// Provides a singleton instance of l parameterless lexer action. +var LexerSkipActionINSTANCE = NewLexerSkipAction() + +func (l *LexerSkipAction) execute(lexer Lexer) { + lexer.Skip() +} + +func (l *LexerSkipAction) String() string { + return "skip" +} + +// Implements the {@code type} lexer action by calling {@link Lexer//setType} +// with the assigned type. +type LexerTypeAction struct { + *BaseLexerAction + + thetype int +} + +func NewLexerTypeAction(thetype int) *LexerTypeAction { + l := new(LexerTypeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType) + l.thetype = thetype + return l +} + +func (l *LexerTypeAction) execute(lexer Lexer) { + lexer.SetType(l.thetype) +} + +func (l *LexerTypeAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.thetype) + return murmurFinish(h, 2) +} + +func (l *LexerTypeAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerTypeAction); !ok { + return false + } else { + return l.thetype == other.(*LexerTypeAction).thetype + } +} + +func (l *LexerTypeAction) String() string { + return "actionType(" + strconv.Itoa(l.thetype) + ")" +} + +// Implements the {@code pushMode} lexer action by calling +// {@link Lexer//pushMode} with the assigned mode. +type LexerPushModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerPushModeAction(mode int) *LexerPushModeAction { + + l := new(LexerPushModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode) + + l.mode = mode + return l +} + +//

+// This action is implemented by calling {@link Lexer//pushMode} with the +// value provided by {@link //getMode}.
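PushMode saves the current mode before switching and PopMode restores it, which is what lets island grammars (say, strings inside templates) nest. A self-contained sketch of that stack discipline; the mode numbers are illustrative:

package main

import "fmt"

// lexerModes mirrors the mode/modeStack pair on BaseLexer.
type lexerModes struct {
	mode  int
	stack []int
}

// PushMode saves the current mode and switches to m.
func (l *lexerModes) PushMode(m int) {
	l.stack = append(l.stack, l.mode)
	l.mode = m
}

// PopMode restores the most recently saved mode, panicking on an empty
// stack just as BaseLexer.PopMode does.
func (l *lexerModes) PopMode() int {
	if len(l.stack) == 0 {
		panic("Empty Stack")
	}
	l.mode = l.stack[len(l.stack)-1]
	l.stack = l.stack[:len(l.stack)-1]
	return l.mode
}

func main() {
	const defaultMode, stringMode = 0, 1
	l := &lexerModes{mode: defaultMode}
	l.PushMode(stringMode) // e.g. on an opening quote
	fmt.Println(l.mode)    // 1
	l.PopMode()            // on the closing quote
	fmt.Println(l.mode)    // 0
}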
+func (l *LexerPushModeAction) execute(lexer Lexer) { + lexer.PushMode(l.mode) +} + +func (l *LexerPushModeAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerPushModeAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerPushModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerPushModeAction).mode + } +} + +func (l *LexerPushModeAction) String() string { + return "pushMode(" + strconv.Itoa(l.mode) + ")" +} + +// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}. +// +//

+// The {@code popMode} command does not have any parameters, so this action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerPopModeAction struct { + *BaseLexerAction +} + +func NewLexerPopModeAction() *LexerPopModeAction { + + l := new(LexerPopModeAction) + + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode) + + return l +} + +var LexerPopModeActionINSTANCE = NewLexerPopModeAction() + +//

+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) { + lexer.PopMode() +} + +func (l *LexerPopModeAction) String() string { + return "popMode" +} + +// Implements the {@code more} lexer action by calling {@link Lexer//more}. +// +//

+// The {@code more} command does not have any parameters, so this action is +// implemented as a singleton instance exposed by {@link //INSTANCE}.
+ +type LexerMoreAction struct { + *BaseLexerAction +} + +func NewLexerMoreAction() *LexerMoreAction { + l := new(LexerMoreAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore) + + return l +} + +var LexerMoreActionINSTANCE = NewLexerMoreAction() + +//

+// This action is implemented by calling {@link Lexer//More}.
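Both skip and more work by leaving a sentinel in the token-type field that the lexer's token loop inspects before emitting. The sentinel values below are assumptions for illustration, not necessarily the runtime's LexerSkip and LexerMore constants:

package main

import "fmt"

// Assumed sentinel values standing in for LexerSkip and LexerMore.
const (
	sentinelSkip = -3
	sentinelMore = -2
)

// emitDecision sketches how a token loop reacts to the type set by the
// Skip() and More() calls these actions trigger.
func emitDecision(thetype int) string {
	switch thetype {
	case sentinelSkip:
		return "discard the matched text and start a fresh token"
	case sentinelMore:
		return "keep the matched text and continue matching"
	default:
		return "emit a token of this type"
	}
}

func main() {
	fmt.Println(emitDecision(sentinelMore))
}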
+func (l *LexerMoreAction) execute(lexer Lexer) { + lexer.More() +} + +func (l *LexerMoreAction) String() string { + return "more" +} + +// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with +// the assigned mode. +type LexerModeAction struct { + *BaseLexerAction + + mode int +} + +func NewLexerModeAction(mode int) *LexerModeAction { + l := new(LexerModeAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode) + l.mode = mode + return l +} + +//

+// This action is implemented by calling {@link Lexer//mode} with the +// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) { + lexer.SetMode(l.mode) +} + +func (l *LexerModeAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.mode) + return murmurFinish(h, 2) +} + +func (l *LexerModeAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerModeAction); !ok { + return false + } else { + return l.mode == other.(*LexerModeAction).mode + } +} + +func (l *LexerModeAction) String() string { + return "mode(" + strconv.Itoa(l.mode) + ")" +} + +// Executes a custom lexer action by calling {@link Recognizer//action} with the +// rule and action indexes assigned to the custom action. The implementation of +// a custom action is added to the generated code for the lexer in an override +// of {@link Recognizer//action} when the grammar is compiled. +// +//

+// This class may represent embedded actions created with the {...} +// syntax in ANTLR 4, as well as actions created for lexer commands where the +// command argument could not be evaluated when the grammar was compiled.
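A custom action is thus only a (ruleIndex, actionIndex) pair; the actual work lives in the generated recognizer's Action dispatch. A rough standalone sketch, where the recognizer type and its output are illustrative:

package main

import "fmt"

// customAction mirrors LexerCustomAction: it records which rule's embedded
// {...} action to run and defers the work to the recognizer.
type customAction struct{ ruleIndex, actionIndex int }

// recognizer stands in for a generated lexer's Action dispatch.
type recognizer struct{}

func (recognizer) Action(ruleIndex, actionIndex int) {
	fmt.Printf("run embedded action %d of rule %d\n", actionIndex, ruleIndex)
}

func (a customAction) execute(r recognizer) {
	r.Action(a.ruleIndex, a.actionIndex)
}

func main() {
	customAction{ruleIndex: 3, actionIndex: 0}.execute(recognizer{})
}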
+ +// Constructs a custom lexer action with the specified rule and action +// indexes. +// +// @param ruleIndex The rule index to use for calls to +// {@link Recognizer//action}. +// @param actionIndex The action index to use for calls to +// {@link Recognizer//action}. + +type LexerCustomAction struct { + *BaseLexerAction + ruleIndex, actionIndex int +} + +func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction { + l := new(LexerCustomAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom) + l.ruleIndex = ruleIndex + l.actionIndex = actionIndex + l.isPositionDependent = true + return l +} + +//

+// Custom actions are implemented by calling {@link Lexer//action} with the +// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) { + lexer.Action(nil, l.ruleIndex, l.actionIndex) +} + +func (l *LexerCustomAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.ruleIndex) + h = murmurUpdate(h, l.actionIndex) + return murmurFinish(h, 3) +} + +func (l *LexerCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerCustomAction); !ok { + return false + } else { + return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex + } +} + +// Implements the {@code channel} lexer action by calling +// {@link Lexer//setChannel} with the assigned channel. +// Constructs a New{@code channel} action with the specified channel value. +// @param channel The channel value to pass to {@link Lexer//setChannel}. +type LexerChannelAction struct { + *BaseLexerAction + + channel int +} + +func NewLexerChannelAction(channel int) *LexerChannelAction { + l := new(LexerChannelAction) + l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel) + l.channel = channel + return l +} + +//

+// This action is implemented by calling {@link Lexer//setChannel} with the +// value provided by {@link //getChannel}.
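Each parameterized action hashes its action type together with its argument so that equal actions can be deduplicated across ATN configurations. A rough stand-in for the murmurInit/murmurUpdate/murmurFinish sequence; the 31-multiplier mixing is illustrative only, not the real murmur step:

package main

import "fmt"

// hashFields folds each field into an accumulator, mirroring the shape of
// the hash() methods above: seed, update once per field, finish.
func hashFields(fields ...int) int {
	h := 0
	for _, f := range fields {
		h = h*31 + f // simple mixing for illustration, not real murmur
	}
	return h
}

func main() {
	const actionTypeChannel = 0 // matches LexerActionTypeChannel above
	fmt.Println(hashFields(actionTypeChannel, 2)) // a channel(2) action
}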
+func (l *LexerChannelAction) execute(lexer Lexer) { + lexer.SetChannel(l.channel) +} + +func (l *LexerChannelAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.channel) + return murmurFinish(h, 2) +} + +func (l *LexerChannelAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerChannelAction); !ok { + return false + } else { + return l.channel == other.(*LexerChannelAction).channel + } +} + +func (l *LexerChannelAction) String() string { + return "channel(" + strconv.Itoa(l.channel) + ")" +} + +// This implementation of {@link LexerAction} is used for tracking input offsets +// for position-dependent actions within a {@link LexerActionExecutor}. +// +//

+// This action is not serialized as part of the ATN, and is only required for +// position-dependent lexer actions which appear at a location other than the +// end of a rule. For more information about DFA optimizations employed for +// lexer actions, see {@link LexerActionExecutor//append} and +// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+ +// Constructs a new indexed custom action by associating a character offset +// with a {@link LexerAction}. +// +//

+// Note: This class is only required for lexer actions for which +// {@link LexerAction//isPositionDependent} returns {@code true}.
+// +// @param offset The offset into the input {@link CharStream}, relative to +// the token start index, at which the specified lexer action should be +// executed. +// @param action The lexer action to execute at a particular offset in the +// input {@link CharStream}. +type LexerIndexedCustomAction struct { + *BaseLexerAction + + offset int + lexerAction LexerAction + isPositionDependent bool +} + +func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction { + + l := new(LexerIndexedCustomAction) + l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType()) + + l.offset = offset + l.lexerAction = lexerAction + l.isPositionDependent = true + + return l +} + +//

+// This method calls {@link //execute} on the result of {@link //getAction} +// using the provided {@code lexer}.
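Execution of an indexed action is therefore pure delegation: the stored offset matters only to the executor that schedules it. A self-contained sketch of this decorator shape:

package main

import "fmt"

type action interface{ execute() }

type printAction string

func (p printAction) execute() { fmt.Println("execute:", string(p)) }

// indexed wraps another action with a character offset, mirroring
// LexerIndexedCustomAction: execute simply forwards to the inner action.
type indexed struct {
	offset int
	inner  action
}

func (i indexed) execute() { i.inner.execute() }

func main() {
	indexed{offset: 3, inner: printAction("custom")}.execute()
}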
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) { + // assume the input stream position was properly set by the calling code + l.lexerAction.execute(lexer) +} + +func (l *LexerIndexedCustomAction) hash() int { + h := murmurInit(0) + h = murmurUpdate(h, l.actionType) + h = murmurUpdate(h, l.offset) + h = murmurUpdate(h, l.lexerAction.hash()) + return murmurFinish(h, 3) +} + +func (l *LexerIndexedCustomAction) equals(other LexerAction) bool { + if l == other { + return true + } else if _, ok := other.(*LexerIndexedCustomAction); !ok { + return false + } else { + return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go new file mode 100644 index 0000000000..80b949a1a5 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go @@ -0,0 +1,170 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// Represents an executor for a sequence of lexer actions which traversed during +// the Matching operation of a lexer rule (token). +// +//

+// The executor tracks position information for position-dependent lexer actions +// efficiently, ensuring that actions appearing only at the end of the rule do +// not cause bloating of the {@link DFA} created for the lexer.
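A standalone sketch of the construct-and-cache-the-hash idea, using FNV purely for illustration where the vendored code uses its murmur helpers; note how appending to a nil executor yields a one-element executor, matching LexerActionExecutorappend below:

package main

import (
	"fmt"
	"hash/fnv"
)

type action string

// executor caches a hash over its action sequence at construction time,
// mirroring how NewLexerActionExecutor precomputes cachedHash.
type executor struct {
	actions []action
	hash    uint32
}

func newExecutor(actions []action) *executor {
	h := fnv.New32a()
	for _, a := range actions {
		h.Write([]byte(a))
	}
	return &executor{actions: actions, hash: h.Sum32()}
}

// appendAction mirrors LexerActionExecutorappend: appending to nil builds a
// one-element executor, otherwise the sequence is extended.
func appendAction(e *executor, a action) *executor {
	if e == nil {
		return newExecutor([]action{a})
	}
	return newExecutor(append(e.actions, a))
}

func main() {
	var e *executor
	e = appendAction(e, "skip")
	e = appendAction(e, "popMode")
	fmt.Println(len(e.actions), e.hash)
}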
+ +type LexerActionExecutor struct { + lexerActions []LexerAction + cachedHash int +} + +func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor { + + if lexerActions == nil { + lexerActions = make([]LexerAction, 0) + } + + l := new(LexerActionExecutor) + + l.lexerActions = lexerActions + + // Caches the result of {@link //hashCode} since the hash code is an element + // of the performance-critical {@link LexerATNConfig//hashCode} operation. + l.cachedHash = murmurInit(57) + for _, a := range lexerActions { + l.cachedHash = murmurUpdate(l.cachedHash, a.hash()) + } + + return l +} + +// Creates a {@link LexerActionExecutor} which executes the actions for +// the input {@code lexerActionExecutor} followed by a specified +// {@code lexerAction}. +// +// @param lexerActionExecutor The executor for actions already traversed by +// the lexer while Matching a token within a particular +// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as +// though it were an empty executor. +// @param lexerAction The lexer action to execute after the actions +// specified in {@code lexerActionExecutor}. +// +// @return A {@link LexerActionExecutor} for executing the combine actions +// of {@code lexerActionExecutor} and {@code lexerAction}. +func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor { + if lexerActionExecutor == nil { + return NewLexerActionExecutor([]LexerAction{lexerAction}) + } + + return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction)) +} + +// Creates a {@link LexerActionExecutor} which encodes the current offset +// for position-dependent lexer actions. +// +//

+// Normally, when the executor encounters lexer actions where +// {@link LexerAction//isPositionDependent} returns {@code true}, it calls +// {@link IntStream//seek} on the input {@link CharStream} to set the input +// position to the end of the current token. This behavior provides +// for efficient DFA representation of lexer actions which appear at the end +// of a lexer rule, even when the lexer rule Matches a variable number of +// characters.
+// +//

+// Prior to traversing a Match transition in the ATN, the current offset +// from the token start index is assigned to all position-dependent lexer +// actions which have not already been assigned a fixed offset. By storing +// the offsets relative to the token start index, the DFA representation of +// lexer actions which appear in the middle of tokens remains efficient due +// to sharing among tokens of the same length, regardless of their absolute +// position in the input stream.
+// +//

+// If the current executor already has offsets assigned to all +// position-dependent lexer actions, the method returns {@code this}.
+// +// @param offset The current offset to assign to all position-dependent +// lexer actions which do not already have offsets assigned. +// +// @return A {@link LexerActionExecutor} which stores input stream offsets +// for all position-dependent lexer actions. +// / +func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor { + var updatedLexerActions []LexerAction + for i := 0; i < len(l.lexerActions); i++ { + _, ok := l.lexerActions[i].(*LexerIndexedCustomAction) + if l.lexerActions[i].getIsPositionDependent() && !ok { + if updatedLexerActions == nil { + updatedLexerActions = make([]LexerAction, 0) + + for _, a := range l.lexerActions { + updatedLexerActions = append(updatedLexerActions, a) + } + } + + updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i]) + } + } + if updatedLexerActions == nil { + return l + } + + return NewLexerActionExecutor(updatedLexerActions) +} + +// Execute the actions encapsulated by l executor within the context of a +// particular {@link Lexer}. +// +//

+// This method calls {@link IntStream//seek} to set the position of the +// {@code input} {@link CharStream} prior to calling +// {@link LexerAction//execute} on a position-dependent action. Before the +// method returns, the input position will be restored to the same position +// it was in when the method was invoked.
+// +// @param lexer The lexer instance. +// @param input The input stream which is the source for the current token. +// When l method is called, the current {@link IntStream//index} for +// {@code input} should be the start of the following token, i.e. 1 +// character past the end of the current token. +// @param startIndex The token start index. This value may be passed to +// {@link IntStream//seek} to set the {@code input} position to the beginning +// of the token. +// / +func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) { + requiresSeek := false + stopIndex := input.Index() + + defer func() { + if requiresSeek { + input.Seek(stopIndex) + } + }() + + for i := 0; i < len(l.lexerActions); i++ { + lexerAction := l.lexerActions[i] + if la, ok := lexerAction.(*LexerIndexedCustomAction); ok { + offset := la.offset + input.Seek(startIndex + offset) + lexerAction = la.lexerAction + requiresSeek = (startIndex + offset) != stopIndex + } else if lexerAction.getIsPositionDependent() { + input.Seek(stopIndex) + requiresSeek = false + } + lexerAction.execute(lexer) + } +} + +func (l *LexerActionExecutor) hash() int { + if l == nil { + return 61 + } + return l.cachedHash +} + +func (l *LexerActionExecutor) equals(other interface{}) bool { + if l == other { + return true + } else if _, ok := other.(*LexerActionExecutor); !ok { + return false + } else { + return l.cachedHash == other.(*LexerActionExecutor).cachedHash && + &l.lexerActions == &other.(*LexerActionExecutor).lexerActions + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go new file mode 100644 index 0000000000..131364f75c --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go @@ -0,0 +1,658 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +var ( + LexerATNSimulatorDebug = false + LexerATNSimulatorDFADebug = false + + LexerATNSimulatorMinDFAEdge = 0 + LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN + + LexerATNSimulatorMatchCalls = 0 +) + +type ILexerATNSimulator interface { + IATNSimulator + + reset() + Match(input CharStream, mode int) int + GetCharPositionInLine() int + GetLine() int + GetText(input CharStream) string + Consume(input CharStream) +} + +type LexerATNSimulator struct { + *BaseATNSimulator + + recog Lexer + predictionMode int + mergeCache DoubleDict + startIndex int + Line int + CharPositionInLine int + mode int + prevAccept *SimState + MatchCalls int +} + +func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator { + l := new(LexerATNSimulator) + + l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + l.decisionToDFA = decisionToDFA + l.recog = recog + // The current token's starting index into the character stream. + // Shared across DFA to ATN simulation in case the ATN fails and the + // DFA did not have a previous accept state. In l case, we use the + // ATN-generated exception object. 
+ l.startIndex = -1 + // line number 1..n within the input/// + l.Line = 1 + // The index of the character relative to the beginning of the line + // 0..n-1/// + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode + // Used during DFA/ATN exec to record the most recent accept configuration + // info + l.prevAccept = NewSimState() + // done + return l +} + +func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) { + l.CharPositionInLine = simulator.CharPositionInLine + l.Line = simulator.Line + l.mode = simulator.mode + l.startIndex = simulator.startIndex +} + +func (l *LexerATNSimulator) Match(input CharStream, mode int) int { + l.MatchCalls++ + l.mode = mode + mark := input.Mark() + + defer func() { + input.Release(mark) + }() + + l.startIndex = input.Index() + l.prevAccept.reset() + + dfa := l.decisionToDFA[mode] + + if dfa.s0 == nil { + return l.MatchATN(input) + } + + return l.execATN(input, dfa.s0) +} + +func (l *LexerATNSimulator) reset() { + l.prevAccept.reset() + l.startIndex = -1 + l.Line = 1 + l.CharPositionInLine = 0 + l.mode = LexerDefaultMode +} + +func (l *LexerATNSimulator) MatchATN(input CharStream) int { + startState := l.atn.modeToStartState[l.mode] + + if LexerATNSimulatorDebug { + fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String()) + } + oldMode := l.mode + s0Closure := l.computeStartState(input, startState) + suppressEdge := s0Closure.hasSemanticContext + s0Closure.hasSemanticContext = false + + next := l.addDFAState(s0Closure) + + if !suppressEdge { + l.decisionToDFA[l.mode].setS0(next) + } + + predict := l.execATN(input, next) + + if LexerATNSimulatorDebug { + fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString()) + } + return predict +} + +func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int { + + if LexerATNSimulatorDebug { + fmt.Println("start state closure=" + ds0.configs.String()) + } + if ds0.isAcceptState { + // allow zero-length tokens + l.captureSimState(l.prevAccept, input, ds0) + } + t := input.LA(1) + s := ds0 // s is current/from DFA state + + for { // while more work + if LexerATNSimulatorDebug { + fmt.Println("execATN loop starting closure: " + s.configs.String()) + } + + // As we move src->trg, src->trg, we keep track of the previous trg to + // avoid looking up the DFA state again, which is expensive. + // If the previous target was already part of the DFA, we might + // be able to avoid doing a reach operation upon t. If s!=nil, + // it means that semantic predicates didn't prevent us from + // creating a DFA state. Once we know s!=nil, we check to see if + // the DFA state has an edge already for t. If so, we can just reuse + // it's configuration set there's no point in re-computing it. + // This is kind of like doing DFA simulation within the ATN + // simulation because DFA simulation is really just a way to avoid + // computing reach/closure sets. Technically, once we know that + // we have a previously added DFA state, we could jump over to + // the DFA simulator. But, that would mean popping back and forth + // a lot and making things more complicated algorithmically. + // This optimization makes a lot of sense for loops within DFA. + // A character will take us back to an existing DFA state + // that already has lots of edges out of it. e.g., .* in comments. 
+ target := l.getExistingTargetState(s, t) + if target == nil { + target = l.computeTargetState(input, s, t) + // print("Computed:" + str(target)) + } + if target == ATNSimulatorError { + break + } + // If l is a consumable input element, make sure to consume before + // capturing the accept state so the input index, line, and char + // position accurately reflect the state of the interpreter at the + // end of the token. + if t != TokenEOF { + l.Consume(input) + } + if target.isAcceptState { + l.captureSimState(l.prevAccept, input, target) + if t == TokenEOF { + break + } + } + t = input.LA(1) + s = target // flip current DFA target becomes Newsrc/from state + } + + return l.failOrAccept(l.prevAccept, input, s.configs, t) +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// l method returns {@code nil}. +// +// @param s The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for l edge is not +// already cached +func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState { + if s.edges == nil || t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge { + return nil + } + + target := s.edges[t-LexerATNSimulatorMinDFAEdge] + if LexerATNSimulatorDebug && target != nil { + fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber)) + } + return target +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param input The input stream +// @param s The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, l method +// returns {@link //ERROR}. +func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState { + reach := NewOrderedATNConfigSet() + + // if we don't find an existing DFA state + // Fill reach starting from closure, following t transitions + l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t) + + if len(reach.configs) == 0 { // we got nowhere on t from s + if !reach.hasSemanticContext { + // we got nowhere on t, don't panic out l knowledge it'd + // cause a failover from DFA later. + l.addDFAEdge(s, t, ATNSimulatorError, nil) + } + // stop when we can't Match any more char + return ATNSimulatorError + } + // Add an edge from s to target DFA found/created for reach + return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet) +} + +func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int { + if l.prevAccept.dfaState != nil { + lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor + l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column) + return prevAccept.dfaState.prediction + } + + // if no accept and EOF is first char, return EOF + if t == TokenEOF && input.Index() == l.startIndex { + return TokenEOF + } + + panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach)) +} + +// Given a starting configuration set, figure out all ATN configurations +// we can reach upon input {@code t}. Parameter {@code reach} is a return +// parameter. 
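"Return parameter" here means the callee mutates reach rather than returning a new set; a minimal illustration of that calling convention:

package main

import "fmt"

// fillReach mutates reach in place, the way getReachableConfigSet treats
// its reach argument as a return parameter.
func fillReach(closure []int, reach *[]int, t int) {
	for _, c := range closure {
		if c == t {
			*reach = append(*reach, c)
		}
	}
}

func main() {
	var reach []int
	fillReach([]int{1, 2, 2, 3}, &reach, 2)
	fmt.Println(reach) // [2 2]
}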
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) { + // l is used to Skip processing for configs which have a lower priority + // than a config that already reached an accept state for the same rule + SkipAlt := ATNInvalidAltNumber + + for _, cfg := range closure.GetItems() { + currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt) + if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision { + continue + } + + if LexerATNSimulatorDebug { + + fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true)) + } + + for _, trans := range cfg.GetState().GetTransitions() { + target := l.getReachableTarget(trans, t) + if target != nil { + lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor + if lexerActionExecutor != nil { + lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex) + } + treatEOFAsEpsilon := (t == TokenEOF) + config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor) + if l.closure(input, config, reach, + currentAltReachedAcceptState, true, treatEOFAsEpsilon) { + // any remaining configs for l alt have a lower priority + // than the one that just reached an accept state. + SkipAlt = cfg.GetAlt() + } + } + } + } +} + +func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) { + if LexerATNSimulatorDebug { + fmt.Printf("ACTION %s\n", lexerActionExecutor) + } + // seek to after last char in token + input.Seek(index) + l.Line = line + l.CharPositionInLine = charPos + if lexerActionExecutor != nil && l.recog != nil { + lexerActionExecutor.execute(l.recog, input, startIndex) + } +} + +func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState { + if trans.Matches(t, 0, LexerMaxCharValue) { + return trans.getTarget() + } + + return nil +} + +func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet { + configs := NewOrderedATNConfigSet() + for i := 0; i < len(p.GetTransitions()); i++ { + target := p.GetTransitions()[i].getTarget() + cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY) + l.closure(input, cfg, configs, false, false, false) + } + + return configs +} + +// Since the alternatives within any lexer decision are ordered by +// preference, l method stops pursuing the closure as soon as an accept +// state is reached. After the first accept state is reached by depth-first +// search from {@code config}, all other (potentially reachable) states for +// l rule would have a lower priority. +// +// @return {@code true} if an accept state is reached, otherwise +// {@code false}. 
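Because alternatives are ordered by preference, the first alternative whose closure reaches an accept state wins, and later configurations for it can be pruned (the SkipAlt bookkeeping above). Schematically:

package main

import "fmt"

// firstAccepting returns the 1-based number of the most preferred
// alternative that reached an accept state; once found, later work can
// stop, mirroring the SkipAlt pruning.
func firstAccepting(altAccepts []bool) int {
	for i, ok := range altAccepts {
		if ok {
			return i + 1 // ANTLR alternatives are numbered from 1
		}
	}
	return -1
}

func main() {
	fmt.Println(firstAccepting([]bool{false, true, true})) // 2
}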
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool { + + if LexerATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")") + } + + _, ok := config.state.(*RuleStopState) + if ok { + + if LexerATNSimulatorDebug { + if l.recog != nil { + fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config) + } else { + fmt.Printf("closure at rule stop %s\n", config) + } + } + + if config.context == nil || config.context.hasEmptyPath() { + if config.context == nil || config.context.isEmpty() { + configs.Add(config, nil) + return true + } + + configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil) + currentAltReachedAcceptState = true + } + if config.context != nil && !config.context.isEmpty() { + for i := 0; i < config.context.length(); i++ { + if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState { + newContext := config.context.GetParent(i) // "pop" return state + returnState := l.atn.states[config.context.getReturnState(i)] + cfg := NewLexerATNConfig2(config, returnState, newContext) + currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + } + return currentAltReachedAcceptState + } + // optimization + if !config.state.GetEpsilonOnlyTransitions() { + if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision { + configs.Add(config, nil) + } + } + for j := 0; j < len(config.state.GetTransitions()); j++ { + trans := config.state.GetTransitions()[j] + cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon) + if cfg != nil { + currentAltReachedAcceptState = l.closure(input, cfg, configs, + currentAltReachedAcceptState, speculative, treatEOFAsEpsilon) + } + } + return currentAltReachedAcceptState +} + +// side-effect: can alter configs.hasSemanticContext +func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition, + configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig { + + var cfg *LexerATNConfig + + if trans.getSerializationType() == TransitionRULE { + + rt := trans.(*RuleTransition) + newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber()) + cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext) + + } else if trans.getSerializationType() == TransitionPRECEDENCE { + panic("Precedence predicates are not supported in lexers.") + } else if trans.getSerializationType() == TransitionPREDICATE { + // Track traversing semantic predicates. If we traverse, + // we cannot add a DFA state for l "reach" computation + // because the DFA would not test the predicate again in the + // future. Rather than creating collections of semantic predicates + // like v3 and testing them on prediction, v4 will test them on the + // fly all the time using the ATN not the DFA. This is slower but + // semantically it's not used that often. One of the key elements to + // l predicate mechanism is not adding DFA states that see + // predicates immediately afterwards in the ATN. For example, + + // a : ID {p1}? | ID {p2}? + + // should create the start state for rule 'a' (to save start state + // competition), but should not create target of ID state. 
The + // collection of ATN states the following ID references includes + // states reached by traversing predicates. Since l is when we + // test them, we cannot cash the DFA state target of ID. + + pt := trans.(*PredicateTransition) + + if LexerATNSimulatorDebug { + fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex)) + } + configs.SetHasSemanticContext(true) + if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionACTION { + if config.context == nil || config.context.hasEmptyPath() { + // execute actions anywhere in the start rule for a token. + // + // TODO: if the entry rule is invoked recursively, some + // actions may be executed during the recursive call. The + // problem can appear when hasEmptyPath() is true but + // isEmpty() is false. In l case, the config needs to be + // split into two contexts - one with just the empty path + // and another with everything but the empty path. + // Unfortunately, the current algorithm does not allow + // getEpsilonTarget to return two configurations, so + // additional modifications are needed before we can support + // the split operation. + lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex]) + cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor) + } else { + // ignore actions in referenced rules + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } else if trans.getSerializationType() == TransitionEPSILON { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } else if trans.getSerializationType() == TransitionATOM || + trans.getSerializationType() == TransitionRANGE || + trans.getSerializationType() == TransitionSET { + if treatEOFAsEpsilon { + if trans.Matches(TokenEOF, 0, LexerMaxCharValue) { + cfg = NewLexerATNConfig4(config, trans.getTarget()) + } + } + } + return cfg +} + +// Evaluate a predicate specified in the lexer. +// +//

+// If {@code speculative} is {@code true}, this method was called before +// {@link //consume} for the Matched character. This method should call +// {@link //consume} before evaluating the predicate to ensure position-sensitive +// values, including {@link Lexer//GetText}, {@link Lexer//GetLine}, +// and {@link Lexer//getcolumn}, properly reflect the current +// lexer state. This method should restore {@code input} and the simulator +// to the original state before returning (i.e. undo the actions made by the +// call to {@link //consume}).
+// +// @param input The input stream. +// @param ruleIndex The rule containing the predicate. +// @param predIndex The index of the predicate within the rule. +// @param speculative {@code true} if the current index in {@code input} is +// one character before the predicate's location. +// +// @return {@code true} if the specified predicate evaluates to +// {@code true}. +// / +func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool { + // assume true if no recognizer was provided + if l.recog == nil { + return true + } + if !speculative { + return l.recog.Sempred(nil, ruleIndex, predIndex) + } + savedcolumn := l.CharPositionInLine + savedLine := l.Line + index := input.Index() + marker := input.Mark() + + defer func() { + l.CharPositionInLine = savedcolumn + l.Line = savedLine + input.Seek(index) + input.Release(marker) + }() + + l.Consume(input) + return l.recog.Sempred(nil, ruleIndex, predIndex) +} + +func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) { + settings.index = input.Index() + settings.line = l.Line + settings.column = l.CharPositionInLine + settings.dfaState = dfaState +} + +func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState { + if to == nil && cfgs != nil { + // leading to l call, ATNConfigSet.hasSemanticContext is used as a + // marker indicating dynamic predicate evaluation makes l edge + // dependent on the specific input sequence, so the static edge in the + // DFA should be omitted. The target DFAState is still created since + // execATN has the ability to reSynchronize with the DFA state cache + // following the predicate evaluation step. + // + // TJP notes: next time through the DFA, we see a pred again and eval. + // If that gets us to a previously created (but dangling) DFA + // state, we can continue in pure DFA mode from there. + // / + suppressEdge := cfgs.HasSemanticContext() + cfgs.SetHasSemanticContext(false) + + to = l.addDFAState(cfgs) + + if suppressEdge { + return to + } + } + // add the edge + if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge { + // Only track edges within the DFA bounds + return to + } + if LexerATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk)) + } + if from.edges == nil { + // make room for tokens 1..n and -1 masquerading as index 0 + from.edges = make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1) + } + from.edges[tk-LexerATNSimulatorMinDFAEdge] = to // connect + + return to +} + +// Add a NewDFA state if there isn't one with l set of +// configurations already. This method also detects the first +// configuration containing an ATN rule stop state. Later, when +// traversing the DFA, we will know which rule to accept. 
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet) *DFAState { + + proposed := NewDFAState(-1, configs) + var firstConfigWithRuleStopState ATNConfig + + for _, cfg := range configs.GetItems() { + + _, ok := cfg.GetState().(*RuleStopState) + + if ok { + firstConfigWithRuleStopState = cfg + break + } + } + if firstConfigWithRuleStopState != nil { + proposed.isAcceptState = true + proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor + proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()]) + } + hash := proposed.hash() + dfa := l.decisionToDFA[l.mode] + existing, ok := dfa.getState(hash) + if ok { + return existing + } + newState := proposed + newState.stateNumber = dfa.numStates() + configs.SetReadOnly(true) + newState.configs = configs + dfa.setState(hash, newState) + return newState +} + +func (l *LexerATNSimulator) getDFA(mode int) *DFA { + return l.decisionToDFA[mode] +} + +// Get the text Matched so far for the current token. +func (l *LexerATNSimulator) GetText(input CharStream) string { + // index is first lookahead char, don't include. + return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1)) +} + +func (l *LexerATNSimulator) Consume(input CharStream) { + curChar := input.LA(1) + if curChar == int('\n') { + l.Line++ + l.CharPositionInLine = 0 + } else { + l.CharPositionInLine++ + } + input.Consume() +} + +func (l *LexerATNSimulator) GetCharPositionInLine() int { + return l.CharPositionInLine +} + +func (l *LexerATNSimulator) GetLine() int { + return l.Line +} + +func (l *LexerATNSimulator) GetTokenName(tt int) string { + if tt == -1 { + return "EOF" + } + + return "'" + string(tt) + "'" +} + +func resetSimState(sim *SimState) { + sim.index = -1 + sim.line = 0 + sim.column = -1 + sim.dfaState = nil +} + +type SimState struct { + index int + line int + column int + dfaState *DFAState +} + +func NewSimState() *SimState { + s := new(SimState) + resetSimState(s) + return s +} + +func (s *SimState) reset() { + resetSimState(s) +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go new file mode 100644 index 0000000000..f5afd09b39 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go @@ -0,0 +1,215 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type LL1Analyzer struct { + atn *ATN +} + +func NewLL1Analyzer(atn *ATN) *LL1Analyzer { + la := new(LL1Analyzer) + la.atn = atn + return la +} + +//* Special value added to the lookahead sets to indicate that we hit +// a predicate during analysis if {@code seeThruPreds==false}. +/// +const ( + LL1AnalyzerHitPred = TokenInvalidType +) + +//* +// Calculates the SLL(1) expected lookahead set for each outgoing transition +// of an {@link ATNState}. The returned array has one element for each +// outgoing transition in {@code s}. If the closure from transition +// i leads to a semantic predicate before Matching a symbol, the +// element at index i of the result will be {@code nil}. +// +// @param s the ATN state +// @return the expected symbols for each outgoing transition of {@code s}. 
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet { + if s == nil { + return nil + } + count := len(s.GetTransitions()) + look := make([]*IntervalSet, count) + for alt := 0; alt < count; alt++ { + look[alt] = NewIntervalSet() + lookBusy := NewSet(nil, nil) + seeThruPreds := false // fail to get lookahead upon pred + la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false) + // Wipe out lookahead for la alternative if we found nothing + // or we had a predicate when we !seeThruPreds + if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) { + look[alt] = nil + } + } + return look +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

+// If {@code ctx} is {@code nil} and the end of the rule containing +// {@code s} is reached, {@link Token//EPSILON} is added to the result set. +// If {@code ctx} is not {@code nil} and the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.
+// +// @param s the ATN state +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx the complete parser context, or {@code nil} if the context +// should be ignored +// +// @return The set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +/// +func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet { + r := NewIntervalSet() + seeThruPreds := true // ignore preds get all lookahead + var lookContext PredictionContext + if ctx != nil { + lookContext = predictionContextFromRuleContext(s.GetATN(), ctx) + } + la.look1(s, stopState, lookContext, r, NewSet(nil, nil), NewBitSet(), seeThruPreds, true) + return r +} + +//* +// Compute set of tokens that can follow {@code s} in the ATN in the +// specified {@code ctx}. +// +//

+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the +// rule containing {@code s} is reached, {@link Token//EPSILON} is added to +// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is +// {@code true} and {@code stopState} or the end of the outermost rule is +// reached, {@link Token//EOF} is added to the result set.
+// +// @param s the ATN state. +// @param stopState the ATN state to stop at. This can be a +// {@link BlockEndState} to detect epsilon paths through a closure. +// @param ctx The outer context, or {@code nil} if the outer context should +// not be used. +// @param look The result lookahead set. +// @param lookBusy A set used for preventing epsilon closures in the ATN +// from causing a stack overflow. Outside code should pass +// {@code NewSet} for la argument. +// @param calledRuleStack A set used for preventing left recursion in the +// ATN from causing a stack overflow. Outside code should pass +// {@code NewBitSet()} for la argument. +// @param seeThruPreds {@code true} to true semantic predicates as +// implicitly {@code true} and "see through them", otherwise {@code false} +// to treat semantic predicates as opaque and add {@link //HitPred} to the +// result if one is encountered. +// @param addEOF Add {@link Token//EOF} to the result if the end of the +// outermost context is reached. This parameter has no effect if {@code ctx} +// is {@code nil}. + +func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) { + + returnState := la.atn.states[ctx.getReturnState(i)] + + removed := calledRuleStack.contains(returnState.GetRuleIndex()) + + defer func() { + if removed { + calledRuleStack.add(returnState.GetRuleIndex()) + } + }() + + calledRuleStack.remove(returnState.GetRuleIndex()) + la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} + +func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) { + + c := NewBaseATNConfig6(s, 0, ctx) + + if lookBusy.contains(c) { + return + } + + lookBusy.add(c) + + if s == stopState { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + } + + _, ok := s.(*RuleStopState) + + if ok { + if ctx == nil { + look.addOne(TokenEpsilon) + return + } else if ctx.isEmpty() && addEOF { + look.addOne(TokenEOF) + return + } + + if ctx != BasePredictionContextEMPTY { + // run thru all possible stack tops in ctx + for i := 0; i < ctx.length(); i++ { + returnState := la.atn.states[ctx.getReturnState(i)] + la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i) + } + return + } + } + + n := len(s.GetTransitions()) + + for i := 0; i < n; i++ { + t := s.GetTransitions()[i] + + if t1, ok := t.(*RuleTransition); ok { + if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) { + continue + } + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1) + } else if t2, ok := t.(AbstractPredicateTransition); ok { + if seeThruPreds { + la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else { + look.addOne(LL1AnalyzerHitPred) + } + } else if t.getIsEpsilon() { + la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + } else if _, ok := t.(*WildcardTransition); ok { + look.addRange(TokenMinUserTokenType, la.atn.maxTokenType) + } else { + set := t.getLabel() + if set != nil { + if _, ok := t.(*NotSetTransition); ok { + set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType) 
+ } + look.addSet(set) + } + } + } +} + +func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy *Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) { + + newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber()) + + defer func() { + calledRuleStack.remove(t1.getTarget().GetRuleIndex()) + }() + + calledRuleStack.add(t1.getTarget().GetRuleIndex()) + la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF) + +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go new file mode 100644 index 0000000000..fb60258e33 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go @@ -0,0 +1,718 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +type Parser interface { + Recognizer + + GetInterpreter() *ParserATNSimulator + + GetTokenStream() TokenStream + GetTokenFactory() TokenFactory + GetParserRuleContext() ParserRuleContext + SetParserRuleContext(ParserRuleContext) + Consume() Token + GetParseListeners() []ParseTreeListener + + GetErrorHandler() ErrorStrategy + SetErrorHandler(ErrorStrategy) + GetInputStream() IntStream + GetCurrentToken() Token + GetExpectedTokens() *IntervalSet + NotifyErrorListeners(string, Token, RecognitionException) + IsExpectedToken(int) bool + GetPrecedence() int + GetRuleInvocationStack(ParserRuleContext) []string +} + +type BaseParser struct { + *BaseRecognizer + + Interpreter *ParserATNSimulator + BuildParseTrees bool + + input TokenStream + errHandler ErrorStrategy + precedenceStack IntStack + ctx ParserRuleContext + + tracer *TraceListener + parseListeners []ParseTreeListener + _SyntaxErrors int +} + +// p.is all the parsing support code essentially most of it is error +// recovery stuff.// +func NewBaseParser(input TokenStream) *BaseParser { + + p := new(BaseParser) + + p.BaseRecognizer = NewBaseRecognizer() + + // The input stream. + p.input = nil + // The error handling strategy for the parser. The default value is a new + // instance of {@link DefaultErrorStrategy}. + p.errHandler = NewDefaultErrorStrategy() + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + // The {@link ParserRuleContext} object for the currently executing rule. + // p.is always non-nil during the parsing process. + p.ctx = nil + // Specifies whether or not the parser should construct a parse tree during + // the parsing process. The default value is {@code true}. + p.BuildParseTrees = true + // When {@link //setTrace}{@code (true)} is called, a reference to the + // {@link TraceListener} is stored here so it can be easily removed in a + // later call to {@link //setTrace}{@code (false)}. The listener itself is + // implemented as a parser listener so p.field is not directly used by + // other parser methods. + p.tracer = nil + // The list of {@link ParseTreeListener} listeners registered to receive + // events during the parse. + p.parseListeners = nil + // The number of syntax errors Reported during parsing. p.value is + // incremented each time {@link //NotifyErrorListeners} is called. 
+ p._SyntaxErrors = 0 + p.SetInputStream(input) + + return p +} + +// p.field maps from the serialized ATN string to the deserialized {@link +// ATN} with +// bypass alternatives. +// +// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions() +// +var bypassAltsAtnCache = make(map[string]int) + +// reset the parser's state// +func (p *BaseParser) reset() { + if p.input != nil { + p.input.Seek(0) + } + p.errHandler.reset(p) + p.ctx = nil + p._SyntaxErrors = 0 + p.SetTrace(nil) + p.precedenceStack = make([]int, 0) + p.precedenceStack.Push(0) + if p.Interpreter != nil { + p.Interpreter.reset() + } +} + +func (p *BaseParser) GetErrorHandler() ErrorStrategy { + return p.errHandler +} + +func (p *BaseParser) SetErrorHandler(e ErrorStrategy) { + p.errHandler = e +} + +// Match current input symbol against {@code ttype}. If the symbol type +// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are +// called to complete the Match process. +// +//

+// If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @param ttype the token type to Match +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// {@code ttype} and the error strategy could not recover from the +// mismatched symbol + +func (p *BaseParser) Match(ttype int) Token { + + t := p.GetCurrentToken() + + if t.GetTokenType() == ttype { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + + return t +} + +// Match current input symbol as a wildcard. If the symbol type Matches +// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch} +// and {@link //consume} are called to complete the Match process. +// +//

+// If the symbol type does not Match, +// {@link ANTLRErrorStrategy//recoverInline} is called on the current error +// strategy to attempt recovery. If {@link //getBuildParseTree} is +// {@code true} and the token index of the symbol returned by +// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to +// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+// +// @return the Matched symbol +// @panics RecognitionException if the current input symbol did not Match +// a wildcard and the error strategy could not recover from the mismatched +// symbol + +func (p *BaseParser) MatchWildcard() Token { + t := p.GetCurrentToken() + if t.GetTokenType() > 0 { + p.errHandler.ReportMatch(p) + p.Consume() + } else { + t = p.errHandler.RecoverInline(p) + if p.BuildParseTrees && t.GetTokenIndex() == -1 { + // we must have conjured up a Newtoken during single token + // insertion + // if it's not the current symbol + p.ctx.AddErrorNode(t) + } + } + return t +} + +func (p *BaseParser) GetParserRuleContext() ParserRuleContext { + return p.ctx +} + +func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) { + p.ctx = v +} + +func (p *BaseParser) GetParseListeners() []ParseTreeListener { + if p.parseListeners == nil { + return make([]ParseTreeListener, 0) + } + return p.parseListeners +} + +// Registers {@code listener} to receive events during the parsing process. +// +//

+// To support output-preserving grammar transformations (including but not +// limited to left-recursion removal, automated left-factoring, and +// optimized code generation), calls to listener methods during the parse +// may differ substantially from calls made by +// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In +// particular, rule entry and exit events may occur in a different order +// during the parse than after the parse is complete. In addition, calls to certain +// rule entry methods may be omitted.
+// +//

+// With the following specific exceptions, calls to listener events are +// deterministic, i.e. for identical input the calls to listener +// methods will be the same.
+// +//
    +//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+// +// @param listener the listener to add +// +// @panics nilPointerException if {@code} listener is {@code nil} +// +func (p *BaseParser) AddParseListener(listener ParseTreeListener) { + if listener == nil { + panic("listener") + } + if p.parseListeners == nil { + p.parseListeners = make([]ParseTreeListener, 0) + } + p.parseListeners = append(p.parseListeners, listener) +} + +// +// Remove {@code listener} from the list of parse listeners. +// +//

+// If {@code listener} is {@code nil} or has not been added as a parse +// listener, this method does nothing.
+// @param listener the listener to remove +// +func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) { + + if p.parseListeners != nil { + + idx := -1 + for i, v := range p.parseListeners { + if v == listener { + idx = i + break + } + } + + if idx == -1 { + return + } + + // remove the listener from the slice + p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...) + + if len(p.parseListeners) == 0 { + p.parseListeners = nil + } + } +} + +// Remove all parse listeners. +func (p *BaseParser) removeParseListeners() { + p.parseListeners = nil +} + +// Notify any parse listeners of an enter rule event. +func (p *BaseParser) TriggerEnterRuleEvent() { + if p.parseListeners != nil { + ctx := p.ctx + for _, listener := range p.parseListeners { + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) + } + } +} + +// +// Notify any parse listeners of an exit rule event. +// +// @see //addParseListener +// +func (p *BaseParser) TriggerExitRuleEvent() { + if p.parseListeners != nil { + // reverse order walk of listeners + ctx := p.ctx + l := len(p.parseListeners) - 1 + + for i := range p.parseListeners { + listener := p.parseListeners[l-i] + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) + } + } +} + +func (p *BaseParser) GetInterpreter() *ParserATNSimulator { + return p.Interpreter +} + +func (p *BaseParser) GetATN() *ATN { + return p.Interpreter.atn +} + +func (p *BaseParser) GetTokenFactory() TokenFactory { + return p.input.GetTokenSource().GetTokenFactory() +} + +// Tell our token source and error strategy about a Newway to create tokens.// +func (p *BaseParser) setTokenFactory(factory TokenFactory) { + p.input.GetTokenSource().setTokenFactory(factory) +} + +// The ATN with bypass alternatives is expensive to create so we create it +// lazily. +// +// @panics UnsupportedOperationException if the current parser does not +// implement the {@link //getSerializedATN()} method. +// +func (p *BaseParser) GetATNWithBypassAlts() { + + // TODO + panic("Not implemented!") + + // serializedAtn := p.getSerializedATN() + // if (serializedAtn == nil) { + // panic("The current parser does not support an ATN with bypass alternatives.") + // } + // result := p.bypassAltsAtnCache[serializedAtn] + // if (result == nil) { + // deserializationOptions := NewATNDeserializationOptions(nil) + // deserializationOptions.generateRuleBypassTransitions = true + // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn) + // p.bypassAltsAtnCache[serializedAtn] = result + // } + // return result +} + +// The preferred method of getting a tree pattern. For example, here's a +// sample use: +// +//
+// <pre>
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+// </pre>
+ +func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) { + + panic("NewParseTreePatternMatcher not implemented!") + // + // if (lexer == nil) { + // if (p.GetTokenStream() != nil) { + // tokenSource := p.GetTokenStream().GetTokenSource() + // if _, ok := tokenSource.(ILexer); ok { + // lexer = tokenSource + // } + // } + // } + // if (lexer == nil) { + // panic("Parser can't discover a lexer to use") + // } + + // m := NewParseTreePatternMatcher(lexer, p) + // return m.compile(pattern, patternRuleIndex) +} + +func (p *BaseParser) GetInputStream() IntStream { + return p.GetTokenStream() +} + +func (p *BaseParser) SetInputStream(input TokenStream) { + p.SetTokenStream(input) +} + +func (p *BaseParser) GetTokenStream() TokenStream { + return p.input +} + +// Set the token stream and reset the parser.// +func (p *BaseParser) SetTokenStream(input TokenStream) { + p.input = nil + p.reset() + p.input = input +} + +// Match needs to return the current input symbol, which gets put +// into the label for the associated token ref e.g., x=ID. +// +func (p *BaseParser) GetCurrentToken() Token { + return p.input.LT(1) +} + +func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) { + if offendingToken == nil { + offendingToken = p.GetCurrentToken() + } + p._SyntaxErrors++ + line := offendingToken.GetLine() + column := offendingToken.GetColumn() + listener := p.GetErrorListenerDispatch() + listener.SyntaxError(p, offendingToken, line, column, msg, err) +} + +func (p *BaseParser) Consume() Token { + o := p.GetCurrentToken() + if o.GetTokenType() != TokenEOF { + p.GetInputStream().Consume() + } + hasListener := p.parseListeners != nil && len(p.parseListeners) > 0 + if p.BuildParseTrees || hasListener { + if p.errHandler.inErrorRecoveryMode(p) { + node := p.ctx.AddErrorNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitErrorNode(node) + } + } + + } else { + node := p.ctx.AddTokenNode(o) + if p.parseListeners != nil { + for _, l := range p.parseListeners { + l.VisitTerminal(node) + } + } + } + // node.invokingState = p.state + } + + return o +} + +func (p *BaseParser) addContextToParseTree() { + // add current context to parent if we have a parent + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx) + } +} + +func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) { + p.SetState(state) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.BuildParseTrees { + p.addContextToParseTree() + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() + } +} + +func (p *BaseParser) ExitRule() { + p.ctx.SetStop(p.input.LT(-1)) + // trigger event on ctx, before it reverts to parent + if p.parseListeners != nil { + p.TriggerExitRuleEvent() + } + p.SetState(p.ctx.GetInvokingState()) + if p.ctx.GetParent() != nil { + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } else { + p.ctx = nil + } +} + +func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) { + localctx.SetAltNumber(altNum) + // if we have Newlocalctx, make sure we replace existing ctx + // that is previous child of parse tree + if p.BuildParseTrees && p.ctx != localctx { + if p.ctx.GetParent() != nil { + p.ctx.GetParent().(ParserRuleContext).RemoveLastChild() + p.ctx.GetParent().(ParserRuleContext).AddChild(localctx) + } + } + p.ctx = localctx +} + +// Get the precedence level for the top-most precedence rule. 
+// +// @return The precedence level for the top-most precedence rule, or -1 if +// the parser context is not nested within a precedence rule. + +func (p *BaseParser) GetPrecedence() int { + if len(p.precedenceStack) == 0 { + return -1 + } + + return p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) { + p.SetState(state) + p.precedenceStack.Push(precedence) + p.ctx = localctx + p.ctx.SetStart(p.input.LT(1)) + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +// +// Like {@link //EnterRule} but for recursive rules. + +func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) { + previous := p.ctx + previous.SetParent(localctx) + previous.SetInvokingState(state) + previous.SetStop(p.input.LT(-1)) + + p.ctx = localctx + p.ctx.SetStart(previous.GetStart()) + if p.BuildParseTrees { + p.ctx.AddChild(previous) + } + if p.parseListeners != nil { + p.TriggerEnterRuleEvent() // simulates rule entry for + // left-recursive rules + } +} + +func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) { + p.precedenceStack.Pop() + p.ctx.SetStop(p.input.LT(-1)) + retCtx := p.ctx // save current ctx (return value) + // unroll so ctx is as it was before call to recursive method + if p.parseListeners != nil { + for p.ctx != parentCtx { + p.TriggerExitRuleEvent() + p.ctx = p.ctx.GetParent().(ParserRuleContext) + } + } else { + p.ctx = parentCtx + } + // hook into tree + retCtx.SetParent(parentCtx) + if p.BuildParseTrees && parentCtx != nil { + // add return ctx into invoking rule's tree + parentCtx.AddChild(retCtx) + } +} + +func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext { + ctx := p.ctx + for ctx != nil { + if ctx.GetRuleIndex() == ruleIndex { + return ctx + } + ctx = ctx.GetParent().(ParserRuleContext) + } + return nil +} + +func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool { + return precedence >= p.precedenceStack[len(p.precedenceStack)-1] +} + +func (p *BaseParser) inContext(context ParserRuleContext) bool { + // TODO: useful in parser? + return false +} + +// +// Checks whether or not {@code symbol} can follow the current state in the +// ATN. The behavior of p.method is equivalent to the following, but is +// implemented such that the complete context-sensitive follow set does not +// need to be explicitly constructed. +// +//
+// <pre>
+// return getExpectedTokens().contains(symbol)
+// </pre>
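A minimal sketch, not part of the vendored file, of how an error-recovery strategy might use IsExpectedToken; the helper name and the single-token-deletion policy are assumptions:

// canDeleteCurrentToken reports whether dropping the offending LT(1) token
// would leave the parser looking at an expected symbol, i.e. whether
// single-token deletion is a safe recovery step.
func canDeleteCurrentToken(p *BaseParser) bool {
	next := p.GetTokenStream().LT(2) // the token that becomes current after deletion
	return next != nil && p.IsExpectedToken(next.GetTokenType())
}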
+// +// @param symbol the symbol type to check +// @return {@code true} if {@code symbol} can follow the current state in +// the ATN, otherwise {@code false}. + +func (p *BaseParser) IsExpectedToken(symbol int) bool { + atn := p.Interpreter.atn + ctx := p.ctx + s := atn.states[p.state] + following := atn.NextTokens(s, nil) + if following.contains(symbol) { + return true + } + if !following.contains(TokenEpsilon) { + return false + } + for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) { + invokingState := atn.states[ctx.GetInvokingState()] + rt := invokingState.GetTransitions()[0] + following = atn.NextTokens(rt.(*RuleTransition).followState, nil) + if following.contains(symbol) { + return true + } + ctx = ctx.GetParent().(ParserRuleContext) + } + if following.contains(TokenEpsilon) && symbol == TokenEOF { + return true + } + + return false +} + +// Computes the set of input symbols which could follow the current parser +// state and context, as given by {@link //GetState} and {@link //GetContext}, +// respectively. +// +// @see ATN//getExpectedTokens(int, RuleContext) +// +func (p *BaseParser) GetExpectedTokens() *IntervalSet { + return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx) +} + +func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet { + atn := p.Interpreter.atn + s := atn.states[p.state] + return atn.NextTokens(s, nil) +} + +// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.// +func (p *BaseParser) GetRuleIndex(ruleName string) int { + var ruleIndex, ok = p.GetRuleIndexMap()[ruleName] + if ok { + return ruleIndex + } + + return -1 +} + +// Return List<String> of the rule names in your parser instance +// leading up to a call to the current rule. You could override if +// you want more details such as the file/line info of where +// in the ATN a rule is invoked. +// +// this very useful for error messages. + +func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string { + if c == nil { + c = p.ctx + } + stack := make([]string, 0) + for c != nil { + // compute what follows who invoked us + ruleIndex := c.GetRuleIndex() + if ruleIndex < 0 { + stack = append(stack, "n/a") + } else { + stack = append(stack, p.GetRuleNames()[ruleIndex]) + } + + vp := c.GetParent() + + if vp == nil { + break + } + + c = vp.(ParserRuleContext) + } + return stack +} + +// For debugging and other purposes.// +func (p *BaseParser) GetDFAStrings() string { + return fmt.Sprint(p.Interpreter.decisionToDFA) +} + +// For debugging and other purposes.// +func (p *BaseParser) DumpDFA() { + seenOne := false + for _, dfa := range p.Interpreter.decisionToDFA { + if dfa.numStates() > 0 { + if seenOne { + fmt.Println() + } + fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":") + fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames)) + seenOne = true + } + } +} + +func (p *BaseParser) GetSourceName() string { + return p.GrammarFileName +} + +// During a parse is sometimes useful to listen in on the rule entry and exit +// events as well as token Matches. p.is for quick and dirty debugging. 
+// +func (p *BaseParser) SetTrace(trace *TraceListener) { + if trace == nil { + p.RemoveParseListener(p.tracer) + p.tracer = nil + } else { + if p.tracer != nil { + p.RemoveParseListener(p.tracer) + } + p.tracer = NewTraceListener(p) + p.AddParseListener(p.tracer) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go new file mode 100644 index 0000000000..128b9a96d4 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go @@ -0,0 +1,1473 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" + "strings" +) + +var ( + ParserATNSimulatorDebug = false + ParserATNSimulatorListATNDecisions = false + ParserATNSimulatorDFADebug = false + ParserATNSimulatorRetryDebug = false +) + +type ParserATNSimulator struct { + *BaseATNSimulator + + parser Parser + predictionMode int + input TokenStream + startIndex int + dfa *DFA + mergeCache *DoubleDict + outerContext ParserRuleContext +} + +func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator { + + p := new(ParserATNSimulator) + + p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache) + + p.parser = parser + p.decisionToDFA = decisionToDFA + // SLL, LL, or LL + exact ambig detection?// + p.predictionMode = PredictionModeLL + // LAME globals to avoid parameters!!!!! I need these down deep in predTransition + p.input = nil + p.startIndex = 0 + p.outerContext = nil + p.dfa = nil + // Each prediction operation uses a cache for merge of prediction contexts. + // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap + // isn't Synchronized but we're ok since two threads shouldn't reuse same + // parser/atnsim object because it can only handle one input at a time. + // This maps graphs a and b to merged result c. (a,b)&rarrc. We can avoid + // the merge if we ever see a and b again. Note that (b,a)&rarrc should + // also be examined during cache lookup. + // + p.mergeCache = nil + + return p +} + +func (p *ParserATNSimulator) GetPredictionMode() int { + return p.predictionMode +} + +func (p *ParserATNSimulator) SetPredictionMode(v int) { + p.predictionMode = v +} + +func (p *ParserATNSimulator) reset() { +} + +func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int { + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + + strconv.Itoa(input.LT(1).GetColumn())) + } + + p.input = input + p.startIndex = input.Index() + p.outerContext = outerContext + + dfa := p.decisionToDFA[decision] + p.dfa = dfa + m := input.Mark() + index := input.Index() + + defer func() { + p.dfa = nil + p.mergeCache = nil // wack cache after each prediction + input.Seek(index) + input.Release(m) + }() + + // Now we are certain to have a specific decision's DFA + // But, do we still need an initial state? + var s0 *DFAState + if dfa.precedenceDfa { + // the start state for a precedence DFA depends on the current + // parser precedence, and is provided by a DFA method. 
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence()) + } else { + // the start state for a "regular" DFA is just s0 + s0 = dfa.s0 + } + + if s0 == nil { + if outerContext == nil { + outerContext = RuleContextEmpty + } + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil)) + } + // If p is not a precedence DFA, we check the ATN start state + // to determine if p ATN start state is the decision for the + // closure block that determines whether a precedence rule + // should continue or complete. + + t2 := dfa.atnStartState + t, ok := t2.(*StarLoopEntryState) + if !dfa.precedenceDfa && ok { + if t.precedenceRuleDecision { + dfa.setPrecedenceDfa(true) + } + } + fullCtx := false + s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx) + + if dfa.precedenceDfa { + // If p is a precedence DFA, we use applyPrecedenceFilter + // to convert the computed start state to a precedence start + // state. We then use DFA.setPrecedenceStartState to set the + // appropriate start state for the precedence level rather + // than simply setting DFA.s0. + // + s0Closure = p.applyPrecedenceFilter(s0Closure) + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0) + } else { + s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure)) + dfa.s0 = s0 + } + } + alt := p.execATN(dfa, s0, input, index, outerContext) + if ParserATNSimulatorDebug { + fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil)) + } + return alt + +} + +// Performs ATN simulation to compute a predicted alternative based +// upon the remaining input, but also updates the DFA cache to avoid +// having to traverse the ATN again for the same input sequence. + +// There are some key conditions we're looking for after computing a new +// set of ATN configs (proposed DFA state): +// if the set is empty, there is no viable alternative for current symbol +// does the state uniquely predict an alternative? +// does the state have a conflict that would prevent us from +// putting it on the work list? + +// We also have some key operations to do: +// add an edge from previous DFA state to potentially NewDFA state, D, +// upon current symbol but only if adding to work list, which means in all +// cases except no viable alternative (and possibly non-greedy decisions?) 
+// collecting predicates and adding semantic context to DFA accept states +// adding rule context to context-sensitive DFA accept states +// consuming an input symbol +// Reporting a conflict +// Reporting an ambiguity +// Reporting a context sensitivity +// Reporting insufficient predicates + +// cover these cases: +// dead end +// single alt +// single alt + preds +// conflict +// conflict + preds +// +func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) + + " exec LA(1)==" + p.getLookaheadName(input) + + " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn())) + } + + previousD := s0 + + if ParserATNSimulatorDebug { + fmt.Println("s0 = " + s0.String()) + } + t := input.LA(1) + for { // for more work + D := p.getExistingTargetState(previousD, t) + if D == nil { + D = p.computeTargetState(dfa, previousD, t) + } + if D == ATNSimulatorError { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for SLL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + e := p.noViableAlt(input, outerContext, previousD.configs, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + if D.requiresFullContext && p.predictionMode != PredictionModeSLL { + // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error) + conflictingAlts := D.configs.GetConflictingAlts() + if D.predicates != nil { + if ParserATNSimulatorDebug { + fmt.Println("DFA state has preds in DFA sim LL failover") + } + conflictIndex := input.Index() + if conflictIndex != startIndex { + input.Seek(startIndex) + } + conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true) + if conflictingAlts.length() == 1 { + if ParserATNSimulatorDebug { + fmt.Println("Full LL avoided") + } + return conflictingAlts.minValue() + } + if conflictIndex != startIndex { + // restore the index so Reporting the fallback to full + // context occurs with the index at the correct spot + input.Seek(conflictIndex) + } + } + if ParserATNSimulatorDFADebug { + fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String()) + } + fullCtx := true + s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx) + p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index()) + alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext) + return alt + } + if D.isAcceptState { + if D.predicates == nil { + return D.prediction + } + stopIndex := input.Index() + input.Seek(startIndex) + alts := p.evalSemanticContext(D.predicates, outerContext, true) + if alts.length() == 0 { + panic(p.noViableAlt(input, outerContext, D.configs, startIndex)) + } else if alts.length() == 1 { + return alts.minValue() + } else { + // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts 
is Reported. + p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs) + return alts.minValue() + } + } + previousD = D + + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + + panic("Should not have reached p state") +} + +// Get an existing target state for an edge in the DFA. If the target state +// for the edge has not yet been computed or is otherwise not available, +// p method returns {@code nil}. +// +// @param previousD The current DFA state +// @param t The next input symbol +// @return The existing target DFA state for the given input symbol +// {@code t}, or {@code nil} if the target state for p edge is not +// already cached + +func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState { + edges := previousD.edges + if edges == nil { + return nil + } + + return edges[t+1] +} + +// Compute a target state for an edge in the DFA, and attempt to add the +// computed state and corresponding edge to the DFA. +// +// @param dfa The DFA +// @param previousD The current DFA state +// @param t The next input symbol +// +// @return The computed target DFA state for the given input symbol +// {@code t}. If {@code t} does not lead to a valid DFA state, p method +// returns {@link //ERROR}. + +func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState { + reach := p.computeReachSet(previousD.configs, t, false) + + if reach == nil { + p.addDFAEdge(dfa, previousD, t, ATNSimulatorError) + return ATNSimulatorError + } + // create Newtarget state we'll add to DFA after it's complete + D := NewDFAState(-1, reach) + + predictedAlt := p.getUniqueAlt(reach) + + if ParserATNSimulatorDebug { + altSubSets := PredictionModegetConflictingAltSubsets(reach) + fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) + + ", previous=" + previousD.configs.String() + + ", configs=" + reach.String() + + ", predict=" + strconv.Itoa(predictedAlt) + + ", allSubsetsConflict=" + + fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) + + ", conflictingAlts=" + p.getConflictingAlts(reach).String()) + } + if predictedAlt != ATNInvalidAltNumber { + // NO CONFLICT, UNIQUELY PREDICTED ALT + D.isAcceptState = true + D.configs.SetUniqueAlt(predictedAlt) + D.setPrediction(predictedAlt) + } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) { + // MORE THAN ONE VIABLE ALTERNATIVE + D.configs.SetConflictingAlts(p.getConflictingAlts(reach)) + D.requiresFullContext = true + // in SLL-only mode, we will stop at p state and return the minimum alt + D.isAcceptState = true + D.setPrediction(D.configs.GetConflictingAlts().minValue()) + } + if D.isAcceptState && D.configs.HasSemanticContext() { + p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision)) + if D.predicates != nil { + D.setPrediction(ATNInvalidAltNumber) + } + } + // all adds to dfa are done after we've created full D state + D = p.addDFAEdge(dfa, previousD, t, D) + return D +} + +func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) { + // We need to test all predicates, even in DFA states that + // uniquely predict alternative. 
+ nalts := len(decisionState.GetTransitions()) + // Update DFA so reach becomes accept state with (predicate,alt) + // pairs if preds found for conflicting alts + altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs) + altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts) + if altToPred != nil { + dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred) + dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds + } else { + // There are preds in configs but they might go away + // when OR'd together like {p}? || NONE == NONE. If neither + // alt has preds, resolve to min alt + dfaState.setPrediction(altsToCollectPredsFrom.minValue()) + } +} + +// comes back with reach.uniqueAlt set to a valid alt +func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int { + + if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions { + fmt.Println("execATNWithFullContext " + s0.String()) + } + + fullCtx := true + foundExactAmbig := false + var reach ATNConfigSet + previous := s0 + input.Seek(startIndex) + t := input.LA(1) + predictedAlt := -1 + + for { // for more work + reach = p.computeReachSet(previous, t, fullCtx) + if reach == nil { + // if any configs in previous dipped into outer context, that + // means that input up to t actually finished entry rule + // at least for LL decision. Full LL doesn't dip into outer + // so don't need special case. + // We will get an error no matter what so delay until after + // decision better error message. Also, no reachable target + // ATN states in SLL implies LL will also get nowhere. + // If conflict in states that dip out, choose min since we + // will get error no matter what. + e := p.noViableAlt(input, outerContext, previous, startIndex) + input.Seek(startIndex) + alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext) + if alt != ATNInvalidAltNumber { + return alt + } + + panic(e) + } + altSubSets := PredictionModegetConflictingAltSubsets(reach) + if ParserATNSimulatorDebug { + fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" + + strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" + + fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets))) + } + reach.SetUniqueAlt(p.getUniqueAlt(reach)) + // unique prediction? + if reach.GetUniqueAlt() != ATNInvalidAltNumber { + predictedAlt = reach.GetUniqueAlt() + break + } else if p.predictionMode != PredictionModeLLExactAmbigDetection { + predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets) + if predictedAlt != ATNInvalidAltNumber { + break + } + } else { + // In exact ambiguity mode, we never try to terminate early. + // Just keeps scarfing until we know what the conflict is + if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) { + foundExactAmbig = true + predictedAlt = PredictionModegetSingleViableAlt(altSubSets) + break + } + // else there are multiple non-conflicting subsets or + // we're not sure what the ambiguity is yet. + // So, keep going. + } + previous = reach + if t != TokenEOF { + input.Consume() + t = input.LA(1) + } + } + // If the configuration set uniquely predicts an alternative, + // without conflict, then we know that it's a full LL decision + // not SLL. 
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber { + p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index()) + return predictedAlt + } + // We do not check predicates here because we have checked them + // on-the-fly when doing full context prediction. + + // + // In non-exact ambiguity detection mode, we might actually be able to + // detect an exact ambiguity, but I'm not going to spend the cycles + // needed to check. We only emit ambiguity warnings in exact ambiguity + // mode. + // + // For example, we might know that we have conflicting configurations. + // But, that does not mean that there is no way forward without a + // conflict. It's possible to have nonconflicting alt subsets as in: + + // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}] + + // from + // + // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]), + // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])] + // + // In p case, (17,1,[5 $]) indicates there is some next sequence that + // would resolve p without conflict to alternative 1. Any other viable + // next sequence, however, is associated with a conflict. We stop + // looking for input because no amount of further lookahead will alter + // the fact that we should predict alternative 1. We just can't say for + // sure that there is an ambiguity without looking further. + + p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, nil, reach) + + return predictedAlt +} + +func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet { + if ParserATNSimulatorDebug { + fmt.Println("in computeReachSet, starting closure: " + closure.String()) + } + if p.mergeCache == nil { + p.mergeCache = NewDoubleDict() + } + intermediate := NewBaseATNConfigSet(fullCtx) + + // Configurations already in a rule stop state indicate reaching the end + // of the decision rule (local context) or end of the start rule (full + // context). Once reached, these configurations are never updated by a + // closure operation, so they are handled separately for the performance + // advantage of having a smaller intermediate set when calling closure. + // + // For full-context reach operations, separate handling is required to + // ensure that the alternative Matching the longest overall sequence is + // chosen when multiple such configurations can Match the input. + + var SkippedStopStates []*BaseATNConfig + + // First figure out where we can reach on input t + for _, c := range closure.GetItems() { + if ParserATNSimulatorDebug { + fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String()) + } + + _, ok := c.GetState().(*RuleStopState) + + if ok { + if fullCtx || t == TokenEOF { + if SkippedStopStates == nil { + SkippedStopStates = make([]*BaseATNConfig, 0) + } + SkippedStopStates = append(SkippedStopStates, c.(*BaseATNConfig)) + if ParserATNSimulatorDebug { + fmt.Println("added " + c.String() + " to SkippedStopStates") + } + } + continue + } + + for j := 0; j < len(c.GetState().GetTransitions()); j++ { + trans := c.GetState().GetTransitions()[j] + target := p.getReachableTarget(trans, t) + if target != nil { + cfg := NewBaseATNConfig4(c, target) + intermediate.Add(cfg, p.mergeCache) + if ParserATNSimulatorDebug { + fmt.Println("added " + cfg.String() + " to intermediate") + } + } + } + } + // Now figure out where the reach operation can take us... 
+ var reach ATNConfigSet + + // This block optimizes the reach operation for intermediate sets which + // trivially indicate a termination state for the overall + // AdaptivePredict operation. + // + // The conditions assume that intermediate + // contains all configurations relevant to the reach set, but p + // condition is not true when one or more configurations have been + // withheld in SkippedStopStates, or when the current symbol is EOF. + // + if SkippedStopStates == nil && t != TokenEOF { + if len(intermediate.configs) == 1 { + // Don't pursue the closure if there is just one state. + // It can only have one alternative just add to result + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber { + // Also don't pursue the closure if there is unique alternative + // among the configurations. + reach = intermediate + } + } + // If the reach set could not be trivially determined, perform a closure + // operation on the intermediate set to compute its initial value. + // + if reach == nil { + reach = NewBaseATNConfigSet(fullCtx) + closureBusy := NewSet(nil, nil) + treatEOFAsEpsilon := t == TokenEOF + for k := 0; k < len(intermediate.configs); k++ { + p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon) + } + } + if t == TokenEOF { + // After consuming EOF no additional input is possible, so we are + // only interested in configurations which reached the end of the + // decision rule (local context) or end of the start rule (full + // context). Update reach to contain only these configurations. This + // handles both explicit EOF transitions in the grammar and implicit + // EOF transitions following the end of the decision or start rule. + // + // When reach==intermediate, no closure operation was performed. In + // p case, removeAllConfigsNotInRuleStopState needs to check for + // reachable rule stop states as well as configurations already in + // a rule stop state. + // + // This is handled before the configurations in SkippedStopStates, + // because any configurations potentially added from that list are + // already guaranteed to meet p condition whether or not it's + // required. + // + reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate) + } + // If SkippedStopStates!=nil, then it contains at least one + // configuration. For full-context reach operations, these + // configurations reached the end of the start rule, in which case we + // only add them back to reach if no configuration during the current + // closure operation reached such a state. This ensures AdaptivePredict + // chooses an alternative Matching the longest overall sequence when + // multiple alternatives are viable. + // + if SkippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) { + for l := 0; l < len(SkippedStopStates); l++ { + reach.Add(SkippedStopStates[l], p.mergeCache) + } + } + if len(reach.GetItems()) == 0 { + return nil + } + + return reach +} + +// +// Return a configuration set containing only the configurations from +// {@code configs} which are in a {@link RuleStopState}. If all +// configurations in {@code configs} are already in a rule stop state, p +// method simply returns {@code configs}. +// +//
+// <p>When {@code lookToEndOfRule} is true, this method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.</p>
+// +// @param configs the configuration set to update +// @param lookToEndOfRule when true, p method checks for rule stop states +// reachable by epsilon-only transitions from each configuration in +// {@code configs}. +// +// @return {@code configs} if all configurations in {@code configs} are in a +// rule stop state, otherwise return a Newconfiguration set containing only +// the configurations from {@code configs} which are in a rule stop state +// +func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet { + if PredictionModeallConfigsInRuleStopStates(configs) { + return configs + } + result := NewBaseATNConfigSet(configs.FullContext()) + for _, config := range configs.GetItems() { + + _, ok := config.GetState().(*RuleStopState) + + if ok { + result.Add(config, p.mergeCache) + continue + } + if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() { + NextTokens := p.atn.NextTokens(config.GetState(), nil) + if NextTokens.contains(TokenEpsilon) { + endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()] + result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache) + } + } + } + return result +} + +func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet { + // always at least the implicit call to start rule + initialContext := predictionContextFromRuleContext(p.atn, ctx) + configs := NewBaseATNConfigSet(fullCtx) + for i := 0; i < len(a.GetTransitions()); i++ { + target := a.GetTransitions()[i].getTarget() + c := NewBaseATNConfig6(target, i+1, initialContext) + closureBusy := NewSet(nil, nil) + p.closure(c, configs, closureBusy, true, fullCtx, false) + } + return configs +} + +// +// This method transforms the start state computed by +// {@link //computeStartState} to the special start state used by a +// precedence DFA for a particular precedence value. The transformation +// process applies the following changes to the start state's configuration +// set. +// +//
+// <ol>
+// <li>Evaluate the precedence predicates for each configuration using
+// {@link SemanticContext//evalPrecedence}.</li>
+//
+// <li>Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context. This transformation is
+// valid for the following reasons:
+// <ul>
+// <li>The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.</li>
+//
+// <li>The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.</li>
+// </ul>
+// </li>
+// </ol>
+//
+// <p>The prediction context must be considered by this filter to address
+// situations like the following.</p>
+//
+// <pre>
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+// </pre>
+//
+// <p>In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// {@code statement}. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to {@code prog} (and then back in to {@code statement})
+// from being eliminated by the filter.</p>
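To make the shape of that left-recursive parse concrete, here is a heavily abridged sketch, not from the vendored source, of the kind of code ANTLR generates for the statement rule above using the EnterRecursionRule / PushNewRecursionContext / Precpred / UnrollRecursionContexts calls defined in this runtime; every TAParser* name, the context constructor, the decision number, and the state numbers are assumed:

func (p *TAParser) statement() {
	localctx := NewStatementContext(p, p.GetParserRuleContext(), p.GetState()) // assumed generated constructor
	p.EnterRecursionRule(localctx, 4, TAParserRULE_statement, 0)
	p.letterA() // primary alternative: statement -> letterA
	// closure: keep consuming "letterA 'b'" tails while prediction picks alt 1
	for p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 2, p.GetParserRuleContext()) == 1 {
		if !p.Precpred(p.GetParserRuleContext(), 1) {
			panic(NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 1)", ""))
		}
		p.PushNewRecursionContext(localctx, 4, TAParserRULE_statement)
		p.letterA()
		p.Match(TAParserT__0) // the 'b' token; constant name assumed
	}
	p.UnrollRecursionContexts(p.GetParserRuleContext())
}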
+// +// @param configs The configuration set computed by +// {@link //computeStartState} as the start state for the DFA. +// @return The transformed configuration set representing the start state +// for a precedence DFA at a particular precedence level (determined by +// calling {@link Parser//getPrecedence}). +// +func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet { + + statesFromAlt1 := make(map[int]PredictionContext) + configSet := NewBaseATNConfigSet(configs.FullContext()) + + for _, config := range configs.GetItems() { + // handle alt 1 first + if config.GetAlt() != 1 { + continue + } + updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext) + if updatedContext == nil { + // the configuration was eliminated + continue + } + statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext() + if updatedContext != config.GetSemanticContext() { + configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache) + } else { + configSet.Add(config, p.mergeCache) + } + } + for _, config := range configs.GetItems() { + + if config.GetAlt() == 1 { + // already handled + continue + } + // In the future, p elimination step could be updated to also + // filter the prediction context for alternatives predicting alt>1 + // (basically a graph subtraction algorithm). + if !config.getPrecedenceFilterSuppressed() { + context := statesFromAlt1[config.GetState().GetStateNumber()] + if context != nil && context.equals(config.GetContext()) { + // eliminated + continue + } + } + configSet.Add(config, p.mergeCache) + } + return configSet +} + +func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState { + if trans.Matches(ttype, 0, p.atn.maxTokenType) { + return trans.getTarget() + } + + return nil +} + +func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext { + + altToPred := make([]SemanticContext, nalts+1) + for _, c := range configs.GetItems() { + if ambigAlts.contains(c.GetAlt()) { + altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext()) + } + } + nPredAlts := 0 + for i := 1; i < nalts+1; i++ { + pred := altToPred[i] + if pred == nil { + altToPred[i] = SemanticContextNone + } else if pred != SemanticContextNone { + nPredAlts++ + } + } + // nonambig alts are nil in altToPred + if nPredAlts == 0 { + altToPred = nil + } + if ParserATNSimulatorDebug { + fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred)) + } + return altToPred +} + +func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction { + pairs := make([]*PredPrediction, 0) + containsPredicate := false + for i := 1; i < len(altToPred); i++ { + pred := altToPred[i] + // unpredicated is indicated by SemanticContextNONE + if ambigAlts != nil && ambigAlts.contains(i) { + pairs = append(pairs, NewPredPrediction(pred, i)) + } + if pred != SemanticContextNone { + containsPredicate = true + } + } + if !containsPredicate { + return nil + } + return pairs +} + +// +// This method is used to improve the localization of error messages by +// choosing an alternative rather than panicing a +// {@link NoViableAltException} in particular prediction scenarios where the +// {@link //ERROR} state was reached during ATN simulation. +// +//
+//
+// <p>The default implementation of this method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// {@link ParserRuleContext} returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.</p>
+//
+// <ul>
+// <li>If a syntactically valid path or paths reach the end of the decision
+// rule, and they are semantically valid if predicated, return the minimum
+// associated alt.</li>
+//
+// <li>Else, if a semantically invalid but syntactically valid path or
+// paths exist, return the minimum associated alt.</li>
+//
+// <li>Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.</li>
+// </ul>
+//
+// <p>In some scenarios, the algorithm described above could predict an
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// {@link //AdaptivePredict} instead of panicking with a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.</p>
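For concreteness, a failed predicate of this kind surfaces in generated code roughly as follows; a minimal sketch, with isTypeName assumed to be a helper defined in a @parser::members block:

// Guard generated for a semantic predicate {isTypeName()}? on the only
// syntactically viable alternative: when it fails, the parser reports a
// FailedPredicateException naming the predicate, not a NoViableAltException.
if !p.isTypeName() {
	panic(NewFailedPredicateException(p, "p.isTypeName()", ""))
}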
+// +// @param configs The ATN configurations which were valid immediately before +// the {@link //ERROR} state was reached +// @param outerContext The is the \gamma_0 initial parser context from the paper +// or the parser stack at the instant before prediction commences. +// +// @return The value to return from {@link //AdaptivePredict}, or +// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not +// identified and {@link //AdaptivePredict} should Report an error instead. +// +func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int { + cfgs := p.splitAccordingToSemanticValidity(configs, outerContext) + semValidConfigs := cfgs[0] + semInvalidConfigs := cfgs[1] + alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs) + if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists + return alt + } + // Is there a syntactically valid path with a failed pred? + if len(semInvalidConfigs.GetItems()) > 0 { + alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs) + if alt != ATNInvalidAltNumber { // syntactically viable path exists + return alt + } + } + return ATNInvalidAltNumber +} + +func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int { + alts := NewIntervalSet() + + for _, c := range configs.GetItems() { + _, ok := c.GetState().(*RuleStopState) + + if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) { + alts.addOne(c.GetAlt()) + } + } + if alts.length() == 0 { + return ATNInvalidAltNumber + } + + return alts.first() +} + +// Walk the list of configurations and split them according to +// those that have preds evaluating to true/false. If no pred, assume +// true pred and include in succeeded set. Returns Pair of sets. +// +// Create a NewSet so as not to alter the incoming parameter. +// +// Assumption: the input stream has been restored to the starting point +// prediction, which is where predicates need to evaluate. + +type ATNConfigSetPair struct { + item0, item1 ATNConfigSet +} + +func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet { + succeeded := NewBaseATNConfigSet(configs.FullContext()) + failed := NewBaseATNConfigSet(configs.FullContext()) + + for _, c := range configs.GetItems() { + if c.GetSemanticContext() != SemanticContextNone { + predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext) + if predicateEvaluationResult { + succeeded.Add(c, nil) + } else { + failed.Add(c, nil) + } + } else { + succeeded.Add(c, nil) + } + } + return []ATNConfigSet{succeeded, failed} +} + +// Look through a list of predicate/alt pairs, returning alts for the +// pairs that win. A {@code NONE} predicate indicates an alt containing an +// unpredicated config which behaves as "always true." If !complete +// then we stop at the first predicate that evaluates to true. This +// includes pairs with nil predicates. 
+// +func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet { + predictions := NewBitSet() + for i := 0; i < len(predPredictions); i++ { + pair := predPredictions[i] + if pair.pred == SemanticContextNone { + predictions.add(pair.alt) + if !complete { + break + } + continue + } + + predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext) + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult)) + } + if predicateEvaluationResult { + if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug { + fmt.Println("PREDICT " + fmt.Sprint(pair.alt)) + } + predictions.add(pair.alt) + if !complete { + break + } + } + } + return predictions +} + +func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) { + initialDepth := 0 + p.closureCheckingStopState(config, configs, closureBusy, collectPredicates, + fullCtx, initialDepth, treatEOFAsEpsilon) +} + +func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + + if ParserATNSimulatorDebug { + fmt.Println("closure(" + config.String() + ")") + fmt.Println("configs(" + configs.String() + ")") + if config.GetReachesIntoOuterContext() > 50 { + panic("problem") + } + } + + _, ok := config.GetState().(*RuleStopState) + if ok { + // We hit rule end. If we have context info, use it + // run thru all possible stack tops in ctx + if !config.GetContext().isEmpty() { + for i := 0; i < config.GetContext().length(); i++ { + if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState { + if fullCtx { + configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache) + continue + } else { + // we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) + } + continue + } + returnState := p.atn.states[config.GetContext().getReturnState(i)] + newContext := config.GetContext().GetParent(i) // "pop" return state + + c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext()) + // While we have context to pop back from, we may have + // gotten that context AFTER having falling off a rule. + // Make sure we track that we are now out of context. 
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext()) + p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon) + } + return + } else if fullCtx { + // reached end of start rule + configs.Add(config, p.mergeCache) + return + } else { + // else if we have no context info, just chase follow links (if greedy) + if ParserATNSimulatorDebug { + fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex())) + } + } + } + p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon) +} + +// Do the actual work of walking epsilon edges// +func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy *Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) { + state := config.GetState() + // optimization + if !state.GetEpsilonOnlyTransitions() { + configs.Add(config, p.mergeCache) + // make sure to not return here, because EOF transitions can act as + // both epsilon transitions and non-epsilon transitions. + } + for i := 0; i < len(state.GetTransitions()); i++ { + t := state.GetTransitions()[i] + _, ok := t.(*ActionTransition) + continueCollecting := collectPredicates && !ok + c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon) + if ci, ok := c.(*BaseATNConfig); ok && ci != nil { + if !t.getIsEpsilon() && closureBusy.add(c) != c { + // avoid infinite recursion for EOF* and EOF+ + continue + } + newDepth := depth + + if _, ok := config.GetState().(*RuleStopState); ok { + + // target fell off end of rule mark resulting c as having dipped into outer context + // We can't get here if incoming config was rule stop and we had context + // track how far we dip into outer context. Might + // come in handy and we avoid evaluating context dependent + // preds if p is > 0. + + if closureBusy.add(c) != c { + // avoid infinite recursion for right-recursive rules + continue + } + + if p.dfa != nil && p.dfa.precedenceDfa { + if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() { + c.setPrecedenceFilterSuppressed(true) + } + } + + c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1) + configs.SetDipsIntoOuterContext(true) // TODO: can remove? 
only care when we add to set per middle of p method + newDepth-- + if ParserATNSimulatorDebug { + fmt.Println("dips into outer ctx: " + c.String()) + } + } else if _, ok := t.(*RuleTransition); ok { + // latch when newDepth goes negative - once we step out of the entry context we can't return + if newDepth >= 0 { + newDepth++ + } + } + p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon) + } + } +} + +func (p *ParserATNSimulator) getRuleName(index int) string { + if p.parser != nil && index >= 0 { + return p.parser.GetRuleNames()[index] + } + + return "" +} + +func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig { + + switch t.getSerializationType() { + case TransitionRULE: + return p.ruleTransition(config, t.(*RuleTransition)) + case TransitionPRECEDENCE: + return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionPREDICATE: + return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx) + case TransitionACTION: + return p.actionTransition(config, t.(*ActionTransition)) + case TransitionEPSILON: + return NewBaseATNConfig4(config, t.getTarget()) + case TransitionATOM: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + case TransitionRANGE: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + case TransitionSET: + // EOF transitions act like epsilon transitions after the first EOF + // transition is traversed + if treatEOFAsEpsilon { + if t.Matches(TokenEOF, 0, 1) { + return NewBaseATNConfig4(config, t.getTarget()) + } + } + return nil + default: + return nil + } +} + +func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)) + } + return NewBaseATNConfig4(config, t.getTarget()) +} + +func (p *ParserATNSimulator) precedenceTransition(config ATNConfig, + pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + + strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true") + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && inContext { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. 
+ currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig { + + if ParserATNSimulatorDebug { + fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) + + ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent)) + if p.parser != nil { + fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil))) + } + } + var c *BaseATNConfig + if collectPredicates && ((pt.isCtxDependent && inContext) || !pt.isCtxDependent) { + if fullCtx { + // In full context mode, we can evaluate predicates on-the-fly + // during closure, which dramatically reduces the size of + // the config sets. It also obviates the need to test predicates + // later during conflict resolution. + currentPosition := p.input.Index() + p.input.Seek(p.startIndex) + predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext) + p.input.Seek(currentPosition) + if predSucceeds { + c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context + } + } else { + newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate()) + c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx) + } + } else { + c = NewBaseATNConfig4(config, pt.getTarget()) + } + if ParserATNSimulatorDebug { + fmt.Println("config from pred transition=" + c.String()) + } + return c +} + +func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig { + if ParserATNSimulatorDebug { + fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String()) + } + returnState := t.followState + newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber()) + return NewBaseATNConfig1(config, t.getTarget(), newContext) +} + +func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet { + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModeGetAlts(altsets) +} + +// Sam pointed out a problem with the previous definition, v3, of +// ambiguous states. If we have another state associated with conflicting +// alternatives, we should keep going. For example, the following grammar +// +// s : (ID | ID ID?) '' +// +// When the ATN simulation reaches the state before '', it has a DFA +// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally +// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node +// because alternative to has another way to continue, via [6|2|[]]. +// The key is that we have a single state that has config's only associated +// with a single alternative, 2, and crucially the state transitions +// among the configurations are all non-epsilon transitions. That means +// we don't consider any conflicts that include alternative 2. 
So, we +// ignore the conflict between alts 1 and 2. We ignore a set of +// conflicting alts when there is an intersection with an alternative +// associated with a single alt state in the state&rarrconfig-list map. +// +// It's also the case that we might have two conflicting configurations but +// also a 3rd nonconflicting configuration for a different alternative: +// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar: +// +// a : A | A | A B +// +// After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue and so we do not +// stop working on p state. In the previous example, we're concerned +// with states associated with the conflicting alternatives. Here alt +// 3 is not associated with the conflicting configs, but since we can continue +// looking for input reasonably, I don't declare the state done. We +// ignore a set of conflicting alts when we have an alternative +// that we still need to pursue. +// + +func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet { + var conflictingAlts *BitSet + if configs.GetUniqueAlt() != ATNInvalidAltNumber { + conflictingAlts = NewBitSet() + conflictingAlts.add(configs.GetUniqueAlt()) + } else { + conflictingAlts = configs.GetConflictingAlts() + } + return conflictingAlts +} + +func (p *ParserATNSimulator) GetTokenName(t int) string { + if t == TokenEOF { + return "EOF" + } + + if p.parser != nil && p.parser.GetLiteralNames() != nil { + if t >= len(p.parser.GetLiteralNames()) { + fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ",")) + // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect + } else { + return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">" + } + } + + return strconv.Itoa(t) +} + +func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string { + return p.GetTokenName(input.LA(1)) +} + +// Used for debugging in AdaptivePredict around execATN but I cut +// it out for clarity now that alg. works well. We can leave p +// "dead" code for a bit. +// +func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) { + + panic("Not implemented") + + // fmt.Println("dead end configs: ") + // var decs = nvae.deadEndConfigs + // + // for i:=0; i0) { + // var t = c.state.GetTransitions()[0] + // if t2, ok := t.(*AtomTransition); ok { + // trans = "Atom "+ p.GetTokenName(t2.label) + // } else if t3, ok := t.(SetTransition); ok { + // _, ok := t.(*NotSetTransition) + // + // var s string + // if (ok){ + // s = "~" + // } + // + // trans = s + "Set " + t3.set + // } + // } + // fmt.Errorf(c.String(p.parser, true) + ":" + trans) + // } +} + +func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException { + return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext) +} + +func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int { + alt := ATNInvalidAltNumber + for _, c := range configs.GetItems() { + if alt == ATNInvalidAltNumber { + alt = c.GetAlt() // found first alt + } else if c.GetAlt() != alt { + return ATNInvalidAltNumber + } + } + return alt +} + +// +// Add an edge to the DFA, if possible. 
This method calls +// {@link //addDFAState} to ensure the {@code to} state is present in the +// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the +// range of edges that can be represented in the DFA tables, p method +// returns without adding the edge to the DFA. +// +//

+// If {@code to} is {@code nil}, this method returns {@code nil}.
+// Otherwise, this method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
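+//
+// A minimal sketch of the indexing convention (mirroring the implementation
+// below, not adding behavior): edges are stored at offset t+1 so that
+// t == -1 (EOF) maps to slot 0.
+//
+//	from.edges = make([]*DFAState, p.atn.maxTokenType+1+1) // slots for t in [-1, maxTokenType]
+//	from.edges[t+1] = to                                   // EOF (-1) lands in slot 0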
+// +// @param dfa The DFA +// @param from The source state for the edge +// @param t The input symbol +// @param to The target state for the edge +// +// @return If {@code to} is {@code nil}, p method returns {@code nil} +// otherwise p method returns the result of calling {@link //addDFAState} +// on {@code to} +// +func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState { + if ParserATNSimulatorDebug { + fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t)) + } + if to == nil { + return nil + } + to = p.addDFAState(dfa, to) // used existing if possible not incoming + if from == nil || t < -1 || t > p.atn.maxTokenType { + return to + } + if from.edges == nil { + from.edges = make([]*DFAState, p.atn.maxTokenType+1+1) + } + from.edges[t+1] = to // connect + + if ParserATNSimulatorDebug { + var names []string + if p.parser != nil { + names = p.parser.GetLiteralNames() + } + + fmt.Println("DFA=\n" + dfa.String(names, nil)) + } + return to +} + +// +// Add state {@code D} to the DFA if it is not already present, and return +// the actual instance stored in the DFA. If a state equivalent to {@code D} +// is already in the DFA, the existing state is returned. Otherwise p +// method returns {@code D} after adding it to the DFA. +// +//

+// If {@code D} is {@link //ERROR}, this method returns {@link //ERROR} and
+// does not change the DFA.
+// +// @param dfa The dfa +// @param D The DFA state to add +// @return The state stored in the DFA. This will be either the existing +// state if {@code D} is already in the DFA, or {@code D} itself if the +// state was not already present. +// +func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState { + if d == ATNSimulatorError { + return d + } + hash := d.hash() + existing, ok := dfa.getState(hash) + if ok { + return existing + } + d.stateNumber = dfa.numStates() + if !d.configs.ReadOnly() { + d.configs.OptimizeConfigs(p.BaseATNSimulator) + d.configs.SetReadOnly(true) + } + dfa.setState(hash, d) + if ParserATNSimulatorDebug { + fmt.Println("adding NewDFA state: " + d.String()) + } + return d +} + +func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs) + } +} + +func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs) + } +} + +// If context sensitive parsing, we know it's ambiguity not conflict// +func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int, + exact bool, ambigAlts *BitSet, configs ATNConfigSet) { + if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug { + interval := NewInterval(startIndex, stopIndex+1) + fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() + + ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval)) + } + if p.parser != nil { + p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs) + } +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go new file mode 100644 index 0000000000..49cd10c5ff --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go @@ -0,0 +1,362 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "reflect" + "strconv" +) + +type ParserRuleContext interface { + RuleContext + + SetException(RecognitionException) + + AddTokenNode(token Token) *TerminalNodeImpl + AddErrorNode(badToken Token) *ErrorNodeImpl + + EnterRule(listener ParseTreeListener) + ExitRule(listener ParseTreeListener) + + SetStart(Token) + GetStart() Token + + SetStop(Token) + GetStop() Token + + AddChild(child RuleContext) RuleContext + RemoveLastChild() +} + +type BaseParserRuleContext struct { + *BaseRuleContext + + start, stop Token + exception RecognitionException + children []Tree +} + +func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext { + prc := new(BaseParserRuleContext) + + prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber) + + prc.RuleIndex = -1 + // * If we are debugging or building a parse tree for a Visitor, + // we need to track all of the tokens and rule invocations associated + // with prc rule's context. This is empty for parsing w/o tree constr. + // operation because we don't the need to track the details about + // how we parse prc rule. + // / + prc.children = nil + prc.start = nil + prc.stop = nil + // The exception that forced prc rule to return. If the rule successfully + // completed, prc is {@code nil}. + prc.exception = nil + + return prc +} + +func (prc *BaseParserRuleContext) SetException(e RecognitionException) { + prc.exception = e +} + +func (prc *BaseParserRuleContext) GetChildren() []Tree { + return prc.children +} + +func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) { + // from RuleContext + prc.parentCtx = ctx.parentCtx + prc.invokingState = ctx.invokingState + prc.children = nil + prc.start = ctx.start + prc.stop = ctx.stop +} + +func (prc *BaseParserRuleContext) GetText() string { + if prc.GetChildCount() == 0 { + return "" + } + + var s string + for _, child := range prc.children { + s += child.(ParseTree).GetText() + } + + return s +} + +// Double dispatch methods for listeners +func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) { +} + +func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) { +} + +// * Does not set parent link other add methods do that/// +func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext { + if prc.children == nil { + prc.children = make([]Tree, 0) + } + if child == nil { + panic("Child may not be null") + } + prc.children = append(prc.children, child) + return child +} + +// * Used by EnterOuterAlt to toss out a RuleContext previously added as +// we entered a rule. If we have // label, we will need to remove +// generic ruleContext object. 
+// / +func (prc *BaseParserRuleContext) RemoveLastChild() { + if prc.children != nil && len(prc.children) > 0 { + prc.children = prc.children[0 : len(prc.children)-1] + } +} + +func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl { + + node := NewTerminalNodeImpl(token) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node + +} + +func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl { + node := NewErrorNodeImpl(badToken) + prc.addTerminalNodeChild(node) + node.parentCtx = prc + return node +} + +func (prc *BaseParserRuleContext) GetChild(i int) Tree { + if prc.children != nil && len(prc.children) >= i { + return prc.children[i] + } + + return nil +} + +func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext { + if childType == nil { + return prc.GetChild(i).(RuleContext) + } + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if reflect.TypeOf(child) == childType { + if i == 0 { + return child.(RuleContext) + } + + i-- + } + } + + return nil +} + +func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string { + return TreesStringTree(prc, ruleNames, recog) +} + +func (prc *BaseParserRuleContext) GetRuleContext() RuleContext { + return prc +} + +func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} { + return visitor.VisitChildren(prc) +} + +func (prc *BaseParserRuleContext) SetStart(t Token) { + prc.start = t +} + +func (prc *BaseParserRuleContext) GetStart() Token { + return prc.start +} + +func (prc *BaseParserRuleContext) SetStop(t Token) { + prc.stop = t +} + +func (prc *BaseParserRuleContext) GetStop() Token { + return prc.stop +} + +func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode { + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if c2, ok := child.(TerminalNode); ok { + if c2.GetSymbol().GetTokenType() == ttype { + if i == 0 { + return c2 + } + + i-- + } + } + } + return nil +} + +func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode { + if prc.children == nil { + return make([]TerminalNode, 0) + } + + tokens := make([]TerminalNode, 0) + + for j := 0; j < len(prc.children); j++ { + child := prc.children[j] + if tchild, ok := child.(TerminalNode); ok { + if tchild.GetSymbol().GetTokenType() == ttype { + tokens = append(tokens, tchild) + } + } + } + + return tokens +} + +func (prc *BaseParserRuleContext) GetPayload() interface{} { + return prc +} + +func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext { + if prc.children == nil || i < 0 || i >= len(prc.children) { + return nil + } + + j := -1 // what element have we found with ctxType? 
+ for _, o := range prc.children { + + childType := reflect.TypeOf(o) + + if childType.Implements(ctxType) { + j++ + if j == i { + return o.(RuleContext) + } + } + } + return nil +} + +// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do +// check for convertibility + +func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext { + return prc.getChild(ctxType, i) +} + +func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext { + if prc.children == nil { + return make([]RuleContext, 0) + } + + contexts := make([]RuleContext, 0) + + for _, child := range prc.children { + childType := reflect.TypeOf(child) + + if childType.ConvertibleTo(ctxType) { + contexts = append(contexts, child.(RuleContext)) + } + } + return contexts +} + +func (prc *BaseParserRuleContext) GetChildCount() int { + if prc.children == nil { + return 0 + } + + return len(prc.children) +} + +func (prc *BaseParserRuleContext) GetSourceInterval() *Interval { + if prc.start == nil || prc.stop == nil { + return TreeInvalidInterval + } + + return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex()) +} + +//need to manage circular dependencies, so export now + +// Print out a whole tree, not just a node, in LISP format +// (root child1 .. childN). Print just a node if b is a leaf. +// + +func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string { + + var p ParserRuleContext = prc + s := "[" + for p != nil && p != stop { + if ruleNames == nil { + if !p.IsEmpty() { + s += strconv.Itoa(p.GetInvokingState()) + } + } else { + ri := p.GetRuleIndex() + var ruleName string + if ri >= 0 && ri < len(ruleNames) { + ruleName = ruleNames[ri] + } else { + ruleName = strconv.Itoa(ri) + } + s += ruleName + } + if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) { + s += " " + } + pi := p.GetParent() + if pi != nil { + p = pi.(ParserRuleContext) + } else { + p = nil + } + } + s += "]" + return s +} + +var RuleContextEmpty = NewBaseParserRuleContext(nil, -1) + +type InterpreterRuleContext interface { + ParserRuleContext +} + +type BaseInterpreterRuleContext struct { + *BaseParserRuleContext +} + +func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext { + + prc := new(BaseInterpreterRuleContext) + + prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber) + + prc.RuleIndex = ruleIndex + + return prc +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go new file mode 100644 index 0000000000..99acb333fa --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go @@ -0,0 +1,756 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" +) + +// Represents {@code $} in local context prediction, which means wildcard. +// {@code//+x =//}. +// / +const ( + BasePredictionContextEmptyReturnState = 0x7FFFFFFF +) + +// Represents {@code $} in an array in full context mode, when {@code $} +// doesn't mean wildcard: {@code $ + x = [$,x]}. Here, +// {@code $} = {@link //EmptyReturnState}. 
+// / + +var ( + BasePredictionContextglobalNodeCount = 1 + BasePredictionContextid = BasePredictionContextglobalNodeCount +) + +type PredictionContext interface { + hash() int + GetParent(int) PredictionContext + getReturnState(int) int + equals(PredictionContext) bool + length() int + isEmpty() bool + hasEmptyPath() bool + String() string +} + +type BasePredictionContext struct { + cachedHash int +} + +func NewBasePredictionContext(cachedHash int) *BasePredictionContext { + pc := new(BasePredictionContext) + pc.cachedHash = cachedHash + + return pc +} + +func (b *BasePredictionContext) isEmpty() bool { + return false +} + +func calculateHash(parent PredictionContext, returnState int) int { + h := murmurInit(1) + h = murmurUpdate(h, parent.hash()) + h = murmurUpdate(h, returnState) + return murmurFinish(h, 2) +} + +func calculateEmptyHash() int { + h := murmurInit(1) + return murmurFinish(h, 0) +} + +// Used to cache {@link BasePredictionContext} objects. Its used for the shared +// context cash associated with contexts in DFA states. This cache +// can be used for both lexers and parsers. + +type PredictionContextCache struct { + cache map[PredictionContext]PredictionContext +} + +func NewPredictionContextCache() *PredictionContextCache { + t := new(PredictionContextCache) + t.cache = make(map[PredictionContext]PredictionContext) + return t +} + +// Add a context to the cache and return it. If the context already exists, +// return that one instead and do not add a Newcontext to the cache. +// Protect shared cache from unsafe thread access. +// +func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext { + if ctx == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY + } + existing := p.cache[ctx] + if existing != nil { + return existing + } + p.cache[ctx] = ctx + return ctx +} + +func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext { + return p.cache[ctx] +} + +func (p *PredictionContextCache) length() int { + return len(p.cache) +} + +type SingletonPredictionContext interface { + PredictionContext +} + +type BaseSingletonPredictionContext struct { + *BasePredictionContext + + parentCtx PredictionContext + returnState int +} + +func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext { + + s := new(BaseSingletonPredictionContext) + s.BasePredictionContext = NewBasePredictionContext(37) + + if parent != nil { + s.cachedHash = calculateHash(parent, returnState) + } else { + s.cachedHash = calculateEmptyHash() + } + + s.parentCtx = parent + s.returnState = returnState + + return s +} + +func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext { + if returnState == BasePredictionContextEmptyReturnState && parent == nil { + // someone can pass in the bits of an array ctx that mean $ + return BasePredictionContextEMPTY + } + + return NewBaseSingletonPredictionContext(parent, returnState) +} + +func (b *BaseSingletonPredictionContext) length() int { + return 1 +} + +func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext { + return b.parentCtx +} + +func (b *BaseSingletonPredictionContext) getReturnState(index int) int { + return b.returnState +} + +func (b *BaseSingletonPredictionContext) hasEmptyPath() bool { + return b.returnState == BasePredictionContextEmptyReturnState +} + +func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool { + if b == other { + return true + } else if _, ok := 
other.(*BaseSingletonPredictionContext); !ok { + return false + } else if b.hash() != other.hash() { + return false // can't be same if hash is different + } + + otherP := other.(*BaseSingletonPredictionContext) + + if b.returnState != other.getReturnState(0) { + return false + } else if b.parentCtx == nil { + return otherP.parentCtx == nil + } + + return b.parentCtx.equals(otherP.parentCtx) +} + +func (b *BaseSingletonPredictionContext) hash() int { + h := murmurInit(1) + + if b.parentCtx == nil { + return murmurFinish(h, 0) + } + + h = murmurUpdate(h, b.parentCtx.hash()) + h = murmurUpdate(h, b.returnState) + return murmurFinish(h, 2) +} + +func (b *BaseSingletonPredictionContext) String() string { + var up string + + if b.parentCtx == nil { + up = "" + } else { + up = b.parentCtx.String() + } + + if len(up) == 0 { + if b.returnState == BasePredictionContextEmptyReturnState { + return "$" + } + + return strconv.Itoa(b.returnState) + } + + return strconv.Itoa(b.returnState) + " " + up +} + +var BasePredictionContextEMPTY = NewEmptyPredictionContext() + +type EmptyPredictionContext struct { + *BaseSingletonPredictionContext +} + +func NewEmptyPredictionContext() *EmptyPredictionContext { + + p := new(EmptyPredictionContext) + + p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState) + + return p +} + +func (e *EmptyPredictionContext) isEmpty() bool { + return true +} + +func (e *EmptyPredictionContext) GetParent(index int) PredictionContext { + return nil +} + +func (e *EmptyPredictionContext) getReturnState(index int) int { + return e.returnState +} + +func (e *EmptyPredictionContext) equals(other PredictionContext) bool { + return e == other +} + +func (e *EmptyPredictionContext) String() string { + return "$" +} + +type ArrayPredictionContext struct { + *BasePredictionContext + + parents []PredictionContext + returnStates []int +} + +func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext { + // Parent can be nil only if full ctx mode and we make an array + // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using + // nil parent and + // returnState == {@link //EmptyReturnState}. 
+ + c := new(ArrayPredictionContext) + c.BasePredictionContext = NewBasePredictionContext(37) + + for i := range parents { + c.cachedHash += calculateHash(parents[i], returnStates[i]) + } + + c.parents = parents + c.returnStates = returnStates + + return c +} + +func (a *ArrayPredictionContext) GetReturnStates() []int { + return a.returnStates +} + +func (a *ArrayPredictionContext) hasEmptyPath() bool { + return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) isEmpty() bool { + // since EmptyReturnState can only appear in the last position, we + // don't need to verify that size==1 + return a.returnStates[0] == BasePredictionContextEmptyReturnState +} + +func (a *ArrayPredictionContext) length() int { + return len(a.returnStates) +} + +func (a *ArrayPredictionContext) GetParent(index int) PredictionContext { + return a.parents[index] +} + +func (a *ArrayPredictionContext) getReturnState(index int) int { + return a.returnStates[index] +} + +func (a *ArrayPredictionContext) equals(other PredictionContext) bool { + if _, ok := other.(*ArrayPredictionContext); !ok { + return false + } else if a.cachedHash != other.hash() { + return false // can't be same if hash is different + } else { + otherP := other.(*ArrayPredictionContext) + return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents + } +} + +func (a *ArrayPredictionContext) hash() int { + h := murmurInit(1) + + for _, p := range a.parents { + h = murmurUpdate(h, p.hash()) + } + + for _, r := range a.returnStates { + h = murmurUpdate(h, r) + } + + return murmurFinish(h, 2 * len(a.parents)) +} + +func (a *ArrayPredictionContext) String() string { + if a.isEmpty() { + return "[]" + } + + s := "[" + for i := 0; i < len(a.returnStates); i++ { + if i > 0 { + s = s + ", " + } + if a.returnStates[i] == BasePredictionContextEmptyReturnState { + s = s + "$" + continue + } + s = s + strconv.Itoa(a.returnStates[i]) + if a.parents[i] != nil { + s = s + " " + a.parents[i].String() + } else { + s = s + "nil" + } + } + + return s + "]" +} + +// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph. +// Return {@link //EMPTY} if {@code outerContext} is empty or nil. +// / +func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext { + if outerContext == nil { + outerContext = RuleContextEmpty + } + // if we are in RuleContext of start rule, s, then BasePredictionContext + // is EMPTY. Nobody called us. 
(if we are empty, return empty) + if outerContext.GetParent() == nil || outerContext == RuleContextEmpty { + return BasePredictionContextEMPTY + } + // If we have a parent, convert it to a BasePredictionContext graph + parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext)) + state := a.states[outerContext.GetInvokingState()] + transition := state.GetTransitions()[0] + + return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber()) +} + +func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + // share same graph if both same + if a == b { + return a + } + + ac, ok1 := a.(*BaseSingletonPredictionContext) + bc, ok2 := b.(*BaseSingletonPredictionContext) + + if ok1 && ok2 { + return mergeSingletons(ac, bc, rootIsWildcard, mergeCache) + } + // At least one of a or b is array + // If one is $ and rootIsWildcard, return $ as// wildcard + if rootIsWildcard { + if _, ok := a.(*EmptyPredictionContext); ok { + return a + } + if _, ok := b.(*EmptyPredictionContext); ok { + return b + } + } + // convert singleton so both are arrays to normalize + if _, ok := a.(*BaseSingletonPredictionContext); ok { + a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)}) + } + if _, ok := b.(*BaseSingletonPredictionContext); ok { + b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)}) + } + return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache) +} + +// +// Merge two {@link SingletonBasePredictionContext} instances. +// +//

+// Stack tops equal, parents merge is same: return left graph.
+//
+// Same stack top, parents differ: merge parents giving array node, then
+// remainders of those graphs. A new root node is created to point to the
+// merged parents.
+//
+// Different stack tops pointing to same parent: make array node for the
+// root where both elements in the root point to the same (original)
+// parent.
+//
+// Different stack tops pointing to different parents: make array node for
+// the root where each element points to the corresponding original
+// parent.
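+//
+// A short worked case under these rules (hypothetical contexts x and y,
+// not from a real parse):
+//
+//	a := SingletonBasePredictionContextCreate(x, 9)
+//	b := SingletonBasePredictionContextCreate(y, 9)
+//	merge(a, b, wild, nil) // equal tops: parents x and y merge under return state 9
+//
+//	c := SingletonBasePredictionContextCreate(x, 7)
+//	d := SingletonBasePredictionContextCreate(x, 9)
+//	merge(c, d, wild, nil) // shared parent: array context [7, 9] over x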
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// @param mergeCache +// / +func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.hash(), b.hash()) + if previous != nil { + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.hash(), a.hash()) + if previous != nil { + return previous.(PredictionContext) + } + } + + rootMerge := mergeRoot(a, b, rootIsWildcard) + if rootMerge != nil { + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), rootMerge) + } + return rootMerge + } + if a.returnState == b.returnState { + parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache) + // if parent is same as existing a or b parent or reduced to a parent, + // return it + if parent == a.parentCtx { + return a // ax + bx = ax, if a=b + } + if parent == b.parentCtx { + return b // ax + bx = bx, if a=b + } + // else: ax + ay = a'[x,y] + // merge parents x and y, giving array node with x,y then remainders + // of those graphs. dup a, a' points at merged array + // Newjoined parent so create Newsingleton pointing to it, a' + spc := SingletonBasePredictionContextCreate(parent, a.returnState) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), spc) + } + return spc + } + // a != b payloads differ + // see if we can collapse parents due to $+x parents if local ctx + var singleParent PredictionContext + if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax + + // bx = + // [a,b]x + singleParent = a.parentCtx + } + if singleParent != nil { // parents are same + // sort payloads and use same parent + payloads := []int{a.returnState, b.returnState} + if a.returnState > b.returnState { + payloads[0] = b.returnState + payloads[1] = a.returnState + } + parents := []PredictionContext{singleParent, singleParent} + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), apc) + } + return apc + } + // parents differ and can't merge them. Just pack together + // into array can't merge. + // ax + by = [ax,by] + payloads := []int{a.returnState, b.returnState} + parents := []PredictionContext{a.parentCtx, b.parentCtx} + if a.returnState > b.returnState { // sort by payload + payloads[0] = b.returnState + payloads[1] = a.returnState + parents = []PredictionContext{b.parentCtx, a.parentCtx} + } + apc := NewArrayPredictionContext(parents, payloads) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), apc) + } + return apc +} + +// +// Handle case where at least one of {@code a} or {@code b} is +// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used +// to represent {@link //EMPTY}. +// +//

+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is a superset of any graph: return {@link //EMPTY}.
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so the merged parent is
+// {@code //EMPTY}: return left graph.
+//
+// Special case of last merge if local context.
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+// Must keep all contexts: {@link //EMPTY} in an array is a special value
+// (and nil parent).
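+//
+// For instance, in full-context mode merging {@code $} with a singleton
+// {@code x} yields the two-element array context {@code [x,$]}; this
+// restates the pairs built below rather than adding behavior:
+//
+//	payloads := []int{x.getReturnState(-1), BasePredictionContextEmptyReturnState}
+//	parents := []PredictionContext{x.GetParent(-1), nil}
+//	return NewArrayPredictionContext(parents, payloads)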
+// +// @param a the first {@link SingletonBasePredictionContext} +// @param b the second {@link SingletonBasePredictionContext} +// @param rootIsWildcard {@code true} if this is a local-context merge, +// otherwise false to indicate a full-context merge +// / +func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext { + if rootIsWildcard { + if a == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // // + b =// + } + if b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // a +// =// + } + } else { + if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY { + return BasePredictionContextEMPTY // $ + $ = $ + } else if a == BasePredictionContextEMPTY { // $ + x = [$,x] + payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{b.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present) + payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState} + parents := []PredictionContext{a.GetParent(-1), nil} + return NewArrayPredictionContext(parents, payloads) + } + } + return nil +} + +// +// Merge two {@link ArrayBasePredictionContext} instances. +// +//

+// Different tops, different parents.
+//
+// Shared top, same parents.
+//
+// Shared top, different parents.
+//
+// Shared top, all shared parents.
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
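+//
+// The implementation below is a two-pointer walk over the sorted
+// returnStates arrays. With hypothetical payloads, merging [1,3] with
+// [2,3] yields [1,2,3]: the two entries for payload 3 collapse into a
+// single slot whose parent is the recursive merge of the two originals.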
+// / +func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext { + if mergeCache != nil { + previous := mergeCache.Get(a.hash(), b.hash()) + if previous != nil { + return previous.(PredictionContext) + } + previous = mergeCache.Get(b.hash(), a.hash()) + if previous != nil { + return previous.(PredictionContext) + } + } + // merge sorted payloads a + b => M + i := 0 // walks a + j := 0 // walks b + k := 0 // walks target M array + + mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates)) + mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates)) + // walk and merge to yield mergedParents, mergedReturnStates + for i < len(a.returnStates) && j < len(b.returnStates) { + aParent := a.parents[i] + bParent := b.parents[j] + if a.returnStates[i] == b.returnStates[j] { + // same payload (stack tops are equal), must yield merged singleton + payload := a.returnStates[i] + // $+$ = $ + bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil + axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax + // -> + // ax + if bothDollars || axAX { + mergedParents[k] = aParent // choose left + mergedReturnStates[k] = payload + } else { // ax+ay -> a'[x,y] + mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache) + mergedParents[k] = mergedParent + mergedReturnStates[k] = payload + } + i++ // hop over left one as usual + j++ // but also Skip one in right side since we merge + } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M + mergedParents[k] = aParent + mergedReturnStates[k] = a.returnStates[i] + i++ + } else { // b > a, copy b[j] to M + mergedParents[k] = bParent + mergedReturnStates[k] = b.returnStates[j] + j++ + } + k++ + } + // copy over any payloads remaining in either array + if i < len(a.returnStates) { + for p := i; p < len(a.returnStates); p++ { + mergedParents[k] = a.parents[p] + mergedReturnStates[k] = a.returnStates[p] + k++ + } + } else { + for p := j; p < len(b.returnStates); p++ { + mergedParents[k] = b.parents[p] + mergedReturnStates[k] = b.returnStates[p] + k++ + } + } + // trim merged if we combined a few that had same stack tops + if k < len(mergedParents) { // write index < last position trim + if k == 1 { // for just one merged element, return singleton top + pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0]) + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), pc) + } + return pc + } + mergedParents = mergedParents[0:k] + mergedReturnStates = mergedReturnStates[0:k] + } + + M := NewArrayPredictionContext(mergedParents, mergedReturnStates) + + // if we created same array as a or b, return that instead + // TODO: track whether this is possible above during merge sort for speed + if M == a { + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), a) + } + return a + } + if M == b { + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), b) + } + return b + } + combineCommonParents(mergedParents) + + if mergeCache != nil { + mergeCache.set(a.hash(), b.hash(), M) + } + return M +} + +// +// Make pass over all M {@code parents} merge any {@code equals()} +// ones. 
+// / +func combineCommonParents(parents []PredictionContext) { + uniqueParents := make(map[PredictionContext]PredictionContext) + + for p := 0; p < len(parents); p++ { + parent := parents[p] + if uniqueParents[parent] == nil { + uniqueParents[parent] = parent + } + } + for q := 0; q < len(parents); q++ { + parents[q] = uniqueParents[parents[q]] + } +} + +func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext { + + if context.isEmpty() { + return context + } + existing := visited[context] + if existing != nil { + return existing + } + existing = contextCache.Get(context) + if existing != nil { + visited[context] = existing + return existing + } + changed := false + parents := make([]PredictionContext, context.length()) + for i := 0; i < len(parents); i++ { + parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited) + if changed || parent != context.GetParent(i) { + if !changed { + parents = make([]PredictionContext, context.length()) + for j := 0; j < context.length(); j++ { + parents[j] = context.GetParent(j) + } + changed = true + } + parents[i] = parent + } + } + if !changed { + contextCache.add(context) + visited[context] = context + return context + } + var updated PredictionContext + if len(parents) == 0 { + updated = BasePredictionContextEMPTY + } else if len(parents) == 1 { + updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0)) + } else { + updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates()) + } + contextCache.add(updated) + visited[updated] = updated + visited[context] = updated + + return updated +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go new file mode 100644 index 0000000000..15718f912b --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go @@ -0,0 +1,553 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// This enumeration defines the prediction modes available in ANTLR 4 along with +// utility methods for analyzing configuration sets for conflicts and/or +// ambiguities. + +const ( + // + // The SLL(*) prediction mode. This prediction mode ignores the current + // parser context when making predictions. This is the fastest prediction + // mode, and provides correct results for many grammars. This prediction + // mode is more powerful than the prediction mode provided by ANTLR 3, but + // may result in syntax errors for grammar and input combinations which are + // not SLL. + // + //

+	// When using this prediction mode, the parser will either return a correct
+	// parse tree (i.e. the same parse tree that would be returned with the
+	// {@link //LL} prediction mode), or it will report a syntax error. If a
+	// syntax error is encountered when using the {@link //SLL} prediction mode,
+	// it may be due to either an actual syntax error in the input or indicate
+	// that the particular combination of grammar and input requires the more
+	// powerful {@link //LL} prediction abilities to complete successfully.
+	//
+	// This prediction mode does not provide any guarantees for prediction
+	// behavior for syntactically-incorrect inputs.
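+	//
+	// A typical usage pattern (a sketch, assuming a generated parser with the
+	// standard Interpreter wiring; not something this file defines):
+	//
+	//	parser.Interpreter.SetPredictionMode(antlr.PredictionModeSLL)
+	//	// parse; if a syntax error surfaces, retry once with:
+	//	parser.Interpreter.SetPredictionMode(antlr.PredictionModeLL)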

+ // + PredictionModeSLL = 0 + // + // The LL(*) prediction mode. This prediction mode allows the current parser + // context to be used for resolving SLL conflicts that occur during + // prediction. This is the fastest prediction mode that guarantees correct + // parse results for all combinations of grammars with syntactically correct + // inputs. + // + //

+	// When using this prediction mode, the parser will make correct decisions
+	// for all syntactically-correct grammar and input combinations. However, in
+	// cases where the grammar is truly ambiguous this prediction mode might not
+	// report a precise answer for exactly which alternatives are ambiguous.
+	//
+	// This prediction mode does not provide any guarantees for prediction
+	// behavior for syntactically-incorrect inputs.

+ // + PredictionModeLL = 1 + // + // The LL(*) prediction mode with exact ambiguity detection. In addition to + // the correctness guarantees provided by the {@link //LL} prediction mode, + // this prediction mode instructs the prediction algorithm to determine the + // complete and exact set of ambiguous alternatives for every ambiguous + // decision encountered while parsing. + // + //

+	// This prediction mode may be used for diagnosing ambiguities during
+	// grammar development. Due to the performance overhead of calculating sets
+	// of ambiguous alternatives, this prediction mode should be avoided when
+	// the exact results are not necessary.
+	//
+	// This prediction mode does not provide any guarantees for prediction
+	// behavior for syntactically-incorrect inputs.

+ // + PredictionModeLLExactAmbigDetection = 2 +) + +// +// Computes the SLL prediction termination condition. +// +//

+// This method computes the SLL prediction termination condition for both of
+// the following cases:
+//
+//   - The usual SLL+LL fallback upon SLL conflict
+//   - Pure SLL without LL fallback
+//
+// COMBINED SLL+LL PARSING

+// +//

When LL-fallback is enabled upon SLL conflict, correct predictions are +// ensured regardless of how the termination condition is computed by this +// method. Due to the substantially higher cost of LL prediction, the +// prediction should only fall back to LL when the additional lookahead +// cannot lead to a unique SLL prediction.

+// +//

Assuming combined SLL+LL parsing, an SLL configuration set with only +// conflicting subsets should fall back to full LL, even if the +// configuration sets don't resolve to the same alternative (e.g. +// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting +// configuration, SLL could continue with the hopes that more lookahead will +// resolve via one of those non-conflicting configurations.

+// +//

+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.

+// +//

+// HEURISTIC

+// +//

As a heuristic, we stop prediction when we see any conflicting subset +// unless we see a state that only has one alternative associated with it. +// The single-alt-state thing lets prediction continue upon rules like +// (otherwise, it would admit defeat too soon):

+// +//

+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }

+// +//

+// When the ATN simulation reaches the state before {@code ';'}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative two has another way to continue,
+// via {@code [6|2|[]]}.

+// +//

+// It also lets us continue for this rule:

+// +//

{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }

+// +//

After Matching input A, we reach the stop state for rule A, state 1. +// State 8 is the state right before B. Clearly alternatives 1 and 2 +// conflict and no amount of further lookahead will separate the two. +// However, alternative 3 will be able to continue and so we do not stop +// working on this state. In the previous example, we're concerned with +// states associated with the conflicting alternatives. Here alt 3 is not +// associated with the conflicting configs, but since we can continue +// looking for input reasonably, don't declare the state done.

+// +//

+// PURE SLL PARSING

+// +//

To handle pure SLL parsing, all we have to do is make sure that we +// combine stack contexts for configurations that differ only by semantic +// predicate. From there, we can do the usual SLL termination heuristic.

+// +//

+// PREDICATES IN SLL+LL PARSING

+// +//

SLL decisions don't evaluate predicates until after they reach DFA stop +// states because they need to create the DFA cache that works in all +// semantic situations. In contrast, full LL evaluates predicates collected +// during start state computation so it can ignore predicates thereafter. +// This means that SLL termination detection can totally ignore semantic +// predicates.

+// +//

Implementation-wise, {@link ATNConfigSet} combines stack contexts but not +// semantic predicate contexts so we might see two configurations like the +// following.

+// +//

{@code (s, 1, x, {}), (s, 1, x', {p})}

+// +//

Before testing these configurations against others, we have to merge +// {@code x} and {@code x'} (without modifying the existing configurations). +// For example, we test {@code (x+x')==x''} when looking for conflicts in +// the following configurations.

+// +//

{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}

+// +//

+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
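+//
+// A small worked case (hypothetical configurations): for the set
+// {(s,1,x), (s,2,x), (s',1,y)} the conflicting alt subsets are {1,2} and
+// {1}. A conflicting subset exists, but state s' is associated with the
+// single alternative 1, so this function still returns false and SLL
+// prediction keeps looking ahead.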

+// +func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool { + // Configs in rule stop states indicate reaching the end of the decision + // rule (local context) or end of start rule (full context). If all + // configs meet this condition, then none of the configurations is able + // to Match additional input so we terminate prediction. + // + if PredictionModeallConfigsInRuleStopStates(configs) { + return true + } + // pure SLL mode parsing + if mode == PredictionModeSLL { + // Don't bother with combining configs from different semantic + // contexts if we can fail over to full LL costs more time + // since we'll often fail over anyway. + if configs.HasSemanticContext() { + // dup configs, tossing out semantic predicates + dup := NewBaseATNConfigSet(false) + for _, c := range configs.GetItems() { + + // NewBaseATNConfig({semanticContext:}, c) + c = NewBaseATNConfig2(c, SemanticContextNone) + dup.Add(c, nil) + } + configs = dup + } + // now we have combined contexts for configs with dissimilar preds + } + // pure SLL or combined SLL+LL mode parsing + altsets := PredictionModegetConflictingAltSubsets(configs) + return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs) +} + +// Checks if any configuration in {@code configs} is in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if any configuration in {@code configs} is in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool { + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); ok { + return true + } + } + return false +} + +// Checks if all configurations in {@code configs} are in a +// {@link RuleStopState}. Configurations meeting this condition have reached +// the end of the decision rule (local context) or end of start rule (full +// context). +// +// @param configs the configuration set to test +// @return {@code true} if all configurations in {@code configs} are in a +// {@link RuleStopState}, otherwise {@code false} +func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool { + + for _, c := range configs.GetItems() { + if _, ok := c.GetState().(*RuleStopState); !ok { + return false + } + } + return true +} + +// +// Full LL prediction termination. +// +//

Can we stop looking ahead during ATN simulation or is there some +// uncertainty as to which alternative we will ultimately pick, after +// consuming more input? Even if there are partial conflicts, we might know +// that everything is going to resolve to the same minimum alternative. That +// means we can stop since no more lookahead will change that fact. On the +// other hand, there might be multiple conflicts that resolve to different +// minimums. That means we need more look ahead to decide which of those +// alternatives we should predict.

+// +//

The basic idea is to split the set of configurations {@code C}, into +// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with +// non-conflicting configurations. Two configurations conflict if they have +// identical {@link ATNConfig//state} and {@link ATNConfig//context} values +// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)} +// and {@code (s, j, ctx, _)} for {@code i!=j}.

+// +//

Reduce these configuration subsets to the set of possible alternatives. +// You can compute the alternative subsets in one pass as follows:

+// +//

{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in +// {@code C} holding {@code s} and {@code ctx} fixed.

+// +//

Or in pseudo-code, for each configuration {@code c} in {@code C}:

+// +//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+// 
+// +//

The values in {@code map} are the set of {@code A_s,ctx} sets.

+// +//

If {@code |A_s,ctx|=1} then there is no conflict associated with +// {@code s} and {@code ctx}.

+// +//

Reduce the subsets to singletons by choosing a minimum of each subset. If +// the union of these alternative subsets is a singleton, then no amount of +// more lookahead will help us. We will always pick that alternative. If, +// however, there is more than one alternative, then we are uncertain which +// alternative to predict and must continue looking for resolution. We may +// or may not discover an ambiguity in the future, even if there are no +// conflicting subsets this round.

+// +//

The biggest sin is to terminate early because it means we've made a +// decision but were uncertain as to the eventual outcome. We haven't used +// enough lookahead. On the other hand, announcing a conflict too late is no +// big deal you will still have the conflict. It's just inefficient. It +// might even look until the end of file.

+// +//

No special consideration for semantic predicates is required because +// predicates are evaluated on-the-fly for full LL prediction, ensuring that +// no configuration contains a semantic context during the termination +// check.

+// +//

+// CONFLICTING CONFIGS

+// +//

Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict +// when {@code i!=j} but {@code x=x'}. Because we merge all +// {@code (s, i, _)} configurations together, that means that there are at +// most {@code n} configurations associated with state {@code s} for +// {@code n} possible alternatives in the decision. The merged stacks +// complicate the comparison of configuration contexts {@code x} and +// {@code x'}. Sam checks to see if one is a subset of the other by calling +// merge and checking to see if the merged result is either {@code x} or +// {@code x'}. If the {@code x} associated with lowest alternative {@code i} +// is the superset, then {@code i} is the only possible prediction since the +// others resolve to {@code min(i)} as well. However, if {@code x} is +// associated with {@code j>i} then at least one stack configuration for +// {@code j} is not in conflict with alternative {@code i}. The algorithm +// should keep going, looking for more lookahead due to the uncertainty.

+// +//

+// For simplicity, I'm doing an equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity, but also because that is the test you need to detect the
+// alternatives that are actually in conflict.

+// +//

+// CONTINUE/STOP RULE

+// +//

Continue if union of resolved alternative sets from non-conflicting and +// conflicting alternative subsets has more than one alternative. We are +// uncertain about which alternative to predict.

+// +//

+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.

+// +//

+// CASES

+// +//
    +// +//
+//   - no conflicts and more than 1 alternative in set => continue
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
+//     {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
+//     {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+//     {@code {1,3}} => continue
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+//     {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
+//     {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+//     {@code {1}} => stop and predict 1
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+//     {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
+//     {@code {1}} = {@code {1}} => stop and predict 1, can announce
+//     ambiguity {@code {1,2}}
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
+//     {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
+//     {@code {2}} = {@code {1,2}} => continue
+//
+//   - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
+//     {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
+//     {@code {3}} = {@code {1,3}} => continue
+// +//

+// EXACT AMBIGUITY DETECTION

+// +//

If all states Report the same conflicting set of alternatives, then we +// know we have the exact ambiguity set.

+// +//

|A_i|>1 and +// A_i = A_j for all i, j.

+// +//

In other words, we continue examining lookahead until all {@code A_i} +// have more than one alternative and all {@code A_i} are the same. If +// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate +// because the resolved set is {@code {1}}. To determine what the real +// ambiguity is, we have to know whether the ambiguity is between one and +// two or one and three so we keep going. We can only stop prediction when +// we need exact ambiguity detection when the sets look like +// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...

+// +func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int { + return PredictionModegetSingleViableAlt(altsets) +} + +// +// Determines if every alternative subset in {@code altsets} contains more +// than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every {@link BitSet} in {@code altsets} has +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +// +func PredictionModeallSubsetsConflict(altsets []*BitSet) bool { + return !PredictionModehasNonConflictingAltSet(altsets) +} + +// +// Determines if any single alternative subset in {@code altsets} contains +// exactly one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} 1, otherwise {@code false} +// +func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() == 1 { + return true + } + } + return false +} + +// +// Determines if any single alternative subset in {@code altsets} contains +// more than one alternative. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if {@code altsets} contains a {@link BitSet} with +// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false} +// +func PredictionModehasConflictingAltSet(altsets []*BitSet) bool { + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if alts.length() > 1 { + return true + } + } + return false +} + +// +// Determines if every alternative subset in {@code altsets} is equivalent. +// +// @param altsets a collection of alternative subsets +// @return {@code true} if every member of {@code altsets} is equal to the +// others, otherwise {@code false} +// +func PredictionModeallSubsetsEqual(altsets []*BitSet) bool { + var first *BitSet + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + if first == nil { + first = alts + } else if alts != first { + return false + } + } + + return true +} + +// +// Returns the unique alternative predicted by all alternative subsets in +// {@code altsets}. If no such alternative exists, this method returns +// {@link ATN//INVALID_ALT_NUMBER}. +// +// @param altsets a collection of alternative subsets +// +func PredictionModegetUniqueAlt(altsets []*BitSet) int { + all := PredictionModeGetAlts(altsets) + if all.length() == 1 { + return all.minValue() + } + + return ATNInvalidAltNumber +} + +// Gets the complete set of represented alternatives for a collection of +// alternative subsets. This method returns the union of each {@link BitSet} +// in {@code altsets}. +// +// @param altsets a collection of alternative subsets +// @return the set of represented alternatives in {@code altsets} +// +func PredictionModeGetAlts(altsets []*BitSet) *BitSet { + all := NewBitSet() + for _, alts := range altsets { + all.or(alts) + } + return all +} + +// +// This func gets the conflicting alt subsets from a configuration set. +// For each configuration {@code c} in {@code configs}: +// +//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+// 
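+//
+// The grouping key pairs state and context, so with hypothetical
+// configurations (s,1,x), (s,2,x), (s',1,y): the first two share key
+// 31*s + hash(x) and produce the subset {1,2}, while (s',1,y) yields its
+// own subset {1}.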
+// +func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet { + configToAlts := make(map[int]*BitSet) + + for _, c := range configs.GetItems() { + key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash() + + alts, ok := configToAlts[key] + if !ok { + alts = NewBitSet() + configToAlts[key] = alts + } + alts.add(c.GetAlt()) + } + + values := make([]*BitSet, 0, 10) + for _, v := range configToAlts { + values = append(values, v) + } + return values +} + +// +// Get a map from state to alt subset from a configuration set. For each +// configuration {@code c} in {@code configs}: +// +//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+// 
+// +func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict { + m := NewAltDict() + + for _, c := range configs.GetItems() { + alts := m.Get(c.GetState().String()) + if alts == nil { + alts = NewBitSet() + m.put(c.GetState().String(), alts) + } + alts.(*BitSet).add(c.GetAlt()) + } + return m +} + +func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool { + values := PredictionModeGetStateToAltMap(configs).values() + for i := 0; i < len(values); i++ { + if values[i].(*BitSet).length() == 1 { + return true + } + } + return false +} + +func PredictionModegetSingleViableAlt(altsets []*BitSet) int { + result := ATNInvalidAltNumber + + for i := 0; i < len(altsets); i++ { + alts := altsets[i] + minAlt := alts.minValue() + if result == ATNInvalidAltNumber { + result = minAlt + } else if result != minAlt { // more than 1 viable alt + return ATNInvalidAltNumber + } + } + return result +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go new file mode 100644 index 0000000000..d114800f42 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go @@ -0,0 +1,217 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strings" + + "strconv" +) + +type Recognizer interface { + GetLiteralNames() []string + GetSymbolicNames() []string + GetRuleNames() []string + + Sempred(RuleContext, int, int) bool + Precpred(RuleContext, int) bool + + GetState() int + SetState(int) + Action(RuleContext, int, int) + AddErrorListener(ErrorListener) + RemoveErrorListeners() + GetATN() *ATN + GetErrorListenerDispatch() ErrorListener +} + +type BaseRecognizer struct { + listeners []ErrorListener + state int + + RuleNames []string + LiteralNames []string + SymbolicNames []string + GrammarFileName string +} + +func NewBaseRecognizer() *BaseRecognizer { + rec := new(BaseRecognizer) + rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE} + rec.state = -1 + return rec +} + +var tokenTypeMapCache = make(map[string]int) +var ruleIndexMapCache = make(map[string]int) + +func (b *BaseRecognizer) checkVersion(toolVersion string) { + runtimeVersion := "4.8" + if runtimeVersion != toolVersion { + fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion) + } +} + +func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) { + panic("action not implemented on Recognizer!") +} + +func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) { + b.listeners = append(b.listeners, listener) +} + +func (b *BaseRecognizer) RemoveErrorListeners() { + b.listeners = make([]ErrorListener, 0) +} + +func (b *BaseRecognizer) GetRuleNames() []string { + return b.RuleNames +} + +func (b *BaseRecognizer) GetTokenNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetSymbolicNames() []string { + return b.SymbolicNames +} + +func (b *BaseRecognizer) GetLiteralNames() []string { + return b.LiteralNames +} + +func (b *BaseRecognizer) GetState() int { + return b.state +} + +func (b *BaseRecognizer) SetState(v int) { + b.state = v +} + +//func (b *Recognizer) GetTokenTypeMap() { +// var tokenNames = b.GetTokenNames() +// if (tokenNames==nil) { +// panic("The current recognizer does not provide a list of token names.") +// } +// var result = 
tokenTypeMapCache[tokenNames] +// if(result==nil) { +// result = tokenNames.reduce(function(o, k, i) { o[k] = i }) +// result.EOF = TokenEOF +// tokenTypeMapCache[tokenNames] = result +// } +// return result +//} + +// Get a map from rule names to rule indexes. +// +//
Used for XPath and tree pattern compilation.
+// +func (b *BaseRecognizer) GetRuleIndexMap() map[string]int { + + panic("Method not defined!") + // var ruleNames = b.GetRuleNames() + // if (ruleNames==nil) { + // panic("The current recognizer does not provide a list of rule names.") + // } + // + // var result = ruleIndexMapCache[ruleNames] + // if(result==nil) { + // result = ruleNames.reduce(function(o, k, i) { o[k] = i }) + // ruleIndexMapCache[ruleNames] = result + // } + // return result +} + +func (b *BaseRecognizer) GetTokenType(tokenName string) int { + panic("Method not defined!") + // var ttype = b.GetTokenTypeMap()[tokenName] + // if (ttype !=nil) { + // return ttype + // } else { + // return TokenInvalidType + // } +} + +//func (b *Recognizer) GetTokenTypeMap() map[string]int { +// Vocabulary vocabulary = getVocabulary() +// +// Synchronized (tokenTypeMapCache) { +// Map result = tokenTypeMapCache.Get(vocabulary) +// if (result == null) { +// result = new HashMap() +// for (int i = 0; i < GetATN().maxTokenType; i++) { +// String literalName = vocabulary.getLiteralName(i) +// if (literalName != null) { +// result.put(literalName, i) +// } +// +// String symbolicName = vocabulary.GetSymbolicName(i) +// if (symbolicName != null) { +// result.put(symbolicName, i) +// } +// } +// +// result.put("EOF", Token.EOF) +// result = Collections.unmodifiableMap(result) +// tokenTypeMapCache.put(vocabulary, result) +// } +// +// return result +// } +//} + +// What is the error header, normally line/character position information?// +func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string { + line := e.GetOffendingToken().GetLine() + column := e.GetOffendingToken().GetColumn() + return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column) +} + +// How should a token be displayed in an error message? The default +// is to display just the text, but during development you might +// want to have a lot of information spit out. Override in that case +// to use t.String() (which, for CommonToken, dumps everything about +// the token). This is better than forcing you to override a method in +// your token objects because you don't have to go modify your lexer +// so that it creates a NewJava type. +// +// @deprecated This method is not called by the ANTLR 4 Runtime. Specific +// implementations of {@link ANTLRErrorStrategy} may provide a similar +// feature when necessary. For example, see +// {@link DefaultErrorStrategy//GetTokenErrorDisplay}. 
+// +func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string { + if t == nil { + return "" + } + s := t.GetText() + if s == "" { + if t.GetTokenType() == TokenEOF { + s = "" + } else { + s = "<" + strconv.Itoa(t.GetTokenType()) + ">" + } + } + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + + return "'" + s + "'" +} + +func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener { + return NewProxyErrorListener(b.listeners) +} + +// subclass needs to override these if there are sempreds or actions +// that the ATN interp needs to execute +func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool { + return true +} + +func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool { + return true +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go new file mode 100644 index 0000000000..600cf8c062 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go @@ -0,0 +1,114 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// A rule context is a record of a single rule invocation. It knows +// which context invoked it, if any. If there is no parent context, then +// naturally the invoking state is not valid. The parent link +// provides a chain upwards from the current rule invocation to the root +// of the invocation tree, forming a stack. We actually carry no +// information about the rule associated with b context (except +// when parsing). We keep only the state number of the invoking state from +// the ATN submachine that invoked b. Contrast b with the s +// pointer inside ParserRuleContext that tracks the current state +// being "executed" for the current rule. +// +// The parent contexts are useful for computing lookahead sets and +// getting error information. +// +// These objects are used during parsing and prediction. +// For the special case of parsers, we use the subclass +// ParserRuleContext. +// +// @see ParserRuleContext +// + +type RuleContext interface { + RuleNode + + GetInvokingState() int + SetInvokingState(int) + + GetRuleIndex() int + IsEmpty() bool + + GetAltNumber() int + SetAltNumber(altNumber int) + + String([]string, RuleContext) string +} + +type BaseRuleContext struct { + parentCtx RuleContext + invokingState int + RuleIndex int +} + +func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext { + + rn := new(BaseRuleContext) + + // What context invoked b rule? + rn.parentCtx = parent + + // What state invoked the rule associated with b context? + // The "return address" is the followState of invokingState + // If parent is nil, b should be -1. 
+ if parent == nil { + rn.invokingState = -1 + } else { + rn.invokingState = invokingState + } + + return rn +} + +func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext { + return b +} + +func (b *BaseRuleContext) SetParent(v Tree) { + if v == nil { + b.parentCtx = nil + } else { + b.parentCtx = v.(RuleContext) + } +} + +func (b *BaseRuleContext) GetInvokingState() int { + return b.invokingState +} + +func (b *BaseRuleContext) SetInvokingState(t int) { + b.invokingState = t +} + +func (b *BaseRuleContext) GetRuleIndex() int { + return b.RuleIndex +} + +func (b *BaseRuleContext) GetAltNumber() int { + return ATNInvalidAltNumber +} + +func (b *BaseRuleContext) SetAltNumber(altNumber int) {} + +// A context is empty if there is no invoking state meaning nobody call +// current context. +func (b *BaseRuleContext) IsEmpty() bool { + return b.invokingState == -1 +} + +// Return the combined text of all child nodes. This method only considers +// tokens which have been added to the parse tree. +//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not +// added to the parse trees, they will not appear in the output of b +// method. +// + +func (b *BaseRuleContext) GetParent() Tree { + return b.parentCtx +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go new file mode 100644 index 0000000000..49205a1624 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go @@ -0,0 +1,455 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "fmt" + "strconv" +) + +// A tree structure used to record the semantic context in which +// an ATN configuration is valid. It's either a single predicate, +// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}. +// +//

I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
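+//
+// For example (an illustrative sketch, not upstream documentation; p1 and p2
+// stand for any two predicates), the two combinators defined below behave as:
+//
+//	c := SemanticContextandContext(p1, p2) // a context equivalent to {p1&&p2}?
+//	d := SemanticContextorContext(p1, p2)  // a context equivalent to {p1||p2}?
+//
+// SemanticContextNone acts as the identity for andContext and as the
+// absorbing element for orContext, as the two functions below show.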
+// + +type SemanticContext interface { + comparable + + evaluate(parser Recognizer, outerContext RuleContext) bool + evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext + + hash() int + String() string +} + +func SemanticContextandContext(a, b SemanticContext) SemanticContext { + if a == nil || a == SemanticContextNone { + return b + } + if b == nil || b == SemanticContextNone { + return a + } + result := NewAND(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +func SemanticContextorContext(a, b SemanticContext) SemanticContext { + if a == nil { + return b + } + if b == nil { + return a + } + if a == SemanticContextNone || b == SemanticContextNone { + return SemanticContextNone + } + result := NewOR(a, b) + if len(result.opnds) == 1 { + return result.opnds[0] + } + + return result +} + +type Predicate struct { + ruleIndex int + predIndex int + isCtxDependent bool +} + +func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate { + p := new(Predicate) + + p.ruleIndex = ruleIndex + p.predIndex = predIndex + p.isCtxDependent = isCtxDependent // e.g., $i ref in pred + return p +} + +//The default {@link SemanticContext}, which is semantically equivalent to +//a predicate of the form {@code {true}?}. + +var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false) + +func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + return p +} + +func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + + var localctx RuleContext + + if p.isCtxDependent { + localctx = outerContext + } + + return parser.Sempred(localctx, p.ruleIndex, p.predIndex) +} + +func (p *Predicate) equals(other interface{}) bool { + if p == other { + return true + } else if _, ok := other.(*Predicate); !ok { + return false + } else { + return p.ruleIndex == other.(*Predicate).ruleIndex && + p.predIndex == other.(*Predicate).predIndex && + p.isCtxDependent == other.(*Predicate).isCtxDependent + } +} + +func (p *Predicate) hash() int { + return p.ruleIndex*43 + p.predIndex*47 +} + +func (p *Predicate) String() string { + return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?" +} + +type PrecedencePredicate struct { + precedence int +} + +func NewPrecedencePredicate(precedence int) *PrecedencePredicate { + + p := new(PrecedencePredicate) + p.precedence = precedence + + return p +} + +func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool { + return parser.Precpred(outerContext, p.precedence) +} + +func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + if parser.Precpred(outerContext, p.precedence) { + return SemanticContextNone + } + + return nil +} + +func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int { + return p.precedence - other.precedence +} + +func (p *PrecedencePredicate) equals(other interface{}) bool { + if p == other { + return true + } else if _, ok := other.(*PrecedencePredicate); !ok { + return false + } else { + return p.precedence == other.(*PrecedencePredicate).precedence + } +} + +func (p *PrecedencePredicate) hash() int { + return p.precedence * 51 +} + +func (p *PrecedencePredicate) String() string { + return "{" + strconv.Itoa(p.precedence) + ">=prec}?" 
+} + +func PrecedencePredicatefilterPrecedencePredicates(set *Set) []*PrecedencePredicate { + result := make([]*PrecedencePredicate, 0) + + for _, v := range set.values() { + if c2, ok := v.(*PrecedencePredicate); ok { + result = append(result, c2) + } + } + + return result +} + +// A semantic context which is true whenever none of the contained contexts +// is false.` + +type AND struct { + opnds []SemanticContext +} + +func NewAND(a, b SemanticContext) *AND { + + operands := NewSet(nil, nil) + if aa, ok := a.(*AND); ok { + for _, o := range aa.opnds { + operands.add(o) + } + } else { + operands.add(a) + } + + if ba, ok := b.(*AND); ok { + for _, o := range ba.opnds { + operands.add(o) + } + } else { + operands.add(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence < reduced.precedence { + reduced = p + } + } + + operands.add(reduced) + } + + vs := operands.values() + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + and := new(AND) + and.opnds = opnds + + return and +} + +func (a *AND) equals(other interface{}) bool { + if a == other { + return true + } else if _, ok := other.(*AND); !ok { + return false + } else { + for i, v := range other.(*AND).opnds { + if !a.opnds[i].equals(v) { + return false + } + } + return true + } +} + +// +// {@inheritDoc} +// +//

+// The evaluation of predicates by this AND context is short-circuiting,
+// but unordered.
+// +func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(a.opnds); i++ { + if !a.opnds[i].evaluate(parser, outerContext) { + return false + } + } + return true +} + +func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + + for i := 0; i < len(a.opnds); i++ { + context := a.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == nil { + // The AND context is false if any element is false + return nil + } else if evaluated != SemanticContextNone { + // Reduce the result by Skipping true elements + operands = append(operands, evaluated) + } + } + if !differs { + return a + } + + if len(operands) == 0 { + // all elements were true, so the AND context is true + return SemanticContextNone + } + + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextandContext(result, o) + } + } + + return result +} + +func (a *AND) hash() int { + h := murmurInit(37) // Init with a value different from OR + for _, op := range a.opnds { + h = murmurUpdate(h, op.hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *OR) hash() int { + h := murmurInit(41) // Init with a value different from AND + for _, op := range a.opnds { + h = murmurUpdate(h, op.hash()) + } + return murmurFinish(h, len(a.opnds)) +} + +func (a *AND) String() string { + s := "" + + for _, o := range a.opnds { + s += "&& " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} + +// +// A semantic context which is true whenever at least one of the contained +// contexts is true. +// + +type OR struct { + opnds []SemanticContext +} + +func NewOR(a, b SemanticContext) *OR { + + operands := NewSet(nil, nil) + if aa, ok := a.(*OR); ok { + for _, o := range aa.opnds { + operands.add(o) + } + } else { + operands.add(a) + } + + if ba, ok := b.(*OR); ok { + for _, o := range ba.opnds { + operands.add(o) + } + } else { + operands.add(b) + } + precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands) + if len(precedencePredicates) > 0 { + // interested in the transition with the lowest precedence + var reduced *PrecedencePredicate + + for _, p := range precedencePredicates { + if reduced == nil || p.precedence > reduced.precedence { + reduced = p + } + } + + operands.add(reduced) + } + + vs := operands.values() + + opnds := make([]SemanticContext, len(vs)) + for i, v := range vs { + opnds[i] = v.(SemanticContext) + } + + o := new(OR) + o.opnds = opnds + + return o +} + +func (o *OR) equals(other interface{}) bool { + if o == other { + return true + } else if _, ok := other.(*OR); !ok { + return false + } else { + for i, v := range other.(*OR).opnds { + if !o.opnds[i].equals(v) { + return false + } + } + return true + } +} + +//

+// The evaluation of predicates by this OR context is short-circuiting,
+// but unordered.
+// +func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool { + for i := 0; i < len(o.opnds); i++ { + if o.opnds[i].evaluate(parser, outerContext) { + return true + } + } + return false +} + +func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext { + differs := false + operands := make([]SemanticContext, 0) + for i := 0; i < len(o.opnds); i++ { + context := o.opnds[i] + evaluated := context.evalPrecedence(parser, outerContext) + differs = differs || (evaluated != context) + if evaluated == SemanticContextNone { + // The OR context is true if any element is true + return SemanticContextNone + } else if evaluated != nil { + // Reduce the result by Skipping false elements + operands = append(operands, evaluated) + } + } + if !differs { + return o + } + if len(operands) == 0 { + // all elements were false, so the OR context is false + return nil + } + var result SemanticContext + + for _, o := range operands { + if result == nil { + result = o + } else { + result = SemanticContextorContext(result, o) + } + } + + return result +} + +func (o *OR) String() string { + s := "" + + for _, o := range o.opnds { + s += "|| " + fmt.Sprint(o) + } + + if len(s) > 3 { + return s[0:3] + } + + return s +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go new file mode 100644 index 0000000000..2d8e99095d --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go @@ -0,0 +1,210 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import ( + "strconv" + "strings" +) + +type TokenSourceCharStreamPair struct { + tokenSource TokenSource + charStream CharStream +} + +// A token has properties: text, type, line, character position in the line +// (so we can ignore tabs), token channel, index, and source from which +// we obtained this token. + +type Token interface { + GetSource() *TokenSourceCharStreamPair + GetTokenType() int + GetChannel() int + GetStart() int + GetStop() int + GetLine() int + GetColumn() int + + GetText() string + SetText(s string) + + GetTokenIndex() int + SetTokenIndex(v int) + + GetTokenSource() TokenSource + GetInputStream() CharStream +} + +type BaseToken struct { + source *TokenSourceCharStreamPair + tokenType int // token type of the token + channel int // The parser ignores everything not on DEFAULT_CHANNEL + start int // optional return -1 if not implemented. + stop int // optional return -1 if not implemented. + tokenIndex int // from 0..n-1 of the token object in the input stream + line int // line=1..n of the 1st character + column int // beginning of the line at which it occurs, 0..n-1 + text string // text of the token. + readOnly bool +} + +const ( + TokenInvalidType = 0 + + // During lookahead operations, this "token" signifies we hit rule end ATN state + // and did not follow it despite needing to. + TokenEpsilon = -2 + + TokenMinUserTokenType = 1 + + TokenEOF = -1 + + // All tokens go to the parser (unless Skip() is called in that rule) + // on a particular "channel". The parser tunes to a particular channel + // so that whitespace etc... can go to the parser on a "hidden" channel. + + TokenDefaultChannel = 0 + + // Anything on different channel than DEFAULT_CHANNEL is not parsed + // by parser. 
+ + TokenHiddenChannel = 1 +) + +func (b *BaseToken) GetChannel() int { + return b.channel +} + +func (b *BaseToken) GetStart() int { + return b.start +} + +func (b *BaseToken) GetStop() int { + return b.stop +} + +func (b *BaseToken) GetLine() int { + return b.line +} + +func (b *BaseToken) GetColumn() int { + return b.column +} + +func (b *BaseToken) GetTokenType() int { + return b.tokenType +} + +func (b *BaseToken) GetSource() *TokenSourceCharStreamPair { + return b.source +} + +func (b *BaseToken) GetTokenIndex() int { + return b.tokenIndex +} + +func (b *BaseToken) SetTokenIndex(v int) { + b.tokenIndex = v +} + +func (b *BaseToken) GetTokenSource() TokenSource { + return b.source.tokenSource +} + +func (b *BaseToken) GetInputStream() CharStream { + return b.source.charStream +} + +type CommonToken struct { + *BaseToken +} + +func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken { + + t := new(CommonToken) + + t.BaseToken = new(BaseToken) + + t.source = source + t.tokenType = tokenType + t.channel = channel + t.start = start + t.stop = stop + t.tokenIndex = -1 + if t.source.tokenSource != nil { + t.line = source.tokenSource.GetLine() + t.column = source.tokenSource.GetCharPositionInLine() + } else { + t.column = -1 + } + return t +} + +// An empty {@link Pair} which is used as the default value of +// {@link //source} for tokens that do not have a source. + +//CommonToken.EMPTY_SOURCE = [ nil, nil ] + +// Constructs a New{@link CommonToken} as a copy of another {@link Token}. +// +//
If {@code oldToken} is also a {@link CommonToken} instance, the newly
+// constructed token will share a reference to the {@link //text} field and
+// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
+// be assigned the result of calling {@link //GetText}, and {@link //source}
+// will be constructed from the result of {@link Token//GetTokenSource} and
+// {@link Token//GetInputStream}.
+// +// @param oldToken The token to copy. +// +func (c *CommonToken) clone() *CommonToken { + t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop) + t.tokenIndex = c.GetTokenIndex() + t.line = c.GetLine() + t.column = c.GetColumn() + t.text = c.GetText() + return t +} + +func (c *CommonToken) GetText() string { + if c.text != "" { + return c.text + } + input := c.GetInputStream() + if input == nil { + return "" + } + n := input.Size() + if c.start < n && c.stop < n { + return input.GetTextFromInterval(NewInterval(c.start, c.stop)) + } + return "" +} + +func (c *CommonToken) SetText(text string) { + c.text = text +} + +func (c *CommonToken) String() string { + txt := c.GetText() + if txt != "" { + txt = strings.Replace(txt, "\n", "\\n", -1) + txt = strings.Replace(txt, "\r", "\\r", -1) + txt = strings.Replace(txt, "\t", "\\t", -1) + } else { + txt = "" + } + + var ch string + if c.channel > 0 { + ch = ",channel=" + strconv.Itoa(c.channel) + } else { + ch = "" + } + + return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" + + txt + "',<" + strconv.Itoa(c.tokenType) + ">" + + ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]" +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go new file mode 100644 index 0000000000..e023978fef --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go @@ -0,0 +1,17 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenSource interface { + NextToken() Token + Skip() + More() + GetLine() int + GetCharPositionInLine() int + GetInputStream() CharStream + GetSourceName() string + setTokenFactory(factory TokenFactory) + GetTokenFactory() TokenFactory +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go new file mode 100644 index 0000000000..df92c81478 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go @@ -0,0 +1,20 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +type TokenStream interface { + IntStream + + LT(k int) Token + + Get(index int) Token + GetTokenSource() TokenSource + SetTokenSource(TokenSource) + + GetAllText() string + GetTextFromInterval(*Interval) string + GetTextFromRuleContext(RuleContext) string + GetTextFromTokens(Token, Token) string +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go new file mode 100644 index 0000000000..96a03f02aa --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go @@ -0,0 +1,649 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. +package antlr + +import ( +"bytes" +"fmt" +) + + +// +// Useful for rewriting out a buffered input token stream after doing some +// augmentation or other manipulations on it. + +//

+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example:
+//
+//	CharStream input = new ANTLRFileStream("input");
+//	TLexer lex = new TLexer(input);
+//	CommonTokenStream tokens = new CommonTokenStream(lex);
+//	T parser = new T(tokens);
+//	TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+//	parser.startRule();
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+//
+//	Token t,u;
+//	...
+//	rewriter.insertAfter(t, "text to put after t");
+//	rewriter.insertAfter(u, "text after u");
+//	System.out.println(rewriter.getText());
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+//
+//	rewriter.insertAfter("pass1", t, "text to put after t");
+//	rewriter.insertAfter("pass2", u, "text after u");
+//	System.out.println(rewriter.getText("pass1"));
+//	System.out.println(rewriter.getText("pass2"));
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.

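+//
+// A rough Go equivalent of the first example above, using this type's exported
+// API (a sketch, not part of the upstream docs; TLexer is a placeholder for a
+// generated lexer and t for a Token obtained while walking the stream):
+//
+//	input := antlr.NewInputStream("x = 1;")
+//	lex := NewTLexer(input)
+//	tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
+//	rewriter := antlr.NewTokenStreamRewriter(tokens)
+//	tokens.Fill()
+//	rewriter.InsertAfterDefault(t.GetTokenIndex(), "text to put after t")
+//	fmt.Println(rewriter.GetTextDefault())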
+ + + +const( + Default_Program_Name = "default" + Program_Init_Size = 100 + Min_Token_Index = 0 +) + +// Define the rewrite operation hierarchy + +type RewriteOperation interface { + // Execute the rewrite operation by possibly adding to the buffer. + // Return the index of the next token to operate on. + Execute(buffer *bytes.Buffer) int + String() string + GetInstructionIndex() int + GetIndex() int + GetText() string + GetOpName() string + GetTokens() TokenStream + SetInstructionIndex(val int) + SetIndex(int) + SetText(string) + SetOpName(string) + SetTokens(TokenStream) +} + +type BaseRewriteOperation struct { + //Current index of rewrites list + instruction_index int + //Token buffer index + index int + //Substitution text + text string + //Actual operation name + op_name string + //Pointer to token steam + tokens TokenStream +} + +func (op *BaseRewriteOperation)GetInstructionIndex() int{ + return op.instruction_index +} + +func (op *BaseRewriteOperation)GetIndex() int{ + return op.index +} + +func (op *BaseRewriteOperation)GetText() string{ + return op.text +} + +func (op *BaseRewriteOperation)GetOpName() string{ + return op.op_name +} + +func (op *BaseRewriteOperation)GetTokens() TokenStream{ + return op.tokens +} + +func (op *BaseRewriteOperation)SetInstructionIndex(val int){ + op.instruction_index = val +} + +func (op *BaseRewriteOperation)SetIndex(val int) { + op.index = val +} + +func (op *BaseRewriteOperation)SetText(val string){ + op.text = val +} + +func (op *BaseRewriteOperation)SetOpName(val string){ + op.op_name = val +} + +func (op *BaseRewriteOperation)SetTokens(val TokenStream) { + op.tokens = val +} + + +func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{ + return op.index +} + +func (op *BaseRewriteOperation) String() string { + return fmt.Sprintf("<%s@%d:\"%s\">", + op.op_name, + op.tokens.Get(op.GetIndex()), + op.text, + ) + +} + + +type InsertBeforeOp struct { + BaseRewriteOperation +} + +func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{ + return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index, + text:text, + op_name:"InsertBeforeOp", + tokens:stream, + }} +} + +func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{ + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertBeforeOp) String() string { + return op.BaseRewriteOperation.String() +} + +// Distinguish between insert after/before to do the "insert afters" +// first and then the "insert befores" at same index. Implementation +// of "insert after" is "insert before index+1". + +type InsertAfterOp struct { + BaseRewriteOperation +} + +func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{ + return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{ + index:index+1, + text:text, + tokens:stream, + }} +} + +func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int { + buffer.WriteString(op.text) + if op.tokens.Get(op.index).GetTokenType() != TokenEOF{ + buffer.WriteString(op.tokens.Get(op.index).GetText()) + } + return op.index+1 +} + +func (op *InsertAfterOp) String() string { + return op.BaseRewriteOperation.String() +} + +// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp +// instructions. 
+type ReplaceOp struct{ + BaseRewriteOperation + LastIndex int +} + +func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp { + return &ReplaceOp{ + BaseRewriteOperation:BaseRewriteOperation{ + index:from, + text:text, + op_name:"ReplaceOp", + tokens:stream, + }, + LastIndex:to, + } +} + +func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{ + if op.text != ""{ + buffer.WriteString(op.text) + } + return op.LastIndex +1 +} + +func (op *ReplaceOp) String() string { + if op.text == "" { + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex)) + } + return fmt.Sprintf("", + op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text) +} + + +type TokenStreamRewriter struct { + //Our source stream + tokens TokenStream + // You may have multiple, named streams of rewrite operations. + // I'm calling these things "programs." + // Maps String (name) → rewrite (List) + programs map[string][]RewriteOperation + last_rewrite_token_indexes map[string]int +} + +func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{ + return &TokenStreamRewriter{ + tokens: tokens, + programs: map[string][]RewriteOperation{ + Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size), + }, + last_rewrite_token_indexes: map[string]int{}, + } +} + +func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{ + return tsr.tokens +} + +// Rollback the instruction stream for a program so that +// the indicated instruction (via instructionIndex) is no +// longer in the stream. UNTESTED! +func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){ + is, ok := tsr.programs[program_name] + if ok{ + tsr.programs[program_name] = is[Min_Token_Index:instruction_index] + } +} + +func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){ + tsr.Rollback(Default_Program_Name, instruction_index) +} +//Reset the program so that no instructions exist +func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){ + tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included +} + +func (tsr *TokenStreamRewriter) DeleteProgramDefault(){ + tsr.DeleteProgram(Default_Program_Name) +} + +func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){ + // to insert after, just insert before next index (even if past end) + var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){ + tsr.InsertAfter(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){ + tsr.InsertAfter(program_name, token.GetTokenIndex(), text) +} + +func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){ + var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){ + tsr.InsertBefore(Default_Program_Name, index, text) +} + +func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){ + tsr.InsertBefore(program_name, token.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text 
string){ + if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){ + panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)", + from, to, tsr.tokens.Size())) + } + var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens) + rewrites := tsr.GetProgram(program_name) + op.SetInstructionIndex(len(rewrites)) + tsr.AddToProgram(program_name, op) +} + +func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) { + tsr.Replace(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){ + tsr.ReplaceDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){ + tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text) +} + +func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){ + tsr.ReplaceToken(Default_Program_Name, from, to, text) +} + +func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){ + tsr.ReplaceTokenDefault(index, index, text) +} + +func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){ + tsr.Replace(program_name, from, to, "" ) +} + +func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){ + tsr.Delete(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){ + tsr.DeleteDefault(index,index) +} + +func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) { + tsr.ReplaceToken(program_name, from, to, "") +} + +func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){ + tsr.DeleteToken(Default_Program_Name, from, to) +} + +func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int { + i, ok := tsr.last_rewrite_token_indexes[program_name] + if !ok{ + return -1 + } + return i +} + +func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{ + return tsr.GetLastRewriteTokenIndex(Default_Program_Name) +} + +func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){ + tsr.last_rewrite_token_indexes[program_name] = i +} + +func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{ + is := make([]RewriteOperation, 0, Program_Init_Size) + tsr.programs[name] = is + return is +} + +func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){ + is := tsr.GetProgram(name) + is = append(is, op) + tsr.programs[name] = is +} + +func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation { + is, ok := tsr.programs[name] + if !ok{ + is = tsr.InitializeProgram(name) + } + return is +} +// Return the text from the original tokens altered per the +// instructions given to this rewriter. +func (tsr *TokenStreamRewriter)GetTextDefault() string{ + return tsr.GetText( + Default_Program_Name, + NewInterval(0, tsr.tokens.Size()-1)) +} +// Return the text from the original tokens altered per the +// instructions given to this rewriter. 
+func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string { + rewrites := tsr.programs[program_name] + start := interval.Start + stop := interval.Stop + // ensure start/end are in range + stop = min(stop, tsr.tokens.Size()-1) + start = max(start,0) + if rewrites == nil || len(rewrites) == 0{ + return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute + } + buf := bytes.Buffer{} + // First, optimize instruction stream + indexToOp := reduceToSingleOperationPerIndex(rewrites) + // Walk buffer, executing instructions and emitting tokens + for i:=start; i<=stop && i= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())} + } + } + return buf.String() +} + +// We need to combine operations and report invalid operations (like +// overlapping replaces that are not completed nested). Inserts to +// same index need to be combined etc... Here are the cases: +// +// I.i.u I.j.v leave alone, nonoverlapping +// I.i.u I.i.v combine: Iivu +// +// R.i-j.u R.x-y.v | i-j in x-y delete first R +// R.i-j.u R.i-j.v delete first R +// R.i-j.u R.x-y.v | x-y in i-j ERROR +// R.i-j.u R.x-y.v | boundaries overlap ERROR +// +// Delete special case of replace (text==null): +// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) +// +// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before +// we're not deleting i) +// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping +// R.x-y.v I.i.u | i in x-y ERROR +// R.x-y.v I.x.u R.x-y.uv (combine, delete I) +// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping +// +// I.i.u = insert u before op @ index i +// R.x-y.u = replace x-y indexed tokens with u +// +// First we need to examine replaces. For any replace op: +// +// 1. wipe out any insertions before op within that range. +// 2. Drop any replace op before that is contained completely within +// that range. +// 3. Throw exception upon boundary overlap with any previous replace. +// +// Then we can deal with inserts: +// +// 1. for any inserts to same index, combine even if not adjacent. +// 2. for any prior replace with same left boundary, combine this +// insert with replace and delete this replace. +// 3. throw exception if index in same range as previous replace +// +// Don't actually delete; make op null in list. Easier to walk list. +// Later we can throw as we add to index → op map. +// +// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the +// inserted stuff would be before the replace range. But, if you +// add tokens in front of a method body '{' and then delete the method +// body, I think the stuff before the '{' you added should disappear too. +// +// Return a map from token index to operation. +// +func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{ + // WALK REPLACES + for i:=0; i < len(rewrites); i++{ + op := rewrites[i] + if op == nil{continue} + rop, ok := op.(*ReplaceOp) + if !ok{continue} + // Wipe prior inserts within range + for j:=0; j rop.index && iop.index <=rop.LastIndex{ + // delete insert as it's a no-op. + rewrites[iop.instruction_index] = nil + } + } + } + // Drop any prior replaces contained within + for j:=0; j=rop.index && prevop.LastIndex <= rop.LastIndex{ + // delete replace as it's a no-op. 
+ rewrites[prevop.instruction_index] = nil + continue + } + // throw exception unless disjoint or identical + disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex + // Delete special case of replace (text==null): + // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right) + if prevop.text == "" && rop.text == "" && !disjoint{ + rewrites[prevop.instruction_index] = nil + rop.index = min(prevop.index, rop.index) + rop.LastIndex = max(prevop.LastIndex, rop.LastIndex) + println("new rop" + rop.String()) //TODO: remove console write, taken from Java version + }else if !disjoint{ + panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String()) + } + } + } + } + // WALK INSERTS + for i:=0; i < len(rewrites); i++ { + op := rewrites[i] + if op == nil{continue} + //hack to replicate inheritance in composition + _, iok := rewrites[i].(*InsertBeforeOp) + _, aok := rewrites[i].(*InsertAfterOp) + if !iok && !aok{continue} + iop := rewrites[i] + // combine current insert with prior if any at same index + // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic + for j:=0; j= rop.index && iop.GetIndex() <= rop.LastIndex{ + panic("insert op "+iop.String()+" within boundaries of previous "+rop.String()) + } + } + } + } + m := map[int]RewriteOperation{} + for i:=0; i < len(rewrites); i++{ + op := rewrites[i] + if op == nil {continue} + if _, ok := m[op.GetIndex()]; ok{ + panic("should only be one op per index") + } + m[op.GetIndex()] = op + } + return m +} + + +/* + Quick fixing Go lack of overloads + */ + +func max(a,b int)int{ + if a>b{ + return a + }else { + return b + } +} +func min(a,b int)int{ + if aThis is a one way link. It emanates from a state (usually via a list of +// transitions) and has a target state.

+//
+// Since we never have to change the ATN transitions once we construct it,
+// we can fix these transitions as specific classes. The DFA transitions,
+// on the other hand, need to update the labels as they add transitions to
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
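+//
+// For example (an illustrative sketch within this package; symbol, minVocab
+// and maxVocab are placeholders for a lookahead token type and the vocabulary
+// bounds), following an edge through the interface below looks like:
+//
+//	if t.getIsEpsilon() || t.Matches(symbol, minVocab, maxVocab) {
+//		next := t.getTarget() // the ATN state this transition leads to
+//		_ = next
+//	}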
+ +type Transition interface { + getTarget() ATNState + setTarget(ATNState) + getIsEpsilon() bool + getLabel() *IntervalSet + getSerializationType() int + Matches(int, int, int) bool +} + +type BaseTransition struct { + target ATNState + isEpsilon bool + label int + intervalSet *IntervalSet + serializationType int +} + +func NewBaseTransition(target ATNState) *BaseTransition { + + if target == nil { + panic("target cannot be nil.") + } + + t := new(BaseTransition) + + t.target = target + // Are we epsilon, action, sempred? + t.isEpsilon = false + t.intervalSet = nil + + return t +} + +func (t *BaseTransition) getTarget() ATNState { + return t.target +} + +func (t *BaseTransition) setTarget(s ATNState) { + t.target = s +} + +func (t *BaseTransition) getIsEpsilon() bool { + return t.isEpsilon +} + +func (t *BaseTransition) getLabel() *IntervalSet { + return t.intervalSet +} + +func (t *BaseTransition) getSerializationType() int { + return t.serializationType +} + +func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + panic("Not implemented") +} + +const ( + TransitionEPSILON = 1 + TransitionRANGE = 2 + TransitionRULE = 3 + TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}? + TransitionATOM = 5 + TransitionACTION = 6 + TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2 + TransitionNOTSET = 8 + TransitionWILDCARD = 9 + TransitionPRECEDENCE = 10 +) + +var TransitionserializationNames = []string{ + "INVALID", + "EPSILON", + "RANGE", + "RULE", + "PREDICATE", + "ATOM", + "ACTION", + "SET", + "NOT_SET", + "WILDCARD", + "PRECEDENCE", +} + +//var TransitionserializationTypes struct { +// EpsilonTransition int +// RangeTransition int +// RuleTransition int +// PredicateTransition int +// AtomTransition int +// ActionTransition int +// SetTransition int +// NotSetTransition int +// WildcardTransition int +// PrecedencePredicateTransition int +//}{ +// TransitionEPSILON, +// TransitionRANGE, +// TransitionRULE, +// TransitionPREDICATE, +// TransitionATOM, +// TransitionACTION, +// TransitionSET, +// TransitionNOTSET, +// TransitionWILDCARD, +// TransitionPRECEDENCE +//} + +// TODO: make all transitions sets? no, should remove set edges +type AtomTransition struct { + *BaseTransition +} + +func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition { + + t := new(AtomTransition) + t.BaseTransition = NewBaseTransition(target) + + t.label = intervalSet // The token type or character value or, signifies special intervalSet. 
+ t.intervalSet = t.makeLabel() + t.serializationType = TransitionATOM + + return t +} + +func (t *AtomTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addOne(t.label) + return s +} + +func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.label == symbol +} + +func (t *AtomTransition) String() string { + return strconv.Itoa(t.label) +} + +type RuleTransition struct { + *BaseTransition + + followState ATNState + ruleIndex, precedence int +} + +func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition { + + t := new(RuleTransition) + t.BaseTransition = NewBaseTransition(ruleStart) + + t.ruleIndex = ruleIndex + t.precedence = precedence + t.followState = followState + t.serializationType = TransitionRULE + t.isEpsilon = true + + return t +} + +func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +type EpsilonTransition struct { + *BaseTransition + + outermostPrecedenceReturn int +} + +func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition { + + t := new(EpsilonTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionEPSILON + t.isEpsilon = true + t.outermostPrecedenceReturn = outermostPrecedenceReturn + return t +} + +func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *EpsilonTransition) String() string { + return "epsilon" +} + +type RangeTransition struct { + *BaseTransition + + start, stop int +} + +func NewRangeTransition(target ATNState, start, stop int) *RangeTransition { + + t := new(RangeTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionRANGE + t.start = start + t.stop = stop + t.intervalSet = t.makeLabel() + return t +} + +func (t *RangeTransition) makeLabel() *IntervalSet { + s := NewIntervalSet() + s.addRange(t.start, t.stop) + return s +} + +func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= t.start && symbol <= t.stop +} + +func (t *RangeTransition) String() string { + return "'" + string(t.start) + "'..'" + string(t.stop) + "'" +} + +type AbstractPredicateTransition interface { + Transition + IAbstractPredicateTransitionFoo() +} + +type BaseAbstractPredicateTransition struct { + *BaseTransition +} + +func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition { + + t := new(BaseAbstractPredicateTransition) + t.BaseTransition = NewBaseTransition(target) + + return t +} + +func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {} + +type PredicateTransition struct { + *BaseAbstractPredicateTransition + + isCtxDependent bool + ruleIndex, predIndex int +} + +func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition { + + t := new(PredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPREDICATE + t.ruleIndex = ruleIndex + t.predIndex = predIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PredicateTransition) getPredicate() *Predicate { + return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent) +} + +func (t *PredicateTransition) String() string 
{ + return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex) +} + +type ActionTransition struct { + *BaseTransition + + isCtxDependent bool + ruleIndex, actionIndex, predIndex int +} + +func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition { + + t := new(ActionTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionACTION + t.ruleIndex = ruleIndex + t.actionIndex = actionIndex + t.isCtxDependent = isCtxDependent // e.g., $i ref in pred + t.isEpsilon = true + return t +} + +func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *ActionTransition) String() string { + return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex) +} + +type SetTransition struct { + *BaseTransition +} + +func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition { + + t := new(SetTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionSET + if set != nil { + t.intervalSet = set + } else { + t.intervalSet = NewIntervalSet() + t.intervalSet.addOne(TokenInvalidType) + } + + return t +} + +func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return t.intervalSet.contains(symbol) +} + +func (t *SetTransition) String() string { + return t.intervalSet.String() +} + +type NotSetTransition struct { + *SetTransition +} + +func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition { + + t := new(NotSetTransition) + + t.SetTransition = NewSetTransition(target, set) + + t.serializationType = TransitionNOTSET + + return t +} + +func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol) +} + +func (t *NotSetTransition) String() string { + return "~" + t.intervalSet.String() +} + +type WildcardTransition struct { + *BaseTransition +} + +func NewWildcardTransition(target ATNState) *WildcardTransition { + + t := new(WildcardTransition) + t.BaseTransition = NewBaseTransition(target) + + t.serializationType = TransitionWILDCARD + return t +} + +func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return symbol >= minVocabSymbol && symbol <= maxVocabSymbol +} + +func (t *WildcardTransition) String() string { + return "." 
+} + +type PrecedencePredicateTransition struct { + *BaseAbstractPredicateTransition + + precedence int +} + +func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition { + + t := new(PrecedencePredicateTransition) + t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target) + + t.serializationType = TransitionPRECEDENCE + t.precedence = precedence + t.isEpsilon = true + + return t +} + +func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool { + return false +} + +func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate { + return NewPrecedencePredicate(t.precedence) +} + +func (t *PrecedencePredicateTransition) String() string { + return fmt.Sprint(t.precedence) + " >= _p" +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go new file mode 100644 index 0000000000..bdeb6d7881 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go @@ -0,0 +1,256 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +// The basic notion of a tree has a parent, a payload, and a list of children. +// It is the most abstract interface for all the trees used by ANTLR. +/// + +var TreeInvalidInterval = NewInterval(-1, -2) + +type Tree interface { + GetParent() Tree + SetParent(Tree) + GetPayload() interface{} + GetChild(i int) Tree + GetChildCount() int + GetChildren() []Tree +} + +type SyntaxTree interface { + Tree + + GetSourceInterval() *Interval +} + +type ParseTree interface { + SyntaxTree + + Accept(Visitor ParseTreeVisitor) interface{} + GetText() string + + ToStringTree([]string, Recognizer) string +} + +type RuleNode interface { + ParseTree + + GetRuleContext() RuleContext + GetBaseRuleContext() *BaseRuleContext +} + +type TerminalNode interface { + ParseTree + + GetSymbol() Token +} + +type ErrorNode interface { + TerminalNode + + errorNode() +} + +type ParseTreeVisitor interface { + Visit(tree ParseTree) interface{} + VisitChildren(node RuleNode) interface{} + VisitTerminal(node TerminalNode) interface{} + VisitErrorNode(node ErrorNode) interface{} +} + +type BaseParseTreeVisitor struct{} + +var _ ParseTreeVisitor = &BaseParseTreeVisitor{} + +func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil } +func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil } + +// TODO +//func (this ParseTreeVisitor) Visit(ctx) { +// if (Utils.isArray(ctx)) { +// self := this +// return ctx.map(function(child) { return VisitAtom(self, child)}) +// } else { +// return VisitAtom(this, ctx) +// } +//} +// +//func VisitAtom(Visitor, ctx) { +// if (ctx.parser == nil) { //is terminal +// return +// } +// +// name := ctx.parser.ruleNames[ctx.ruleIndex] +// funcName := "Visit" + Utils.titleCase(name) +// +// return Visitor[funcName](ctx) +//} + +type ParseTreeListener interface { + VisitTerminal(node TerminalNode) + VisitErrorNode(node ErrorNode) + EnterEveryRule(ctx ParserRuleContext) + ExitEveryRule(ctx ParserRuleContext) +} + +type BaseParseTreeListener struct{} + +var _ ParseTreeListener = &BaseParseTreeListener{} + +func (l 
*BaseParseTreeListener) VisitTerminal(node TerminalNode) {} +func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {} +func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {} +func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {} + +type TerminalNodeImpl struct { + parentCtx RuleContext + + symbol Token +} + +var _ TerminalNode = &TerminalNodeImpl{} + +func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl { + tn := new(TerminalNodeImpl) + + tn.parentCtx = nil + tn.symbol = symbol + + return tn +} + +func (t *TerminalNodeImpl) GetChild(i int) Tree { + return nil +} + +func (t *TerminalNodeImpl) GetChildren() []Tree { + return nil +} + +func (t *TerminalNodeImpl) SetChildren(tree []Tree) { + panic("Cannot set children on terminal node") +} + +func (t *TerminalNodeImpl) GetSymbol() Token { + return t.symbol +} + +func (t *TerminalNodeImpl) GetParent() Tree { + return t.parentCtx +} + +func (t *TerminalNodeImpl) SetParent(tree Tree) { + t.parentCtx = tree.(RuleContext) +} + +func (t *TerminalNodeImpl) GetPayload() interface{} { + return t.symbol +} + +func (t *TerminalNodeImpl) GetSourceInterval() *Interval { + if t.symbol == nil { + return TreeInvalidInterval + } + tokenIndex := t.symbol.GetTokenIndex() + return NewInterval(tokenIndex, tokenIndex) +} + +func (t *TerminalNodeImpl) GetChildCount() int { + return 0 +} + +func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitTerminal(t) +} + +func (t *TerminalNodeImpl) GetText() string { + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) String() string { + if t.symbol.GetTokenType() == TokenEOF { + return "" + } + + return t.symbol.GetText() +} + +func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string { + return t.String() +} + +// Represents a token that was consumed during reSynchronization +// rather than during a valid Match operation. For example, +// we will create this kind of a node during single token insertion +// and deletion as well as during "consume until error recovery set" +// upon no viable alternative exceptions. + +type ErrorNodeImpl struct { + *TerminalNodeImpl +} + +var _ ErrorNode = &ErrorNodeImpl{} + +func NewErrorNodeImpl(token Token) *ErrorNodeImpl { + en := new(ErrorNodeImpl) + en.TerminalNodeImpl = NewTerminalNodeImpl(token) + return en +} + +func (e *ErrorNodeImpl) errorNode() {} + +func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} { + return v.VisitErrorNode(e) +} + +type ParseTreeWalker struct { +} + +func NewParseTreeWalker() *ParseTreeWalker { + return new(ParseTreeWalker) +} + +// Performs a walk on the given parse tree starting at the root and going down recursively +// with depth-first search. On each node, EnterRule is called before +// recursively walking down into child nodes, then +// ExitRule is called after the recursive call to wind up. 
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) { + switch tt := t.(type) { + case ErrorNode: + listener.VisitErrorNode(tt) + case TerminalNode: + listener.VisitTerminal(tt) + default: + p.EnterRule(listener, t.(RuleNode)) + for i := 0; i < t.GetChildCount(); i++ { + child := t.GetChild(i) + p.Walk(listener, child) + } + p.ExitRule(listener, t.(RuleNode)) + } +} + +// +// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule} +// then by triggering the event specific to the given parse tree node +// +func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + listener.EnterEveryRule(ctx) + ctx.EnterRule(listener) +} + +// Exits a grammar rule by first triggering the event specific to the given parse tree node +// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule} +// +func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) { + ctx := r.GetRuleContext().(ParserRuleContext) + ctx.ExitRule(listener) + listener.ExitEveryRule(ctx) +} + +var ParseTreeWalkerDefault = NewParseTreeWalker() diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go new file mode 100644 index 0000000000..80144ecade --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go @@ -0,0 +1,137 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. + +package antlr + +import "fmt" + +/** A set of utility routines useful for all kinds of ANTLR trees. */ + +// Print out a whole tree in LISP form. {@link //getNodeText} is used on the +// node payloads to get the text for the nodes. Detect +// parse trees and extract data appropriately. +func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string { + + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + s := TreesGetNodeText(tree, ruleNames, nil) + + s = EscapeWhitespace(s, false) + c := tree.GetChildCount() + if c == 0 { + return s + } + res := "(" + s + " " + if c > 0 { + s = TreesStringTree(tree.GetChild(0), ruleNames, nil) + res += s + } + for i := 1; i < c; i++ { + s = TreesStringTree(tree.GetChild(i), ruleNames, nil) + res += (" " + s) + } + res += ")" + return res +} + +func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string { + if recog != nil { + ruleNames = recog.GetRuleNames() + } + + if ruleNames != nil { + switch t2 := t.(type) { + case RuleNode: + t3 := t2.GetRuleContext() + altNumber := t3.GetAltNumber() + + if altNumber != ATNInvalidAltNumber { + return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber) + } + return ruleNames[t3.GetRuleIndex()] + case ErrorNode: + return fmt.Sprint(t2) + case TerminalNode: + if t2.GetSymbol() != nil { + return t2.GetSymbol().GetText() + } + } + } + + // no recog for rule names + payload := t.GetPayload() + if p2, ok := payload.(Token); ok { + return p2.GetText() + } + + return fmt.Sprint(t.GetPayload()) +} + +// Return ordered list of all children of this node +func TreesGetChildren(t Tree) []Tree { + list := make([]Tree, 0) + for i := 0; i < t.GetChildCount(); i++ { + list = append(list, t.GetChild(i)) + } + return list +} + +// Return a list of all ancestors of this node. The first node of +// list is the root and the last is the parent of this node. 
+// +func TreesgetAncestors(t Tree) []Tree { + ancestors := make([]Tree, 0) + t = t.GetParent() + for t != nil { + f := []Tree{t} + ancestors = append(f, ancestors...) + t = t.GetParent() + } + return ancestors +} + +func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree { + return TreesfindAllNodes(t, ttype, true) +} + +func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree { + return TreesfindAllNodes(t, ruleIndex, false) +} + +func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree { + nodes := make([]ParseTree, 0) + treesFindAllNodes(t, index, findTokens, &nodes) + return nodes +} + +func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) { + // check this node (the root) first + + t2, ok := t.(TerminalNode) + t3, ok2 := t.(ParserRuleContext) + + if findTokens && ok { + if t2.GetSymbol().GetTokenType() == index { + *nodes = append(*nodes, t2) + } + } else if !findTokens && ok2 { + if t3.GetRuleIndex() == index { + *nodes = append(*nodes, t3) + } + } + // check children + for i := 0; i < t.GetChildCount(); i++ { + treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes) + } +} + +func TreesDescendants(t ParseTree) []ParseTree { + nodes := []ParseTree{t} + for i := 0; i < t.GetChildCount(); i++ { + nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...) + } + return nodes +} diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go new file mode 100644 index 0000000000..bba2ffae79 --- /dev/null +++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go @@ -0,0 +1,417 @@ +// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved. +// Use of this file is governed by the BSD 3-clause license that +// can be found in the LICENSE.txt file in the project root. 
+ +package antlr + +import ( + "bytes" + "errors" + "fmt" + "sort" + "strconv" + "strings" +) + +func intMin(a, b int) int { + if a < b { + return a + } + return b +} + +func intMax(a, b int) int { + if a > b { + return a + } + return b +} + +// A simple integer stack + +type IntStack []int + +var ErrEmptyStack = errors.New("Stack is empty") + +func (s *IntStack) Pop() (int, error) { + l := len(*s) - 1 + if l < 0 { + return 0, ErrEmptyStack + } + v := (*s)[l] + *s = (*s)[0:l] + return v, nil +} + +func (s *IntStack) Push(e int) { + *s = append(*s, e) +} + +type Set struct { + data map[int][]interface{} + hashcodeFunction func(interface{}) int + equalsFunction func(interface{}, interface{}) bool +} + +func NewSet( + hashcodeFunction func(interface{}) int, + equalsFunction func(interface{}, interface{}) bool) *Set { + + s := new(Set) + + s.data = make(map[int][]interface{}) + + if hashcodeFunction != nil { + s.hashcodeFunction = hashcodeFunction + } else { + s.hashcodeFunction = standardHashFunction + } + + if equalsFunction == nil { + s.equalsFunction = standardEqualsFunction + } else { + s.equalsFunction = equalsFunction + } + + return s +} + +func standardEqualsFunction(a interface{}, b interface{}) bool { + + ac, oka := a.(comparable) + bc, okb := b.(comparable) + + if !oka || !okb { + panic("Not Comparable") + } + + return ac.equals(bc) +} + +func standardHashFunction(a interface{}) int { + if h, ok := a.(hasher); ok { + return h.hash() + } + + panic("Not Hasher") +} + +type hasher interface { + hash() int +} + +func (s *Set) length() int { + return len(s.data) +} + +func (s *Set) add(value interface{}) interface{} { + + key := s.hashcodeFunction(value) + + values := s.data[key] + + if s.data[key] != nil { + for i := 0; i < len(values); i++ { + if s.equalsFunction(value, values[i]) { + return values[i] + } + } + + s.data[key] = append(s.data[key], value) + return value + } + + v := make([]interface{}, 1, 10) + v[0] = value + s.data[key] = v + + return value +} + +func (s *Set) contains(value interface{}) bool { + + key := s.hashcodeFunction(value) + + values := s.data[key] + + if s.data[key] != nil { + for i := 0; i < len(values); i++ { + if s.equalsFunction(value, values[i]) { + return true + } + } + } + return false +} + +func (s *Set) values() []interface{} { + var l []interface{} + + for _, v := range s.data { + l = append(l, v...) 
+ } + + return l +} + +func (s *Set) String() string { + r := "" + + for _, av := range s.data { + for _, v := range av { + r += fmt.Sprint(v) + } + } + + return r +} + +type BitSet struct { + data map[int]bool +} + +func NewBitSet() *BitSet { + b := new(BitSet) + b.data = make(map[int]bool) + return b +} + +func (b *BitSet) add(value int) { + b.data[value] = true +} + +func (b *BitSet) clear(index int) { + delete(b.data, index) +} + +func (b *BitSet) or(set *BitSet) { + for k := range set.data { + b.add(k) + } +} + +func (b *BitSet) remove(value int) { + delete(b.data, value) +} + +func (b *BitSet) contains(value int) bool { + return b.data[value] +} + +func (b *BitSet) values() []int { + ks := make([]int, len(b.data)) + i := 0 + for k := range b.data { + ks[i] = k + i++ + } + sort.Ints(ks) + return ks +} + +func (b *BitSet) minValue() int { + min := 2147483647 + + for k := range b.data { + if k < min { + min = k + } + } + + return min +} + +func (b *BitSet) equals(other interface{}) bool { + otherBitSet, ok := other.(*BitSet) + if !ok { + return false + } + + if len(b.data) != len(otherBitSet.data) { + return false + } + + for k, v := range b.data { + if otherBitSet.data[k] != v { + return false + } + } + + return true +} + +func (b *BitSet) length() int { + return len(b.data) +} + +func (b *BitSet) String() string { + vals := b.values() + valsS := make([]string, len(vals)) + + for i, val := range vals { + valsS[i] = strconv.Itoa(val) + } + return "{" + strings.Join(valsS, ", ") + "}" +} + +type AltDict struct { + data map[string]interface{} +} + +func NewAltDict() *AltDict { + d := new(AltDict) + d.data = make(map[string]interface{}) + return d +} + +func (a *AltDict) Get(key string) interface{} { + key = "k-" + key + return a.data[key] +} + +func (a *AltDict) put(key string, value interface{}) { + key = "k-" + key + a.data[key] = value +} + +func (a *AltDict) values() []interface{} { + vs := make([]interface{}, len(a.data)) + i := 0 + for _, v := range a.data { + vs[i] = v + i++ + } + return vs +} + +type DoubleDict struct { + data map[int]map[int]interface{} +} + +func NewDoubleDict() *DoubleDict { + dd := new(DoubleDict) + dd.data = make(map[int]map[int]interface{}) + return dd +} + +func (d *DoubleDict) Get(a, b int) interface{} { + data := d.data[a] + + if data == nil { + return nil + } + + return data[b] +} + +func (d *DoubleDict) set(a, b int, o interface{}) { + data := d.data[a] + + if data == nil { + data = make(map[int]interface{}) + d.data[a] = data + } + + data[b] = o +} + +func EscapeWhitespace(s string, escapeSpaces bool) string { + + s = strings.Replace(s, "\t", "\\t", -1) + s = strings.Replace(s, "\n", "\\n", -1) + s = strings.Replace(s, "\r", "\\r", -1) + if escapeSpaces { + s = strings.Replace(s, " ", "\u00B7", -1) + } + return s +} + +func TerminalNodeToStringArray(sa []TerminalNode) []string { + st := make([]string, len(sa)) + + for i, s := range sa { + st[i] = fmt.Sprintf("%v", s) + } + + return st +} + +func PrintArrayJavaStyle(sa []string) string { + var buffer bytes.Buffer + + buffer.WriteString("[") + + for i, s := range sa { + buffer.WriteString(s) + if i != len(sa)-1 { + buffer.WriteString(", ") + } + } + + buffer.WriteString("]") + + return buffer.String() +} + +// The following routines were lifted from bits.rotate* available in Go 1.9. + +const uintSize = 32 << (^uint(0) >> 32 & 1) // 32 or 64 + +// rotateLeft returns the value of x rotated left by (k mod UintSize) bits. +// To rotate x right by k bits, call RotateLeft(x, -k). 
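As the comment above notes, these helpers were lifted from Go 1.9's `math/bits`; on a modern toolchain the standard library provides the same semantics directly. A small illustrative sketch, not part of the vendored patch:

```go
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint32(0x80000001)
	// Rotation is modular in the word size, and a negative k rotates
	// right, matching the vendored rotateLeft32 below.
	fmt.Printf("%#08x\n", bits.RotateLeft32(x, 15))  // 0x00c000
	fmt.Printf("%#08x\n", bits.RotateLeft32(x, -15)) // 0x030000
}
```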
+func rotateLeft(x uint, k int) uint {
+	if uintSize == 32 {
+		return uint(rotateLeft32(uint32(x), k))
+	}
+	return uint(rotateLeft64(uint64(x), k))
+}
+
+// rotateLeft32 returns the value of x rotated left by (k mod 32) bits.
+func rotateLeft32(x uint32, k int) uint32 {
+	const n = 32
+	s := uint(k) & (n - 1)
+	return x<<s | x>>(n-s)
+}
+
+// rotateLeft64 returns the value of x rotated left by (k mod 64) bits.
+func rotateLeft64(x uint64, k int) uint64 {
+	const n = 64
+	s := uint(k) & (n - 1)
+	return x<<s | x>>(n-s)
+}
+
+
+// murmur hash
+const (
+	c1_32 uint = 0xCC9E2D51
+	c2_32 uint = 0x1B873593
+	n1_32 uint = 0xE6546B64
+)
+
+func murmurInit(seed int) int {
+	return seed
+}
+
+func murmurUpdate(h1 int, k1 int) int {
+	var k1u uint
+	k1u = uint(k1) * c1_32
+	k1u = rotateLeft(k1u, 15)
+	k1u *= c2_32
+
+	var h1u = uint(h1) ^ k1u
+	k1u = rotateLeft(k1u, 13)
+	h1u = h1u*5 + 0xe6546b64
+	return int(h1u)
+}
+
+func murmurFinish(h1 int, numberOfWords int) int {
+	var h1u uint = uint(h1)
+	h1u ^= uint(numberOfWords * 4)
+	h1u ^= h1u >> 16
+	h1u *= uint(0x85ebca6b)
+	h1u ^= h1u >> 13
+	h1u *= 0xc2b2ae35
+	h1u ^= h1u >> 16
+
+	return int(h1u)
+}
diff --git a/vendor/github.com/apex/log/.gitignore b/vendor/github.com/apex/log/.gitignore
new file mode 100644
index 0000000000..7a6353d673
--- /dev/null
+++ b/vendor/github.com/apex/log/.gitignore
@@ -0,0 +1 @@
+.envrc
diff --git a/vendor/github.com/apex/log/History.md b/vendor/github.com/apex/log/History.md
new file mode 100644
index 0000000000..a303177b3c
--- /dev/null
+++ b/vendor/github.com/apex/log/History.md
@@ -0,0 +1,75 @@
+
+v1.9.0 / 2020-08-18
+===================
+
+  * add `WithDuration()` method to record a duration as milliseconds
+  * add: ignore nil errors in `WithError()`
+  * change trace duration to milliseconds (arguably a breaking change)
+
+v1.8.0 / 2020-08-05
+===================
+
+  * refactor apexlogs handler to not make the AddEvents() call if there are no events to flush
+
+v1.7.1 / 2020-08-05
+===================
+
+  * fix potential nil panic in apexlogs handler
+
+v1.7.0 / 2020-08-03
+===================
+
+  * add FlushSync() to apexlogs handler
+
+v1.6.0 / 2020-07-13
+===================
+
+  * update apex/logs dep to v1.0.0
+  * docs: mention that Flush() is non-blocking now, use Close()
+
+v1.5.0 / 2020-07-11
+===================
+
+  * add buffering to Apex Logs handler
+
+v1.4.0 / 2020-06-16
+===================
+
+  * add AuthToken to apexlogs handler
+
+v1.3.0 / 2020-05-26
+===================
+
+  * change FromContext() to always return a logger
+
+v1.2.0 / 2020-05-26
+===================
+
+  * add log.NewContext() and log.FromContext().
Closes #78 + +v1.1.4 / 2020-04-22 +=================== + + * add apexlogs HTTPClient support + +v1.1.3 / 2020-04-22 +=================== + + * add events len check before flushing to apexlogs handler + +v1.1.2 / 2020-01-29 +=================== + + * refactor apexlogs handler to use github.com/apex/logs client + +v1.1.1 / 2019-06-24 +=================== + + * add go.mod + * add rough pass at apexlogs handler + +v1.1.0 / 2018-10-11 +=================== + + * fix: cli handler to show non-string fields appropriately + * fix: cli using fatih/color to better support windows diff --git a/vendor/github.com/apex/log/LICENSE b/vendor/github.com/apex/log/LICENSE new file mode 100644 index 0000000000..af71800684 --- /dev/null +++ b/vendor/github.com/apex/log/LICENSE @@ -0,0 +1,22 @@ +(The MIT License) + +Copyright (c) 2015 TJ Holowaychuk tj@tjholowaychuk.com + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +'Software'), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/apex/log/Makefile b/vendor/github.com/apex/log/Makefile new file mode 100644 index 0000000000..f948e88ec1 --- /dev/null +++ b/vendor/github.com/apex/log/Makefile @@ -0,0 +1,2 @@ + +include github.com/tj/make/golang diff --git a/vendor/github.com/apex/log/Readme.md b/vendor/github.com/apex/log/Readme.md new file mode 100644 index 0000000000..aa5c621f5c --- /dev/null +++ b/vendor/github.com/apex/log/Readme.md @@ -0,0 +1,61 @@ + +![Structured logging for golang](assets/title.png) + +Package log implements a simple structured logging API inspired by Logrus, designed with centralization in mind. Read more on [Medium](https://medium.com/@tjholowaychuk/apex-log-e8d9627f4a9a#.rav8yhkud). + +## Handlers + +- __apexlogs__ – handler for [Apex Logs](https://apex.sh/logs/) +- __cli__ – human-friendly CLI output +- __discard__ – discards all logs +- __es__ – Elasticsearch handler +- __graylog__ – Graylog handler +- __json__ – JSON output handler +- __kinesis__ – AWS Kinesis handler +- __level__ – level filter handler +- __logfmt__ – logfmt plain-text formatter +- __memory__ – in-memory handler for tests +- __multi__ – fan-out to multiple handlers +- __papertrail__ – Papertrail handler +- __text__ – human-friendly colored output +- __delta__ – outputs the delta between log calls and spinner + +## Example + +Example using the [Apex Logs](https://apex.sh/logs/) handler. 
+ +```go +package main + +import ( + "errors" + "time" + + "github.com/apex/log" +) + +func main() { + ctx := log.WithFields(log.Fields{ + "file": "something.png", + "type": "image/png", + "user": "tobi", + }) + + for range time.Tick(time.Millisecond * 200) { + ctx.Info("upload") + ctx.Info("upload complete") + ctx.Warn("upload retry") + ctx.WithError(errors.New("unauthorized")).Error("upload failed") + ctx.Errorf("failed to upload %s", "img.png") + } +} +``` + +--- + +[![Build Status](https://semaphoreci.com/api/v1/projects/d8a8b1c0-45b0-4b89-b066-99d788d0b94c/642077/badge.svg)](https://semaphoreci.com/tj/log) +[![GoDoc](https://godoc.org/github.com/apex/log?status.svg)](https://godoc.org/github.com/apex/log) +![](https://img.shields.io/badge/license-MIT-blue.svg) +![](https://img.shields.io/badge/status-stable-green.svg) + + diff --git a/vendor/github.com/apex/log/context.go b/vendor/github.com/apex/log/context.go new file mode 100644 index 0000000000..290ae41431 --- /dev/null +++ b/vendor/github.com/apex/log/context.go @@ -0,0 +1,19 @@ +package log + +import "context" + +// logKey is a private context key. +type logKey struct{} + +// NewContext returns a new context with logger. +func NewContext(ctx context.Context, v Interface) context.Context { + return context.WithValue(ctx, logKey{}, v) +} + +// FromContext returns the logger from context, or log.Log. +func FromContext(ctx context.Context) Interface { + if v, ok := ctx.Value(logKey{}).(Interface); ok { + return v + } + return Log +} diff --git a/vendor/github.com/apex/log/default.go b/vendor/github.com/apex/log/default.go new file mode 100644 index 0000000000..2213486238 --- /dev/null +++ b/vendor/github.com/apex/log/default.go @@ -0,0 +1,45 @@ +package log + +import ( + "bytes" + "fmt" + "log" + "sort" +) + +// field used for sorting. +type field struct { + Name string + Value interface{} +} + +// by sorts fields by name. +type byName []field + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } + +// handleStdLog outpouts to the stlib log. +func handleStdLog(e *Entry) error { + level := levelNames[e.Level] + + var fields []field + + for k, v := range e.Fields { + fields = append(fields, field{k, v}) + } + + sort.Sort(byName(fields)) + + var b bytes.Buffer + fmt.Fprintf(&b, "%5s %-25s", level, e.Message) + + for _, f := range fields { + fmt.Fprintf(&b, " %s=%v", f.Name, f.Value) + } + + log.Println(b.String()) + + return nil +} diff --git a/vendor/github.com/apex/log/doc.go b/vendor/github.com/apex/log/doc.go new file mode 100644 index 0000000000..0331e8e163 --- /dev/null +++ b/vendor/github.com/apex/log/doc.go @@ -0,0 +1,10 @@ +/* +Package log implements a simple structured logging API designed with few assumptions. Designed for +centralized logging solutions such as Kinesis which require encoding and decoding before fanning-out +to handlers. + +You may use this package with inline handlers, much like Logrus, however a centralized solution +is recommended so that apps do not need to be re-deployed to add or remove logging service +providers. +*/ +package log diff --git a/vendor/github.com/apex/log/entry.go b/vendor/github.com/apex/log/entry.go new file mode 100644 index 0000000000..d8982956a3 --- /dev/null +++ b/vendor/github.com/apex/log/entry.go @@ -0,0 +1,182 @@ +package log + +import ( + "fmt" + "os" + "strings" + "time" +) + +// assert interface compliance. 
+var _ Interface = (*Entry)(nil) + +// Now returns the current time. +var Now = time.Now + +// Entry represents a single log entry. +type Entry struct { + Logger *Logger `json:"-"` + Fields Fields `json:"fields"` + Level Level `json:"level"` + Timestamp time.Time `json:"timestamp"` + Message string `json:"message"` + start time.Time + fields []Fields +} + +// NewEntry returns a new entry for `log`. +func NewEntry(log *Logger) *Entry { + return &Entry{ + Logger: log, + } +} + +// WithFields returns a new entry with `fields` set. +func (e *Entry) WithFields(fields Fielder) *Entry { + f := []Fields{} + f = append(f, e.fields...) + f = append(f, fields.Fields()) + return &Entry{ + Logger: e.Logger, + fields: f, + } +} + +// WithField returns a new entry with the `key` and `value` set. +func (e *Entry) WithField(key string, value interface{}) *Entry { + return e.WithFields(Fields{key: value}) +} + +// WithDuration returns a new entry with the "duration" field set +// to the given duration in milliseconds. +func (e *Entry) WithDuration(d time.Duration) *Entry { + return e.WithField("duration", d.Milliseconds()) +} + +// WithError returns a new entry with the "error" set to `err`. +// +// The given error may implement .Fielder, if it does the method +// will add all its `.Fields()` into the returned entry. +func (e *Entry) WithError(err error) *Entry { + if err == nil { + return e + } + + ctx := e.WithField("error", err.Error()) + + if s, ok := err.(stackTracer); ok { + frame := s.StackTrace()[0] + + name := fmt.Sprintf("%n", frame) + file := fmt.Sprintf("%+s", frame) + line := fmt.Sprintf("%d", frame) + + parts := strings.Split(file, "\n\t") + if len(parts) > 1 { + file = parts[1] + } + + ctx = ctx.WithField("source", fmt.Sprintf("%s: %s:%s", name, file, line)) + } + + if f, ok := err.(Fielder); ok { + ctx = ctx.WithFields(f.Fields()) + } + + return ctx +} + +// Debug level message. +func (e *Entry) Debug(msg string) { + e.Logger.log(DebugLevel, e, msg) +} + +// Info level message. +func (e *Entry) Info(msg string) { + e.Logger.log(InfoLevel, e, msg) +} + +// Warn level message. +func (e *Entry) Warn(msg string) { + e.Logger.log(WarnLevel, e, msg) +} + +// Error level message. +func (e *Entry) Error(msg string) { + e.Logger.log(ErrorLevel, e, msg) +} + +// Fatal level message, followed by an exit. +func (e *Entry) Fatal(msg string) { + e.Logger.log(FatalLevel, e, msg) + os.Exit(1) +} + +// Debugf level formatted message. +func (e *Entry) Debugf(msg string, v ...interface{}) { + e.Debug(fmt.Sprintf(msg, v...)) +} + +// Infof level formatted message. +func (e *Entry) Infof(msg string, v ...interface{}) { + e.Info(fmt.Sprintf(msg, v...)) +} + +// Warnf level formatted message. +func (e *Entry) Warnf(msg string, v ...interface{}) { + e.Warn(fmt.Sprintf(msg, v...)) +} + +// Errorf level formatted message. +func (e *Entry) Errorf(msg string, v ...interface{}) { + e.Error(fmt.Sprintf(msg, v...)) +} + +// Fatalf level formatted message, followed by an exit. +func (e *Entry) Fatalf(msg string, v ...interface{}) { + e.Fatal(fmt.Sprintf(msg, v...)) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func (e *Entry) Trace(msg string) *Entry { + e.Info(msg) + v := e.WithFields(e.Fields) + v.Message = msg + v.start = time.Now() + return v +} + +// Stop should be used with Trace, to fire off the completion message. When +// an `err` is passed the "error" field is set, and the log level is error. 
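The Trace/Stop pairing described here (Stop is implemented next) is typically used with `defer`. A minimal sketch, not part of the vendored patch, assuming only the vendored apex/log API:

```go
package main

import (
	"errors"

	"github.com/apex/log"
)

func doWork() (err error) {
	// Logs "doing work" at Info on entry; on return, Stop emits a
	// completion line with a "duration" field, upgraded to Error with
	// an "error" field when err is non-nil.
	defer log.Trace("doing work").Stop(&err)
	return errors.New("boom")
}

func main() { _ = doWork() }
```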
+func (e *Entry) Stop(err *error) { + if err == nil || *err == nil { + e.WithDuration(time.Since(e.start)).Info(e.Message) + } else { + e.WithDuration(time.Since(e.start)).WithError(*err).Error(e.Message) + } +} + +// mergedFields returns the fields list collapsed into a single map. +func (e *Entry) mergedFields() Fields { + f := Fields{} + + for _, fields := range e.fields { + for k, v := range fields { + f[k] = v + } + } + + return f +} + +// finalize returns a copy of the Entry with Fields merged. +func (e *Entry) finalize(level Level, msg string) *Entry { + return &Entry{ + Logger: e.Logger, + Fields: e.mergedFields(), + Level: level, + Message: msg, + Timestamp: Now(), + } +} diff --git a/vendor/github.com/apex/log/go.mod b/vendor/github.com/apex/log/go.mod new file mode 100644 index 0000000000..870b30dbc6 --- /dev/null +++ b/vendor/github.com/apex/log/go.mod @@ -0,0 +1,32 @@ +module github.com/apex/log + +go 1.12 + +require ( + github.com/apex/logs v1.0.0 + github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a + github.com/aphistic/sweet v0.2.0 // indirect + github.com/aws/aws-sdk-go v1.20.6 + github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 + github.com/fatih/color v1.7.0 + github.com/go-logfmt/logfmt v0.4.0 + github.com/golang/protobuf v1.3.1 // indirect + github.com/google/uuid v1.1.1 // indirect + github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 // indirect + github.com/kr/pretty v0.2.0 // indirect + github.com/mattn/go-colorable v0.1.2 + github.com/pkg/errors v0.8.1 + github.com/rogpeppe/fastuuid v1.1.0 + github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 // indirect + github.com/smartystreets/gunit v1.0.0 // indirect + github.com/stretchr/testify v1.6.1 + github.com/tj/assert v0.0.3 + github.com/tj/go-buffer v1.1.0 + github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2 + github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b + github.com/tj/go-spin v1.1.0 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 // indirect + golang.org/x/text v0.3.2 // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect + gopkg.in/yaml.v2 v2.2.2 // indirect +) diff --git a/vendor/github.com/apex/log/go.sum b/vendor/github.com/apex/log/go.sum new file mode 100644 index 0000000000..c491698db4 --- /dev/null +++ b/vendor/github.com/apex/log/go.sum @@ -0,0 +1,117 @@ +github.com/apex/logs v1.0.0 h1:adOwhOTeXzZTnVuEK13wuJNBFutP0sOfutRS8NY+G6A= +github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a h1:2KLQMJ8msqoPHIPDufkxVcoTtcmE5+1sL9950m4R9Pk= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0 h1:I4z+fAUqvKfvZV/CHi5dV0QuwbmIvYYFDjG0Ss5QpAs= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/aws/aws-sdk-go v1.20.6 h1:kmy4Gvdlyez1fV4kw5RYxZzWKVyuHZHgPWeU/YvRsV4= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7 h1:K//n/AqR5HjG3qxbrBCL4vJPW0MVFSs9CPK1OOJdRME= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4= +github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/errors v0.8.1 
h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/fastuuid v1.1.0 h1:INyGLmTCMGFr6OVIb977ghJvABML2CMVjPoRfNDdYDo= +github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/smartystreets/assertions v1.0.0 h1:UVQPSSmc3qtTi+zPPkCXvZX9VvW/xT/NsRvKfwY81a8= +github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9 h1:hp2CYQUINdZMHdvTdXtPOY2ainKl4IoMcpAXEf2xj3Q= +github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= +github.com/smartystreets/gunit v1.0.0 h1:RyPDUFcJbvtXlhJPk7v+wnxZRY2EUokhEYl2EJOPToI= +github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0 h1:Rw8kxzWo1mr6FSaYXjQELRe88y2KdfynXdnK72rdjtA= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-buffer v1.1.0 h1:Lo2OsPHlIxXF24zApe15AbK3bJLAOvkkxEA6Ux4c47M= +github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2 h1:eGaGNxrtoZf/mBURsnNQKDR7u50Klgcf2eFDQEnc8Bc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b h1:m74UWYy+HBs+jMFR9mdZU6shPewugMyH5+GV6LNgW8w= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0 h1:lhdWZsvImxvZ3q1C5OIB7d72DuOwP4O2NdBg9PyzNds= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734 h1:p/H982KKEjUnLJkM3tt/LemDnOc1GiZL5FCVlORJ5zo= +golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223 h1:DH4skfRX4EBpamg7iV4ZlCpblAHI6s6TDM39bFZumv8= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c h1:grhR+C34yXImVGp7EzNk+DTIk+323eIUWOmEevy6bDo= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/apex/log/interface.go b/vendor/github.com/apex/log/interface.go new file mode 100644 index 0000000000..9daa046532 --- /dev/null +++ b/vendor/github.com/apex/log/interface.go @@ -0,0 +1,22 @@ +package log + +import "time" + +// Interface represents the API of both Logger and Entry. 
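Because both `*Logger` and `*Entry` satisfy `Interface` (declared next), helpers can accept either interchangeably. An illustrative sketch, not part of the vendored patch; `report` is a hypothetical helper:

```go
package main

import "github.com/apex/log"

// report accepts anything satisfying log.Interface, so callers may pass
// the package-level logger or a pre-scoped entry.
func report(l log.Interface, host string) {
	l.WithField("host", host).Info("probe ok")
}

func main() {
	report(log.Log, "a.example.com")                       // package-level logger
	report(log.WithField("region", "eu"), "b.example.com") // pre-scoped *Entry
}
```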
+type Interface interface { + WithFields(Fielder) *Entry + WithField(string, interface{}) *Entry + WithDuration(time.Duration) *Entry + WithError(error) *Entry + Debug(string) + Info(string) + Warn(string) + Error(string) + Fatal(string) + Debugf(string, ...interface{}) + Infof(string, ...interface{}) + Warnf(string, ...interface{}) + Errorf(string, ...interface{}) + Fatalf(string, ...interface{}) + Trace(string) *Entry +} diff --git a/vendor/github.com/apex/log/levels.go b/vendor/github.com/apex/log/levels.go new file mode 100644 index 0000000000..7d43a43609 --- /dev/null +++ b/vendor/github.com/apex/log/levels.go @@ -0,0 +1,81 @@ +package log + +import ( + "bytes" + "errors" + "strings" +) + +// ErrInvalidLevel is returned if the severity level is invalid. +var ErrInvalidLevel = errors.New("invalid level") + +// Level of severity. +type Level int + +// Log levels. +const ( + InvalidLevel Level = iota - 1 + DebugLevel + InfoLevel + WarnLevel + ErrorLevel + FatalLevel +) + +var levelNames = [...]string{ + DebugLevel: "debug", + InfoLevel: "info", + WarnLevel: "warn", + ErrorLevel: "error", + FatalLevel: "fatal", +} + +var levelStrings = map[string]Level{ + "debug": DebugLevel, + "info": InfoLevel, + "warn": WarnLevel, + "warning": WarnLevel, + "error": ErrorLevel, + "fatal": FatalLevel, +} + +// String implementation. +func (l Level) String() string { + return levelNames[l] +} + +// MarshalJSON implementation. +func (l Level) MarshalJSON() ([]byte, error) { + return []byte(`"` + l.String() + `"`), nil +} + +// UnmarshalJSON implementation. +func (l *Level) UnmarshalJSON(b []byte) error { + v, err := ParseLevel(string(bytes.Trim(b, `"`))) + if err != nil { + return err + } + + *l = v + return nil +} + +// ParseLevel parses level string. +func ParseLevel(s string) (Level, error) { + l, ok := levelStrings[strings.ToLower(s)] + if !ok { + return InvalidLevel, ErrInvalidLevel + } + + return l, nil +} + +// MustParseLevel parses level string or panics. +func MustParseLevel(s string) Level { + l, err := ParseLevel(s) + if err != nil { + panic("invalid log level") + } + + return l +} diff --git a/vendor/github.com/apex/log/logger.go b/vendor/github.com/apex/log/logger.go new file mode 100644 index 0000000000..c7d9b730be --- /dev/null +++ b/vendor/github.com/apex/log/logger.go @@ -0,0 +1,156 @@ +package log + +import ( + stdlog "log" + "sort" + "time" +) + +// assert interface compliance. +var _ Interface = (*Logger)(nil) + +// Fielder is an interface for providing fields to custom types. +type Fielder interface { + Fields() Fields +} + +// Fields represents a map of entry level data used for structured logging. +type Fields map[string]interface{} + +// Fields implements Fielder. +func (f Fields) Fields() Fields { + return f +} + +// Get field value by name. +func (f Fields) Get(name string) interface{} { + return f[name] +} + +// Names returns field names sorted. +func (f Fields) Names() (v []string) { + for k := range f { + v = append(v, k) + } + + sort.Strings(v) + return +} + +// The HandlerFunc type is an adapter to allow the use of ordinary functions as +// log handlers. If f is a function with the appropriate signature, +// HandlerFunc(f) is a Handler object that calls f. +type HandlerFunc func(*Entry) error + +// HandleLog calls f(e). +func (f HandlerFunc) HandleLog(e *Entry) error { + return f(e) +} + +// Handler is used to handle log events, outputting them to +// stdio or sending them to remote services. See the "handlers" +// directory for implementations. 
+// +// It is left up to Handlers to implement thread-safety. +type Handler interface { + HandleLog(*Entry) error +} + +// Logger represents a logger with configurable Level and Handler. +type Logger struct { + Handler Handler + Level Level +} + +// WithFields returns a new entry with `fields` set. +func (l *Logger) WithFields(fields Fielder) *Entry { + return NewEntry(l).WithFields(fields.Fields()) +} + +// WithField returns a new entry with the `key` and `value` set. +// +// Note that the `key` should not have spaces in it - use camel +// case or underscores +func (l *Logger) WithField(key string, value interface{}) *Entry { + return NewEntry(l).WithField(key, value) +} + +// WithDuration returns a new entry with the "duration" field set +// to the given duration in milliseconds. +func (l *Logger) WithDuration(d time.Duration) *Entry { + return NewEntry(l).WithDuration(d) +} + +// WithError returns a new entry with the "error" set to `err`. +func (l *Logger) WithError(err error) *Entry { + return NewEntry(l).WithError(err) +} + +// Debug level message. +func (l *Logger) Debug(msg string) { + NewEntry(l).Debug(msg) +} + +// Info level message. +func (l *Logger) Info(msg string) { + NewEntry(l).Info(msg) +} + +// Warn level message. +func (l *Logger) Warn(msg string) { + NewEntry(l).Warn(msg) +} + +// Error level message. +func (l *Logger) Error(msg string) { + NewEntry(l).Error(msg) +} + +// Fatal level message, followed by an exit. +func (l *Logger) Fatal(msg string) { + NewEntry(l).Fatal(msg) +} + +// Debugf level formatted message. +func (l *Logger) Debugf(msg string, v ...interface{}) { + NewEntry(l).Debugf(msg, v...) +} + +// Infof level formatted message. +func (l *Logger) Infof(msg string, v ...interface{}) { + NewEntry(l).Infof(msg, v...) +} + +// Warnf level formatted message. +func (l *Logger) Warnf(msg string, v ...interface{}) { + NewEntry(l).Warnf(msg, v...) +} + +// Errorf level formatted message. +func (l *Logger) Errorf(msg string, v ...interface{}) { + NewEntry(l).Errorf(msg, v...) +} + +// Fatalf level formatted message, followed by an exit. +func (l *Logger) Fatalf(msg string, v ...interface{}) { + NewEntry(l).Fatalf(msg, v...) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func (l *Logger) Trace(msg string) *Entry { + return NewEntry(l).Trace(msg) +} + +// log the message, invoking the handler. We clone the entry here +// to bypass the overhead in Entry methods when the level is not +// met. +func (l *Logger) log(level Level, e *Entry, msg string) { + if level < l.Level { + return + } + + if err := l.Handler.HandleLog(e.finalize(level, msg)); err != nil { + stdlog.Printf("error logging: %s", err) + } +} diff --git a/vendor/github.com/apex/log/pkg.go b/vendor/github.com/apex/log/pkg.go new file mode 100644 index 0000000000..872eae6c27 --- /dev/null +++ b/vendor/github.com/apex/log/pkg.go @@ -0,0 +1,108 @@ +package log + +import "time" + +// singletons ftw? +var Log Interface = &Logger{ + Handler: HandlerFunc(handleStdLog), + Level: InfoLevel, +} + +// SetHandler sets the handler. This is not thread-safe. +// The default handler outputs to the stdlib log. +func SetHandler(h Handler) { + if logger, ok := Log.(*Logger); ok { + logger.Handler = h + } +} + +// SetLevel sets the log level. This is not thread-safe. +func SetLevel(l Level) { + if logger, ok := Log.(*Logger); ok { + logger.Level = l + } +} + +// SetLevelFromString sets the log level from a string, panicing when invalid. 
This is not thread-safe. +func SetLevelFromString(s string) { + if logger, ok := Log.(*Logger); ok { + logger.Level = MustParseLevel(s) + } +} + +// WithFields returns a new entry with `fields` set. +func WithFields(fields Fielder) *Entry { + return Log.WithFields(fields) +} + +// WithField returns a new entry with the `key` and `value` set. +func WithField(key string, value interface{}) *Entry { + return Log.WithField(key, value) +} + +// WithDuration returns a new entry with the "duration" field set +// to the given duration in milliseconds. +func WithDuration(d time.Duration) *Entry { + return Log.WithDuration(d) +} + +// WithError returns a new entry with the "error" set to `err`. +func WithError(err error) *Entry { + return Log.WithError(err) +} + +// Debug level message. +func Debug(msg string) { + Log.Debug(msg) +} + +// Info level message. +func Info(msg string) { + Log.Info(msg) +} + +// Warn level message. +func Warn(msg string) { + Log.Warn(msg) +} + +// Error level message. +func Error(msg string) { + Log.Error(msg) +} + +// Fatal level message, followed by an exit. +func Fatal(msg string) { + Log.Fatal(msg) +} + +// Debugf level formatted message. +func Debugf(msg string, v ...interface{}) { + Log.Debugf(msg, v...) +} + +// Infof level formatted message. +func Infof(msg string, v ...interface{}) { + Log.Infof(msg, v...) +} + +// Warnf level formatted message. +func Warnf(msg string, v ...interface{}) { + Log.Warnf(msg, v...) +} + +// Errorf level formatted message. +func Errorf(msg string, v ...interface{}) { + Log.Errorf(msg, v...) +} + +// Fatalf level formatted message, followed by an exit. +func Fatalf(msg string, v ...interface{}) { + Log.Fatalf(msg, v...) +} + +// Trace returns a new entry with a Stop method to fire off +// a corresponding completion log, useful with defer. +func Trace(msg string) *Entry { + return Log.Trace(msg) +} diff --git a/vendor/github.com/apex/log/stack.go b/vendor/github.com/apex/log/stack.go new file mode 100644 index 0000000000..57efe3262e --- /dev/null +++ b/vendor/github.com/apex/log/stack.go @@ -0,0 +1,8 @@ +package log + +import "github.com/pkg/errors" + +// stackTracer interface. 
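The `HandlerFunc` adapter and the package-level setters vendored above make one-off sinks easy to wire up. An illustrative sketch, not part of the vendored patch (the `stackTracer` declaration resumes below):

```go
package main

import (
	"fmt"

	"github.com/apex/log"
)

func main() {
	// HandlerFunc adapts an ordinary function into a Handler, so a
	// throwaway sink needs no named type.
	log.SetHandler(log.HandlerFunc(func(e *log.Entry) error {
		fmt.Println(e.Level, e.Message, e.Fields)
		return nil
	}))
	log.SetLevel(log.WarnLevel)
	log.Info("dropped") // below WarnLevel; never reaches the handler
	log.WithField("attempt", 1).Warn("kept")
}
```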
+type stackTracer interface { + StackTrace() errors.StackTrace +} diff --git a/vendor/github.com/bits-and-blooms/bitset/.gitignore b/vendor/github.com/bits-and-blooms/bitset/.gitignore new file mode 100644 index 0000000000..5c204d28b0 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +target diff --git a/vendor/github.com/bits-and-blooms/bitset/.travis.yml b/vendor/github.com/bits-and-blooms/bitset/.travis.yml new file mode 100644 index 0000000000..094aa5ce07 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/.travis.yml @@ -0,0 +1,37 @@ +language: go + +sudo: false + +branches: + except: + - release + +branches: + only: + - master + - travis + +go: + - "1.11.x" + - tip + +matrix: + allow_failures: + - go: tip + +before_install: + - if [ -n "$GH_USER" ]; then git config --global github.user ${GH_USER}; fi; + - if [ -n "$GH_TOKEN" ]; then git config --global github.token ${GH_TOKEN}; fi; + - go get github.com/mattn/goveralls + +before_script: + - make deps + +script: + - make qa + +after_failure: + - cat ./target/test/report.xml + +after_success: + - if [ "$TRAVIS_GO_VERSION" = "1.11.1" ]; then $HOME/gopath/bin/goveralls -covermode=count -coverprofile=target/report/coverage.out -service=travis-ci; fi; diff --git a/vendor/github.com/bits-and-blooms/bitset/LICENSE b/vendor/github.com/bits-and-blooms/bitset/LICENSE new file mode 100644 index 0000000000..59cab8a939 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014 Will Fitzgerald. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
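Before the bitset README below, one sketch tying `stack.go` back to `Entry.WithError`: errors created by `github.com/pkg/errors` carry a stack trace, so the `stackTracer` hook can attach a `source` field alongside `error`. Illustrative only, not part of the vendored patch:

```go
package main

import (
	"github.com/apex/log"
	"github.com/pkg/errors"
)

func main() {
	// pkg/errors records a stack trace at creation, so WithError adds
	// both an "error" field and a "source" field for the origin frame.
	err := errors.New("connection refused")
	log.WithError(err).Error("dial failed")
}
```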
diff --git a/vendor/github.com/bits-and-blooms/bitset/README.md b/vendor/github.com/bits-and-blooms/bitset/README.md new file mode 100644 index 0000000000..97e83071e4 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/README.md @@ -0,0 +1,93 @@ +# bitset + +*Go language library to map between non-negative integers and boolean values* + +[![Test](https://github.com/bits-and-blooms/bitset/workflows/Test/badge.svg)](https://github.com/willf/bitset/actions?query=workflow%3ATest) +[![Go Report Card](https://goreportcard.com/badge/github.com/willf/bitset)](https://goreportcard.com/report/github.com/willf/bitset) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/bits-and-blooms/bitset?tab=doc)](https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc) + + +## Description + +Package bitset implements bitsets, a mapping between non-negative integers and boolean values. +It should be more efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing individual integers. + +But it also provides set intersection, union, difference, complement, and symmetric operations, as well as tests to check whether any, all, or no bits are set, and querying a bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the memory allocation is approximately Max bits, where Max is the largest set bit. BitSets are never shrunk. On creation, a hint can be given for the number of bits that will be used. + +Many of the methods, including Set, Clear, and Flip, return a BitSet pointer, which allows for chaining. + +### Example use: + +```go +package main + +import ( + "fmt" + "math/rand" + + "github.com/bits-and-blooms/bitset" +) + +func main() { + fmt.Printf("Hello from BitSet!\n") + var b bitset.BitSet + // play some Go Fish + for i := 0; i < 100; i++ { + card1 := uint(rand.Intn(52)) + card2 := uint(rand.Intn(52)) + b.Set(card1) + if b.Test(card2) { + fmt.Println("Go Fish!") + } + b.Clear(card1) + } + + // Chaining + b.Set(10).Set(11) + + for i, e := b.NextSet(0); e; i, e = b.NextSet(i + 1) { + fmt.Println("The following bit is set:", i) + } + if b.Intersection(bitset.New(100).Set(10)).Count() == 1 { + fmt.Println("Intersection works.") + } else { + fmt.Println("Intersection doesn't work???") + } +} +``` + +As an alternative to BitSets, one should check out the 'big' package, which provides a (less set-theoretical) view of bitsets. + +Package documentation is at: https://pkg.go.dev/github.com/bits-and-blooms/bitset?tab=doc + +## Memory Usage + +The memory usage of a bitset using N bits is at least N/8 bytes. The number of bits in a bitset is at least as large as one plus the greatest bit index you have accessed. Thus it is possible to run out of memory while using a bitset. If you have lots of bits, you might prefer compressed bitsets, like the [Roaring bitmaps](http://roaringbitmap.org) and its [Go implementation](https://github.com/RoaringBitmap/roaring). + +## Implementation Note + +Go 1.9 introduced a native `math/bits` library. We provide backward compatibility to Go 1.7, which might be removed. + +It is possible that a later version will match the `math/bits` return signature for counts (which is `int`, rather than our library's `unit64`). If so, the version will be bumped. 
+ +## Installation + +```bash +go get github.com/bits-and-blooms/bitset +``` + +## Contributing + +If you wish to contribute to this project, please branch and issue a pull request against master ("[GitHub Flow](https://guides.github.com/introduction/flow/)") + +## Running all tests + +Before committing the code, please check if it passes tests, has adequate coverage, etc. +```bash +go test +go test -cover +``` diff --git a/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml new file mode 100644 index 0000000000..f9b2959184 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/azure-pipelines.yml @@ -0,0 +1,39 @@ +# Go +# Build your Go project. +# Add steps that test, save build artifacts, deploy, and more: +# https://docs.microsoft.com/azure/devops/pipelines/languages/go + +trigger: +- master + +pool: + vmImage: 'Ubuntu-16.04' + +variables: + GOBIN: '$(GOPATH)/bin' # Go binaries path + GOROOT: '/usr/local/go1.11' # Go installation path + GOPATH: '$(system.defaultWorkingDirectory)/gopath' # Go workspace path + modulePath: '$(GOPATH)/src/github.com/$(build.repository.name)' # Path to the module's code + +steps: +- script: | + mkdir -p '$(GOBIN)' + mkdir -p '$(GOPATH)/pkg' + mkdir -p '$(modulePath)' + shopt -s extglob + shopt -s dotglob + mv !(gopath) '$(modulePath)' + echo '##vso[task.prependpath]$(GOBIN)' + echo '##vso[task.prependpath]$(GOROOT)/bin' + displayName: 'Set up the Go workspace' + +- script: | + go version + go get -v -t -d ./... + if [ -f Gopkg.toml ]; then + curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh + dep ensure + fi + go build -v . + workingDirectory: '$(modulePath)' + displayName: 'Get dependencies, then build' diff --git a/vendor/github.com/bits-and-blooms/bitset/bitset.go b/vendor/github.com/bits-and-blooms/bitset/bitset.go new file mode 100644 index 0000000000..d688806a54 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/bitset.go @@ -0,0 +1,952 @@ +/* +Package bitset implements bitsets, a mapping +between non-negative integers and boolean values. It should be more +efficient than map[uint] bool. + +It provides methods for setting, clearing, flipping, and testing +individual integers. + +But it also provides set intersection, union, difference, +complement, and symmetric operations, as well as tests to +check whether any, all, or no bits are set, and querying a +bitset's current length and number of positive bits. + +BitSets are expanded to the size of the largest set bit; the +memory allocation is approximately Max bits, where Max is +the largest set bit. BitSets are never shrunk. On creation, +a hint can be given for the number of bits that will be used. + +Many of the methods, including Set,Clear, and Flip, return +a BitSet pointer, which allows for chaining. + +Example use: + + import "bitset" + var b BitSet + b.Set(10).Set(11) + if b.Test(1000) { + b.Clear(1000) + } + if B.Intersection(bitset.New(100).Set(10)).Count() > 1 { + fmt.Println("Intersection works.") + } + +As an alternative to BitSets, one should check out the 'big' package, +which provides a (less set-theoretical) view of bitsets. 
+ +*/ +package bitset + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "strconv" +) + +// the wordSize of a bit set +const wordSize = uint(64) + +// log2WordSize is lg(wordSize) +const log2WordSize = uint(6) + +// allBits has every bit set +const allBits uint64 = 0xffffffffffffffff + +// default binary BigEndian +var binaryOrder binary.ByteOrder = binary.BigEndian + +// default json encoding base64.URLEncoding +var base64Encoding = base64.URLEncoding + +// Base64StdEncoding Marshal/Unmarshal BitSet with base64.StdEncoding(Default: base64.URLEncoding) +func Base64StdEncoding() { base64Encoding = base64.StdEncoding } + +// LittleEndian Marshal/Unmarshal Binary as Little Endian(Default: binary.BigEndian) +func LittleEndian() { binaryOrder = binary.LittleEndian } + +// A BitSet is a set of bits. The zero value of a BitSet is an empty set of length 0. +type BitSet struct { + length uint + set []uint64 +} + +// Error is used to distinguish errors (panics) generated in this package. +type Error string + +// safeSet will fixup b.set to be non-nil and return the field value +func (b *BitSet) safeSet() []uint64 { + if b.set == nil { + b.set = make([]uint64, wordsNeeded(0)) + } + return b.set +} + +// From is a constructor used to create a BitSet from an array of integers +func From(buf []uint64) *BitSet { + return &BitSet{uint(len(buf)) * 64, buf} +} + +// Bytes returns the bitset as array of integers +func (b *BitSet) Bytes() []uint64 { + return b.set +} + +// wordsNeeded calculates the number of words needed for i bits +func wordsNeeded(i uint) int { + if i > (Cap() - wordSize + 1) { + return int(Cap() >> log2WordSize) + } + return int((i + (wordSize - 1)) >> log2WordSize) +} + +// New creates a new BitSet with a hint that length bits will be required +func New(length uint) (bset *BitSet) { + defer func() { + if r := recover(); r != nil { + bset = &BitSet{ + 0, + make([]uint64, 0), + } + } + }() + + bset = &BitSet{ + length, + make([]uint64, wordsNeeded(length)), + } + + return bset +} + +// Cap returns the total possible capacity, or number of bits +func Cap() uint { + return ^uint(0) +} + +// Len returns the number of bits in the BitSet. +// Note the difference to method Count, see example. +func (b *BitSet) Len() uint { + return b.length +} + +// extendSetMaybe adds additional words to incorporate new bits if needed +func (b *BitSet) extendSetMaybe(i uint) { + if i >= b.length { // if we need more bits, make 'em + if i >= Cap() { + panic("You are exceeding the capacity") + } + nsize := wordsNeeded(i + 1) + if b.set == nil { + b.set = make([]uint64, nsize) + } else if cap(b.set) >= nsize { + b.set = b.set[:nsize] // fast resize + } else if len(b.set) < nsize { + newset := make([]uint64, nsize, 2*nsize) // increase capacity 2x + copy(newset, b.set) + b.set = newset + } + b.length = i + 1 + } +} + +// Test whether bit i is set. +func (b *BitSet) Test(i uint) bool { + if i >= b.length { + return false + } + return b.set[i>>log2WordSize]&(1<<(i&(wordSize-1))) != 0 +} + +// Set bit i to 1, the capacity of the bitset is automatically +// increased accordingly. +// If i>= Cap(), this function will panic. +// Warning: using a very large value for 'i' +// may lead to a memory shortage and a panic: the caller is responsible +// for providing sensible parameters in line with their memory capacity. 
+func (b *BitSet) Set(i uint) *BitSet {
+	b.extendSetMaybe(i)
+	b.set[i>>log2WordSize] |= 1 << (i & (wordSize - 1))
+	return b
+}
+
+// Clear bit i to 0
+func (b *BitSet) Clear(i uint) *BitSet {
+	if i >= b.length {
+		return b
+	}
+	b.set[i>>log2WordSize] &^= 1 << (i & (wordSize - 1))
+	return b
+}
+
+// SetTo sets bit i to value.
+// If i>= Cap(), this function will panic.
+// Warning: using a very large value for 'i'
+// may lead to a memory shortage and a panic: the caller is responsible
+// for providing sensible parameters in line with their memory capacity.
+func (b *BitSet) SetTo(i uint, value bool) *BitSet {
+	if value {
+		return b.Set(i)
+	}
+	return b.Clear(i)
+}
+
+// Flip bit at i.
+// If i>= Cap(), this function will panic.
+// Warning: using a very large value for 'i'
+// may lead to a memory shortage and a panic: the caller is responsible
+// for providing sensible parameters in line with their memory capacity.
+func (b *BitSet) Flip(i uint) *BitSet {
+	if i >= b.length {
+		return b.Set(i)
+	}
+	b.set[i>>log2WordSize] ^= 1 << (i & (wordSize - 1))
+	return b
+}
+
+// FlipRange flips the bits in the range [start, end).
+// If end>= Cap(), this function will panic.
+// Warning: using a very large value for 'end'
+// may lead to a memory shortage and a panic: the caller is responsible
+// for providing sensible parameters in line with their memory capacity.
+func (b *BitSet) FlipRange(start, end uint) *BitSet {
+	if start >= end {
+		return b
+	}
+
+	b.extendSetMaybe(end - 1)
+	var startWord uint = start >> log2WordSize
+	var endWord uint = end >> log2WordSize
+	b.set[startWord] ^= ^(^uint64(0) << (start & (wordSize - 1)))
+	for i := startWord; i < endWord; i++ {
+		b.set[i] = ^b.set[i]
+	}
+	b.set[endWord] ^= ^uint64(0) >> (-end & (wordSize - 1))
+	return b
+}
+
+// Shrink shrinks BitSet so that the provided value is the last possible
+// set value. It clears all bits > the provided index and reduces the size
+// and length of the set.
+//
+// Note that the parameter value is not the new length in bits: it is the
+// maximal value that can be stored in the bitset after the function call.
+// The new length in bits is the parameter value + 1. Thus it is not possible
+// to use this function to set the length to 0; the minimal value of the length
+// after this function call is 1.
+//
+// A new slice is allocated to store the new bits, so you may see an increase in
+// memory usage until the GC runs. Normally this should not be a problem, but if you
+// have an extremely large BitSet it's important to understand that the old BitSet will
+// remain in memory until the GC frees it.
+func (b *BitSet) Shrink(lastbitindex uint) *BitSet {
+	length := lastbitindex + 1
+	idx := wordsNeeded(length)
+	if idx > len(b.set) {
+		return b
+	}
+	shrunk := make([]uint64, idx)
+	copy(shrunk, b.set[:idx])
+	b.set = shrunk
+	b.length = length
+	b.set[idx-1] &= (allBits >> (uint64(64) - uint64(length&(wordSize-1))))
+	return b
+}
+
+// Compact shrinks BitSet so that we preserve all set bits, while minimizing
+// memory usage. Compact calls Shrink.
+func (b *BitSet) Compact() *BitSet {
+	idx := len(b.set) - 1
+	for ; idx >= 0 && b.set[idx] == 0; idx-- {
+	}
+	newlength := uint((idx + 1) << log2WordSize)
+	if newlength >= b.length {
+		return b // nothing to do
+	}
+	if newlength > 0 {
+		return b.Shrink(newlength - 1)
+	}
+	// We preserve one word
+	return b.Shrink(63)
+}
+
+// InsertAt takes an index which indicates where a bit should be
+// inserted.
+// Then it shifts all the bits in the set to the left by 1, starting
+// from the given index position, and sets the index position to 0.
+//
+// Depending on the size of your BitSet, and where you are inserting the new entry,
+// this method could be extremely slow and in some cases might cause the entire BitSet
+// to be recopied.
+func (b *BitSet) InsertAt(idx uint) *BitSet {
+	insertAtElement := (idx >> log2WordSize)
+
+	// if the length of the set is a multiple of wordSize we need to allocate more space first
+	if b.isLenExactMultiple() {
+		b.set = append(b.set, uint64(0))
+	}
+
+	var i uint
+	for i = uint(len(b.set) - 1); i > insertAtElement; i-- {
+		// all elements above the position where we want to insert can simply be shifted
+		b.set[i] <<= 1
+
+		// we take the most significant bit of the previous element and set it as
+		// the least significant bit of the current element
+		b.set[i] |= (b.set[i-1] & 0x8000000000000000) >> 63
+	}
+
+	// generate a mask to extract the data that we need to shift left
+	// within the element where we insert a bit
+	dataMask := ^(uint64(1)<<uint64(idx&(wordSize-1)) - 1)
+
+	// extract the data that we'll shift
+	data := b.set[insertAtElement] & dataMask
+
+	// set the positions of the data mask to 0 in the element where we insert
+	b.set[insertAtElement] &= ^dataMask
+
+	// shift the extracted data to the left and insert it into the slice element
+	b.set[insertAtElement] |= data << 1
+
+	// add 1 to the length of the BitSet
+	b.length++
+
+	return b
+}
+
+// String creates a string representation of the Bitmap
+func (b *BitSet) String() string {
+	// follows code from https://github.com/RoaringBitmap/roaring
+	var buffer bytes.Buffer
+	start := []byte("{")
+	buffer.Write(start)
+	counter := 0
+	i, e := b.NextSet(0)
+	for e {
+		counter = counter + 1
+		// to avoid exhausting the memory
+		if counter > 0x40000 {
+			buffer.WriteString("...")
+			break
+		}
+		buffer.WriteString(strconv.FormatInt(int64(i), 10))
+		i, e = b.NextSet(i + 1)
+		if e {
+			buffer.WriteString(",")
+		}
+	}
+	buffer.WriteString("}")
+	return buffer.String()
+}
+
+// DeleteAt deletes the bit at the given index position from
+// within the bitset.
+// All the bits residing on the left of the deleted bit get
+// shifted right by 1.
+// The running time of this operation may potentially be
+// relatively slow, O(length).
+func (b *BitSet) DeleteAt(i uint) *BitSet {
+	// the index of the slice element where we'll delete a bit
+	deleteAtElement := i >> log2WordSize
+
+	// generate a mask for the data that needs to be shifted right
+	// within that slice element that gets modified
+	dataMask := ^((uint64(1) << (i & (wordSize - 1))) - 1)
+
+	// extract the data that we'll shift right from the slice element
+	data := b.set[deleteAtElement] & dataMask
+
+	// set the masked area to 0 while leaving the rest as it is
+	b.set[deleteAtElement] &= ^dataMask
+
+	// shift the previously extracted data to the right and then
+	// set it in the previously masked area
+	b.set[deleteAtElement] |= (data >> 1) & dataMask
+
+	// loop over all the consecutive slice elements to copy each
+	// lowest bit into the highest position of the previous element,
+	// then shift the entire content to the right by 1
+	for i := int(deleteAtElement) + 1; i < len(b.set); i++ {
+		b.set[i-1] |= (b.set[i] & 1) << 63
+		b.set[i] >>= 1
+	}
+
+	b.length = b.length - 1
+
+	return b
+}
+
+// NextSet returns the next bit set from the specified index,
+// including possibly the current index,
+// along with an error code (true = valid, false = no set bit found)
+// for i,e := v.NextSet(0); e; i,e = v.NextSet(i + 1) {...}
+//
+// Users concerned with performance may want to use NextSetMany to
+// retrieve several values at once.
+func (b *BitSet) NextSet(i uint) (uint, bool) {
+	x := int(i >> log2WordSize)
+	if x >= len(b.set) {
+		return 0, false
+	}
+	w := b.set[x]
+	w = w >> (i & (wordSize - 1))
+	if w != 0 {
+		return i + trailingZeroes64(w), true
+	}
+	x = x + 1
+	for x < len(b.set) {
+		if b.set[x] != 0 {
+			return uint(x)*wordSize + trailingZeroes64(b.set[x]), true
+		}
+		x = x + 1
+	}
+	return 0, false
+}
+
+// NextSetMany returns many next bit sets from the specified index,
+// including possibly the current index and up to cap(buffer).
+// If the returned slice has len zero, then no more set bits were found.
+//
+//    buffer := make([]uint, 256) // this should be reused
+//    j := uint(0)
+//    j, buffer = bitmap.NextSetMany(j, buffer)
+//    for ; len(buffer) > 0; j, buffer = bitmap.NextSetMany(j, buffer) {
+//     for k := range buffer {
+//      do something with buffer[k]
+//     }
+//     j += 1
+//    }
+//
+// It is possible to retrieve all set bits as follows:
+//
+//    indices := make([]uint, bitmap.Count())
+//    bitmap.NextSetMany(0, indices)
+//
+// However, if bitmap.Count() is large, it might be preferable to
+// use several calls to NextSetMany, for performance reasons.
+func (b *BitSet) NextSetMany(i uint, buffer []uint) (uint, []uint) {
+	myanswer := buffer
+	capacity := cap(buffer)
+	x := int(i >> log2WordSize)
+	if x >= len(b.set) || capacity == 0 {
+		return 0, myanswer[:0]
+	}
+	skip := i & (wordSize - 1)
+	word := b.set[x] >> skip
+	myanswer = myanswer[:capacity]
+	size := int(0)
+	for word != 0 {
+		r := trailingZeroes64(word)
+		t := word & ((^word) + 1)
+		myanswer[size] = r + i
+		size++
+		if size == capacity {
+			goto End
+		}
+		word = word ^ t
+	}
+	x++
+	for idx, word := range b.set[x:] {
+		for word != 0 {
+			r := trailingZeroes64(word)
+			t := word & ((^word) + 1)
+			myanswer[size] = r + (uint(x+idx) << 6)
+			size++
+			if size == capacity {
+				goto End
+			}
+			word = word ^ t
+		}
+	}
+End:
+	if size > 0 {
+		return myanswer[size-1], myanswer[:size]
+	}
+	return 0, myanswer[:0]
+}
+
+// NextClear returns the next clear bit from the specified index,
+// including possibly the current index,
+// along with an error code (true = valid, false = no clear bit found, i.e. all bits are set)
+func (b *BitSet) NextClear(i uint) (uint, bool) {
+	x := int(i >> log2WordSize)
+	if x >= len(b.set) {
+		return 0, false
+	}
+	w := b.set[x]
+	w = w >> (i & (wordSize - 1))
+	wA := allBits >> (i & (wordSize - 1))
+	index := i + trailingZeroes64(^w)
+	if w != wA && index < b.length {
+		return index, true
+	}
+	x++
+	for x < len(b.set) {
+		index = uint(x)*wordSize + trailingZeroes64(^b.set[x])
+		if b.set[x] != allBits && index < b.length {
+			return index, true
+		}
+		x++
+	}
+	return 0, false
+}
+
+// ClearAll clears the entire BitSet
+func (b *BitSet) ClearAll() *BitSet {
+	if b != nil && b.set != nil {
+		for i := range b.set {
+			b.set[i] = 0
+		}
+	}
+	return b
+}
+
+// wordCount returns the number of words used in a bit set
+func (b *BitSet) wordCount() int {
+	return len(b.set)
+}
+
+// Clone this BitSet
+func (b *BitSet) Clone() *BitSet {
+	c := New(b.length)
+	if b.set != nil { // Clone should not modify current object
+		copy(c.set, b.set)
+	}
+	return c
+}
+
+// Copy into a destination BitSet,
+// returning the size of the destination BitSet,
+// like array copy
+func (b *BitSet) Copy(c *BitSet) (count uint) {
+	if c == nil {
+		return
+	}
+	if b.set != nil { // Copy should not modify current object
+		copy(c.set, b.set)
+	}
+	count = c.length
+	if b.length < c.length {
+		count = b.length
+	}
+	return
+}
+
+// Count (number of set bits).
+// Also known as "popcount" or "population count".
+func (b *BitSet) Count() uint {
+	if b != nil && b.set != nil {
+		return uint(popcntSlice(b.set))
+	}
+	return 0
+}
+
+// Equal tests the equivalence of two BitSets.
+// False if they are of different sizes, otherwise true
+// only if all the same bits are set
+func (b *BitSet) Equal(c *BitSet) bool {
+	if c == nil || b == nil {
+		return c == b
+	}
+	if b.length != c.length {
+		return false
+	}
+	if b.length == 0 { // if they have both length == 0, then could have nil set
+		return true
+	}
+	// testing for equality should not transform the bitset (no call to safeSet)
+
+	for p, v := range b.set {
+		if c.set[p] != v {
+			return false
+		}
+	}
+	return true
+}
+
+func panicIfNull(b *BitSet) {
+	if b == nil {
+		panic(Error("BitSet must not be null"))
+	}
+}
+
+// Difference of base set and other set
+// This is the BitSet equivalent of &^ (and not)
+func (b *BitSet) Difference(compare *BitSet) (result *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	result = b.Clone() // clone b (in case b is bigger than compare)
+	l := int(compare.wordCount())
+	if l > int(b.wordCount()) {
+		l = int(b.wordCount())
+	}
+	for i := 0; i < l; i++ {
+		result.set[i] = b.set[i] &^ compare.set[i]
+	}
+	return
+}
+
+// DifferenceCardinality computes the cardinality of the difference
+func (b *BitSet) DifferenceCardinality(compare *BitSet) uint {
+	panicIfNull(b)
+	panicIfNull(compare)
+	l := int(compare.wordCount())
+	if l > int(b.wordCount()) {
+		l = int(b.wordCount())
+	}
+	cnt := uint64(0)
+	cnt += popcntMaskSlice(b.set[:l], compare.set[:l])
+	cnt += popcntSlice(b.set[l:])
+	return uint(cnt)
+}
+
+// InPlaceDifference computes the difference of base set and other set
+// This is the BitSet equivalent of &^ (and not)
+func (b *BitSet) InPlaceDifference(compare *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	l := int(compare.wordCount())
+	if l > int(b.wordCount()) {
+		l = int(b.wordCount())
+	}
+	for i := 0; i < l; i++ {
+		b.set[i] &^= compare.set[i]
+	}
+}
+
+// Convenience function: return two bitsets ordered by
+// increasing length. Note: neither can be nil
+func sortByLength(a *BitSet, b *BitSet) (ap *BitSet, bp *BitSet) {
+	if a.length <= b.length {
+		ap, bp = a, b
+	} else {
+		ap, bp = b, a
+	}
+	return
+}
+
+// Intersection of base set and other set
+// This is the BitSet equivalent of & (and)
+func (b *BitSet) Intersection(compare *BitSet) (result *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	b, compare = sortByLength(b, compare)
+	result = New(b.length)
+	for i, word := range b.set {
+		result.set[i] = word & compare.set[i]
+	}
+	return
+}
+
+// IntersectionCardinality computes the cardinality of the intersection
+func (b *BitSet) IntersectionCardinality(compare *BitSet) uint {
+	panicIfNull(b)
+	panicIfNull(compare)
+	b, compare = sortByLength(b, compare)
+	cnt := popcntAndSlice(b.set, compare.set)
+	return uint(cnt)
+}
+
+// InPlaceIntersection destructively computes the intersection of
+// base set and the compare set.
+// This is the BitSet equivalent of & (and)
+func (b *BitSet) InPlaceIntersection(compare *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	l := int(compare.wordCount())
+	if l > int(b.wordCount()) {
+		l = int(b.wordCount())
+	}
+	for i := 0; i < l; i++ {
+		b.set[i] &= compare.set[i]
+	}
+	for i := l; i < len(b.set); i++ {
+		b.set[i] = 0
+	}
+	if compare.length > 0 {
+		b.extendSetMaybe(compare.length - 1)
+	}
+}
+
+// Union of base set and other set
+// This is the BitSet equivalent of | (or)
+func (b *BitSet) Union(compare *BitSet) (result *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	b, compare = sortByLength(b, compare)
+	result = compare.Clone()
+	for i, word := range b.set {
+		result.set[i] = word | compare.set[i]
+	}
+	return
+}
+
+// UnionCardinality computes the cardinality of the union of the base set
+// and the compare set.
+func (b *BitSet) UnionCardinality(compare *BitSet) uint {
+	panicIfNull(b)
+	panicIfNull(compare)
+	b, compare = sortByLength(b, compare)
+	cnt := popcntOrSlice(b.set, compare.set)
+	if len(compare.set) > len(b.set) {
+		cnt += popcntSlice(compare.set[len(b.set):])
+	}
+	return uint(cnt)
+}
+
+// InPlaceUnion creates the destructive union of base set and compare set.
+// This is the BitSet equivalent of | (or).
+func (b *BitSet) InPlaceUnion(compare *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	l := int(compare.wordCount())
+	if l > int(b.wordCount()) {
+		l = int(b.wordCount())
+	}
+	if compare.length > 0 {
+		b.extendSetMaybe(compare.length - 1)
+	}
+	for i := 0; i < l; i++ {
+		b.set[i] |= compare.set[i]
+	}
+	if len(compare.set) > l {
+		for i := l; i < len(compare.set); i++ {
+			b.set[i] = compare.set[i]
+		}
+	}
+}
+
+// SymmetricDifference of base set and other set
+// This is the BitSet equivalent of ^ (xor)
+func (b *BitSet) SymmetricDifference(compare *BitSet) (result *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	b, compare = sortByLength(b, compare)
+	// compare is bigger, so clone it
+	result = compare.Clone()
+	for i, word := range b.set {
+		result.set[i] = word ^ compare.set[i]
+	}
+	return
+}
+
+// SymmetricDifferenceCardinality computes the cardinality of the symmetric difference
+func (b *BitSet) SymmetricDifferenceCardinality(compare *BitSet) uint {
+	panicIfNull(b)
+	panicIfNull(compare)
+	b, compare = sortByLength(b, compare)
+	cnt := popcntXorSlice(b.set, compare.set)
+	if len(compare.set) > len(b.set) {
+		cnt += popcntSlice(compare.set[len(b.set):])
+	}
+	return uint(cnt)
+}
+
+// InPlaceSymmetricDifference creates the destructive SymmetricDifference of base set and other set
+// This is the BitSet equivalent of ^ (xor)
+func (b *BitSet) InPlaceSymmetricDifference(compare *BitSet) {
+	panicIfNull(b)
+	panicIfNull(compare)
+	l := int(compare.wordCount())
+	if l > int(b.wordCount()) {
+		l = int(b.wordCount())
+	}
+	if compare.length > 0 {
+		b.extendSetMaybe(compare.length - 1)
+	}
+	for i := 0; i < l; i++ {
+		b.set[i] ^= compare.set[i]
+	}
+	if len(compare.set) > l {
+		for i := l; i < len(compare.set); i++ {
+			b.set[i] = compare.set[i]
+		}
+	}
+}
+
+// Is the length an exact multiple of the word size?
+func (b *BitSet) isLenExactMultiple() bool {
+	return b.length%wordSize == 0
+}
+
+// Clean last word by setting unused bits to 0
+func (b *BitSet) cleanLastWord() {
+	if !b.isLenExactMultiple() {
+		b.set[len(b.set)-1] &= allBits >> (wordSize - b.length%wordSize)
+	}
+}
+
+// Complement computes the (local) complement of a bitset (up to length bits)
+func (b *BitSet) Complement() (result *BitSet) {
+	panicIfNull(b)
+	result = New(b.length)
+	for i, word := range b.set {
+		result.set[i] = ^word
+	}
+	result.cleanLastWord()
+	return
+}
+
+// All returns true if all bits are set, false otherwise. Returns true for
+// empty sets.
+func (b *BitSet) All() bool {
+	panicIfNull(b)
+	return b.Count() == b.length
+}
+
+// None returns true if no bit is set, false otherwise. Returns true for
+// empty sets.
+func (b *BitSet) None() bool {
+	panicIfNull(b)
+	if b != nil && b.set != nil {
+		for _, word := range b.set {
+			if word > 0 {
+				return false
+			}
+		}
+		return true
+	}
+	return true
+}
+
+// Any returns true if any bit is set, false otherwise
+func (b *BitSet) Any() bool {
+	panicIfNull(b)
+	return !b.None()
+}
+
+// IsSuperSet returns true if this is a superset of the other set
+func (b *BitSet) IsSuperSet(other *BitSet) bool {
+	for i, e := other.NextSet(0); e; i, e = other.NextSet(i + 1) {
+		if !b.Test(i) {
+			return false
+		}
+	}
+	return true
+}
+
+// IsStrictSuperSet returns true if this is a strict superset of the other set
+func (b *BitSet) IsStrictSuperSet(other *BitSet) bool {
+	return b.Count() > other.Count() && b.IsSuperSet(other)
+}
+
+// DumpAsBits dumps a bit set as a string of bits
+func (b *BitSet) DumpAsBits() string {
+	if b.set == nil {
+		return "."
+	}
+	buffer := bytes.NewBufferString("")
+	i := len(b.set) - 1
+	for ; i >= 0; i-- {
+		fmt.Fprintf(buffer, "%064b.", b.set[i])
+	}
+	return buffer.String()
+}
+
+// BinaryStorageSize returns the binary storage requirements
+func (b *BitSet) BinaryStorageSize() int {
+	return binary.Size(uint64(0)) + binary.Size(b.set)
+}
+
+// WriteTo writes a BitSet to a stream
+func (b *BitSet) WriteTo(stream io.Writer) (int64, error) {
+	length := uint64(b.length)
+
+	// Write length
+	err := binary.Write(stream, binaryOrder, length)
+	if err != nil {
+		return 0, err
+	}
+
+	// Write set
+	err = binary.Write(stream, binaryOrder, b.set)
+	return int64(b.BinaryStorageSize()), err
+}
+
+// ReadFrom reads a BitSet from a stream written using WriteTo
+func (b *BitSet) ReadFrom(stream io.Reader) (int64, error) {
+	var length uint64
+
+	// Read length first
+	err := binary.Read(stream, binaryOrder, &length)
+	if err != nil {
+		return 0, err
+	}
+	newset := New(uint(length))
+
+	if uint64(newset.length) != length {
+		return 0, errors.New("unmarshalling error: type mismatch")
+	}
+
+	// Read remaining bytes as set
+	err = binary.Read(stream, binaryOrder, newset.set)
+	if err != nil {
+		return 0, err
+	}
+
+	*b = *newset
+	return int64(b.BinaryStorageSize()), nil
+}
+
+// MarshalBinary encodes a BitSet into a binary form and returns the result.
+func (b *BitSet) MarshalBinary() ([]byte, error) {
+	var buf bytes.Buffer
+	writer := bufio.NewWriter(&buf)
+
+	_, err := b.WriteTo(writer)
+	if err != nil {
+		return []byte{}, err
+	}
+
+	err = writer.Flush()
+
+	return buf.Bytes(), err
+}
+
+// UnmarshalBinary decodes the binary form generated by MarshalBinary.
+func (b *BitSet) UnmarshalBinary(data []byte) error { + buf := bytes.NewReader(data) + reader := bufio.NewReader(buf) + + _, err := b.ReadFrom(reader) + + return err +} + +// MarshalJSON marshals a BitSet as a JSON structure +func (b *BitSet) MarshalJSON() ([]byte, error) { + buffer := bytes.NewBuffer(make([]byte, 0, b.BinaryStorageSize())) + _, err := b.WriteTo(buffer) + if err != nil { + return nil, err + } + + // URLEncode all bytes + return json.Marshal(base64Encoding.EncodeToString(buffer.Bytes())) +} + +// UnmarshalJSON unmarshals a BitSet from JSON created using MarshalJSON +func (b *BitSet) UnmarshalJSON(data []byte) error { + // Unmarshal as string + var s string + err := json.Unmarshal(data, &s) + if err != nil { + return err + } + + // URLDecode string + buf, err := base64Encoding.DecodeString(s) + if err != nil { + return err + } + + _, err = b.ReadFrom(bytes.NewReader(buf)) + return err +} diff --git a/vendor/github.com/bits-and-blooms/bitset/go.mod b/vendor/github.com/bits-and-blooms/bitset/go.mod new file mode 100644 index 0000000000..c43e4522b7 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/go.mod @@ -0,0 +1,3 @@ +module github.com/bits-and-blooms/bitset + +go 1.14 diff --git a/vendor/github.com/bits-and-blooms/bitset/go.sum b/vendor/github.com/bits-and-blooms/bitset/go.sum new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt.go b/vendor/github.com/bits-and-blooms/bitset/popcnt.go new file mode 100644 index 0000000000..76577a8382 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt.go @@ -0,0 +1,53 @@ +package bitset + +// bit population count, take from +// https://code.google.com/p/go/issues/detail?id=4988#c11 +// credit: https://code.google.com/u/arnehormann/ +func popcount(x uint64) (n uint64) { + x -= (x >> 1) & 0x5555555555555555 + x = (x>>2)&0x3333333333333333 + x&0x3333333333333333 + x += x >> 4 + x &= 0x0f0f0f0f0f0f0f0f + x *= 0x0101010101010101 + return x >> 56 +} + +func popcntSliceGo(s []uint64) uint64 { + cnt := uint64(0) + for _, x := range s { + cnt += popcount(x) + } + return cnt +} + +func popcntMaskSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] &^ m[i]) + } + return cnt +} + +func popcntAndSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] & m[i]) + } + return cnt +} + +func popcntOrSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] | m[i]) + } + return cnt +} + +func popcntXorSliceGo(s, m []uint64) uint64 { + cnt := uint64(0) + for i := range s { + cnt += popcount(s[i] ^ m[i]) + } + return cnt +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go new file mode 100644 index 0000000000..fc8ff4f367 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_19.go @@ -0,0 +1,45 @@ +// +build go1.9 + +package bitset + +import "math/bits" + +func popcntSlice(s []uint64) uint64 { + var cnt int + for _, x := range s { + cnt += bits.OnesCount64(x) + } + return uint64(cnt) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] &^ m[i]) + } + return uint64(cnt) +} + +func popcntAndSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] & m[i]) + } + return uint64(cnt) +} + +func popcntOrSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += 
bits.OnesCount64(s[i] | m[i]) + } + return uint64(cnt) +} + +func popcntXorSlice(s, m []uint64) uint64 { + var cnt int + for i := range s { + cnt += bits.OnesCount64(s[i] ^ m[i]) + } + return uint64(cnt) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go new file mode 100644 index 0000000000..4cf64f24ad --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.go @@ -0,0 +1,68 @@ +// +build !go1.9 +// +build amd64,!appengine + +package bitset + +// *** the following functions are defined in popcnt_amd64.s + +//go:noescape + +func hasAsm() bool + +// useAsm is a flag used to select the GO or ASM implementation of the popcnt function +var useAsm = hasAsm() + +//go:noescape + +func popcntSliceAsm(s []uint64) uint64 + +//go:noescape + +func popcntMaskSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntAndSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntOrSliceAsm(s, m []uint64) uint64 + +//go:noescape + +func popcntXorSliceAsm(s, m []uint64) uint64 + +func popcntSlice(s []uint64) uint64 { + if useAsm { + return popcntSliceAsm(s) + } + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + if useAsm { + return popcntMaskSliceAsm(s, m) + } + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + if useAsm { + return popcntAndSliceAsm(s, m) + } + return popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + if useAsm { + return popcntOrSliceAsm(s, m) + } + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + if useAsm { + return popcntXorSliceAsm(s, m) + } + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s new file mode 100644 index 0000000000..666c0dcc17 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_amd64.s @@ -0,0 +1,104 @@ +// +build !go1.9 +// +build amd64,!appengine + +TEXT ·hasAsm(SB),4,$0-1 +MOVQ $1, AX +CPUID +SHRQ $23, CX +ANDQ $1, CX +MOVB CX, ret+0(FP) +RET + +#define POPCNTQ_DX_DX BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0xd2 + +TEXT ·popcntSliceAsm(SB),4,$0-32 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntSliceEnd +popcntSliceLoop: +BYTE $0xf3; BYTE $0x48; BYTE $0x0f; BYTE $0xb8; BYTE $0x16 // POPCNTQ (SI), DX +ADDQ DX, AX +ADDQ $8, SI +LOOP popcntSliceLoop +popcntSliceEnd: +MOVQ AX, ret+24(FP) +RET + +TEXT ·popcntMaskSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntMaskSliceEnd +MOVQ m+24(FP), DI +popcntMaskSliceLoop: +MOVQ (DI), DX +NOTQ DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntMaskSliceLoop +popcntMaskSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntAndSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntAndSliceEnd +MOVQ m+24(FP), DI +popcntAndSliceLoop: +MOVQ (DI), DX +ANDQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntAndSliceLoop +popcntAndSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntOrSliceAsm(SB),4,$0-56 +XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntOrSliceEnd +MOVQ m+24(FP), DI +popcntOrSliceLoop: +MOVQ (DI), DX +ORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntOrSliceLoop +popcntOrSliceEnd: +MOVQ AX, ret+48(FP) +RET + +TEXT ·popcntXorSliceAsm(SB),4,$0-56 
+XORQ AX, AX +MOVQ s+0(FP), SI +MOVQ s_len+8(FP), CX +TESTQ CX, CX +JZ popcntXorSliceEnd +MOVQ m+24(FP), DI +popcntXorSliceLoop: +MOVQ (DI), DX +XORQ (SI), DX +POPCNTQ_DX_DX +ADDQ DX, AX +ADDQ $8, SI +ADDQ $8, DI +LOOP popcntXorSliceLoop +popcntXorSliceEnd: +MOVQ AX, ret+48(FP) +RET diff --git a/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go new file mode 100644 index 0000000000..21e0ff7b4f --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/popcnt_generic.go @@ -0,0 +1,24 @@ +// +build !go1.9 +// +build !amd64 appengine + +package bitset + +func popcntSlice(s []uint64) uint64 { + return popcntSliceGo(s) +} + +func popcntMaskSlice(s, m []uint64) uint64 { + return popcntMaskSliceGo(s, m) +} + +func popcntAndSlice(s, m []uint64) uint64 { + return popcntAndSliceGo(s, m) +} + +func popcntOrSlice(s, m []uint64) uint64 { + return popcntOrSliceGo(s, m) +} + +func popcntXorSlice(s, m []uint64) uint64 { + return popcntXorSliceGo(s, m) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go new file mode 100644 index 0000000000..c52b61be9f --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_18.go @@ -0,0 +1,14 @@ +// +build !go1.9 + +package bitset + +var deBruijn = [...]byte{ + 0, 1, 56, 2, 57, 49, 28, 3, 61, 58, 42, 50, 38, 29, 17, 4, + 62, 47, 59, 36, 45, 43, 51, 22, 53, 39, 33, 30, 24, 18, 12, 5, + 63, 55, 48, 27, 60, 41, 37, 16, 46, 35, 44, 21, 52, 32, 23, 11, + 54, 26, 40, 15, 34, 20, 31, 10, 25, 14, 19, 9, 13, 8, 7, 6, +} + +func trailingZeroes64(v uint64) uint { + return uint(deBruijn[((v&-v)*0x03f79d71b4ca8b09)>>58]) +} diff --git a/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go new file mode 100644 index 0000000000..36a988e714 --- /dev/null +++ b/vendor/github.com/bits-and-blooms/bitset/trailing_zeros_19.go @@ -0,0 +1,9 @@ +// +build go1.9 + +package bitset + +import "math/bits" + +func trailingZeroes64(v uint64) uint { + return uint(bits.TrailingZeros64(v)) +} diff --git a/vendor/github.com/buildpacks/imgutil/.gitignore b/vendor/github.com/buildpacks/imgutil/.gitignore new file mode 100644 index 0000000000..415f6b6ed7 --- /dev/null +++ b/vendor/github.com/buildpacks/imgutil/.gitignore @@ -0,0 +1,18 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# IDEs +.idea/ + +# Build timestamp +tools/bcdhive_generator/.build diff --git a/vendor/github.com/buildpacks/imgutil/CODEOWNERS b/vendor/github.com/buildpacks/imgutil/CODEOWNERS new file mode 100644 index 0000000000..d3a11dc1d0 --- /dev/null +++ b/vendor/github.com/buildpacks/imgutil/CODEOWNERS @@ -0,0 +1 @@ +* @buildpacks/implementation-maintainers diff --git a/vendor/github.com/buildpacks/imgutil/LICENSE b/vendor/github.com/buildpacks/imgutil/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/buildpacks/imgutil/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/buildpacks/imgutil/Makefile b/vendor/github.com/buildpacks/imgutil/Makefile
new file mode 100644
index 0000000000..f27f0d16af
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/Makefile
@@ -0,0 +1,42 @@
+# Go parameters
+GOCMD?=go
+GO_VERSION=$(shell go list -m -f "{{.GoVersion}}")
+PACKAGE_BASE=github.com/buildpacks/imgutil
+
+all: test
+
+install-goimports:
+	@echo "> Installing goimports..."
+	cd tools && $(GOCMD) install golang.org/x/tools/cmd/goimports
+
+format: install-goimports
+	@echo "> Formatting code..."
+	@goimports -l -w -local ${PACKAGE_BASE} .
+
+install-golangci-lint:
+	@echo "> Installing golangci-lint..."
+	cd tools && $(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint
+
+lint: install-golangci-lint
+	@echo "> Linting code..."
+	@golangci-lint run -c golangci.yaml
+
+tools/bcdhive_generator/.build: $(wildcard tools/bcdhive_generator/*)
+ifneq ($(OS),Windows_NT)
+	@echo "> Building bcdhive-generator in Docker using current golang version"
+	docker build tools/bcdhive_generator --tag bcdhive-generator --build-arg go_version=$(GO_VERSION)
+
+	@touch tools/bcdhive_generator/.build
+else
+	@echo "> Cannot generate on Windows"
+endif
+
+layer/bcdhive_generated.go: layer/windows_baselayer.go tools/bcdhive_generator/.build
+ifneq ($(OS),Windows_NT)
+	$(GOCMD) generate ./...
+else
+	@echo "> Cannot generate on Windows"
+endif
+
+test: layer/bcdhive_generated.go format lint
+	$(GOCMD) test -parallel=1 -count=1 -v ./...
diff --git a/vendor/github.com/buildpacks/imgutil/README.md b/vendor/github.com/buildpacks/imgutil/README.md
new file mode 100644
index 0000000000..79d2eb44c2
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/README.md
@@ -0,0 +1,19 @@
+# imgutil
+
+[![Build results](https://github.com/buildpacks/imgutil/workflows/test/badge.svg)](https://github.com/buildpacks/imgutil/actions)
+
+Helpful utilities for working with images
+
+## Development
+
+To format:
+
+```bash
+$ make format
+```
+
+To run tests:
+
+```bash
+$ make test
+```
diff --git a/vendor/github.com/buildpacks/imgutil/go.mod b/vendor/github.com/buildpacks/imgutil/go.mod
new file mode 100644
index 0000000000..bbf58794d7
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/go.mod
@@ -0,0 +1,13 @@
+module github.com/buildpacks/imgutil
+
+require (
+	github.com/docker/docker v20.10.8+incompatible
+	github.com/google/go-cmp v0.5.6
+	github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9
+	github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
+	github.com/pkg/errors v0.9.1
+	github.com/sclevine/spec v1.4.0
+	google.golang.org/genproto v0.0.0-20210603172842-58e84a565dcf // indirect
+)
+
+go 1.14
diff --git a/vendor/github.com/buildpacks/imgutil/go.sum b/vendor/github.com/buildpacks/imgutil/go.sum
new file mode 100644
index 0000000000..5c1857cbc5
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/go.sum
@@ -0,0 +1,1170 @@
+bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
+cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
+cloud.google.com/go v0.45.1/go.mod
h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest 
v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag 
v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod 
h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.2 h1:MG/Bg1pbmMb61j3wHCFWPxESXHieiKr2xG64px/k8zQ= +github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod 
h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.6.4 h1:lhJppIf4ULGQXbcjUJIy1sq79UegNTEebDTtfU8MlcA= +github.com/containerd/stargz-snapshotter/estargz v0.6.4/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables 
v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v20.10.7+incompatible h1:pv/3NqibQKphWZiAskMzdz8w0PRbtTaEB+f6NwdU7Is= +github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM= +github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod 
h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod 
h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9 h1:YbairAn5rtnnWrmxe1BQRXl2EY8VptU+moIayUu+PMQ= +github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9/go.mod h1:R5WRYyTdQqTchlBhX4q+WICGh8HQIL5wDFoFZv7Jq6Q= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= 
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod 
h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs= +github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod 
h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega 
v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod 
h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8= +github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod 
h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto 
v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644 h1:CA1DEQ4NdKphKeL70tvsWNdT5oFh1lOjihRcEDROi0I= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools 
v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools 
v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210603172842-58e84a565dcf h1:3oVOonZQld/0ddUsMXCnkhem95RnnQEUMZQLJP1s3jQ= +google.golang.org/genproto v0.0.0-20210603172842-58e84a565dcf/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= 
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod 
h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
+k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
+k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
+k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
+k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
+k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
+k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
+k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
+k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
+k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
+k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
+k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
+k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
+k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
+k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
+sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/vendor/github.com/buildpacks/imgutil/golangci.yaml b/vendor/github.com/buildpacks/imgutil/golangci.yaml
new file mode 100644
index 0000000000..e033922b0e
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/golangci.yaml
@@ -0,0 +1,32 @@
+run:
+  timeout: 6m
+
+linters:
+  disable-all: true
+  enable:
+    - bodyclose
+    - deadcode
+    - dogsled
+    - gocritic
+    - goimports
+    - golint
+    - gosec
+    - gosimple
+    - govet
+    - ineffassign
+    - maligned
+    - misspell
+    - nakedret
+    - scopelint
+    - staticcheck
+    - structcheck
+    - stylecheck
+    - typecheck
+    - unconvert
+    - unused
+    - varcheck
+    - whitespace
+
+linters-settings:
+  goimports:
+    local-prefixes: github.com/buildpacks/imgutil
\ No newline at end of file
diff --git a/vendor/github.com/buildpacks/imgutil/image.go b/vendor/github.com/buildpacks/imgutil/image.go
new file mode 100644
index 0000000000..ba99143ab7
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/image.go
@@ -0,0 +1,74 @@
+package imgutil
+
+import (
+    "fmt"
+    "io"
+    "strings"
+    "time"
+)
+
+var NormalizedDateTime = time.Date(1980, time.January, 1, 0, 0, 1, 0, time.UTC)
+
+type SaveDiagnostic struct {
+    ImageName string
+    Cause     error
+}
+
+type SaveError struct {
+    Errors []SaveDiagnostic
+}
+
+func (e SaveError) Error() string {
+    var errors []string
+    for _, d := range e.Errors {
+        errors = append(errors, fmt.Sprintf("[%s: %s]", d.ImageName, d.Cause.Error()))
+    }
+    return fmt.Sprintf("failed to write image to the following tags: %s", strings.Join(errors, ","))
+}
+
+// Platform represents the target arch/os/os_version for image construction and querying.
+type Platform struct {
+    Architecture string
+    OS           string
+    OSVersion    string
+}
+
+type Image interface {
+    Name() string
+    Rename(name string)
+    Label(string) (string, error)
+    Labels() (map[string]string, error)
+    SetLabel(string, string) error
+    RemoveLabel(string) error
+    Env(key string) (string, error)
+    Entrypoint() ([]string, error)
+    SetEnv(string, string) error
+    SetEntrypoint(...string) error
+    SetWorkingDir(string) error
+    SetCmd(...string) error
+    SetOS(string) error
+    SetOSVersion(string) error
+    SetArchitecture(string) error
+    Rebase(string, Image) error
+    AddLayer(path string) error
+    AddLayerWithDiffID(path, diffID string) error
+    ReuseLayer(diffID string) error
+    // TopLayer returns the diff id for the top layer
+    TopLayer() (string, error)
+    // Save saves the image as `Name()` and any additional names provided to this method.
+    Save(additionalNames ...string) error
+    // Found tells whether the image exists in the repository by `Name()`.
+    Found() bool
+    // GetLayer retrieves layer by diff id. Returns a reader of the uncompressed contents of the layer.
+    GetLayer(diffID string) (io.ReadCloser, error)
+    Delete() error
+    CreatedAt() (time.Time, error)
+    Identifier() (Identifier, error)
+    OS() (string, error)
+    OSVersion() (string, error)
+    Architecture() (string, error)
+    // ManifestSize returns the size of the manifest. If a manifest doesn't exist, it returns 0.
+    ManifestSize() (int64, error)
+}
+
+type Identifier fmt.Stringer
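[Editor's aside, not part of the vendored file: a minimal sketch of consuming the Image interface above. The package name, helper name, and tags are hypothetical, and it assumes Save surfaces the SaveError type defined above when individual tags fail.]

package example

import (
    "errors"
    "fmt"

    "github.com/buildpacks/imgutil"
)

// saveWithDiagnostics saves an image under one or more tags and, on
// failure, reports each failing tag using the SaveError/SaveDiagnostic
// contract from image.go.
func saveWithDiagnostics(img imgutil.Image, tags ...string) error {
    err := img.Save(tags...)
    if err == nil {
        return nil
    }
    var saveErr imgutil.SaveError
    if errors.As(err, &saveErr) {
        for _, d := range saveErr.Errors {
            fmt.Printf("could not write %s: %s\n", d.ImageName, d.Cause)
        }
    }
    return err
}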
diff --git a/vendor/github.com/buildpacks/imgutil/layer/bcdhive_generated.go b/vendor/github.com/buildpacks/imgutil/layer/bcdhive_generated.go
new file mode 100644
index 0000000000..657517721e
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/layer/bcdhive_generated.go
@@ -0,0 +1,31 @@
+package layer
+
+// Code generated by github.com/buildpacks/imgutil/tools/bcdhive_generator DO NOT EDIT
+
+import (
+    "bytes"
+    "compress/gzip"
+    "encoding/base64"
+    "io/ioutil"
+)
+
+const encodedBytes = "H4sIAAAAAAAC/+yaPWwbZRjH/5cmJAQoF/tcBanDoUSwcJXPvri+KaJN2gAlQZBWDBm4j9fkajuxbAOtqqCMGTuwMCB5YOhCxcYICxJiQB4Zu9EBoUgsgYEXPfeR88dZbRFDpT6/6HyP3/d5/T73PH/Oeh+5LT6uKQpA14Oj/lcvff0zmZhBBNk6oheyd7CKVezgOjZxGTvooo0AHuqh3UQLO1jDOq7gTVzHNWyDeVr5Pqj+zVlgGIZhGIZhGIZ5Nth1g73QUNOxpA9Adk9KuVd/A/rrl75Mxn6YBaaS9QuAlFKSTffD+H4uY68XASwvL7/3/tb21oWNt27Q2E//SNmpA7SOrhcAnFcABdNfQAFUJVqrgfb8HfPhu0W8gikoURAzOoBXyZ5fpPeHY+MZ/ksT/Jdi/1W8POSvp/7q2Di0yD+KdSZHr/Na5Ds4NvFzh3zPhH2XEZ/hzwE2zrJ2GYZhGIZhGIZhmCc8/6vD5/9BovO/PnT+1wfmZQwdV++q0Rk+6QckvouxTWf7NdHx2kGrG+ynbYfGrgJdBfzyj78cSyl7KtCP4+lLKT+tq5imczpA9/B3CFeCdvMzpy3e3feDWiD8095F6D8b7nmsRr50vSNubzpNEc5fwmWsoTjyF3FvwvMm/Y4NLbvfcS0jd7MAttybwut28FBK2didOn1OdQH4Y/3+20cZ+9Fcsh/yj99feQ3AnYpT8cya6Rortm8Zpikcw7bKVcO2iiVRcVzftqsHad4PF4AP/lz9K6vONDdaZ5q4lRuv81xcm0fVeS4XPf8JPUsuqdd0WN8z0NWkvtu3W2KS/iiuuC2EWxPykyVninG9IZpir9tJxpK6JHH1coB9/7d7Wfv2ctn5OMqP5+MsgPMD+5qlImEl88fhvgoW88DSN899R/m4m0/1S2u/zaf6jeMO5118ggAN+GjBCX9708EFBNinWB6hpw8LT6gn2xdOtVLyjBXP9w1LXCwajueZRq1ccsuW5VsX7ZWDNI+Jnvp54Orn3atZeaS5rDz2tf+uq4+0VFe/asO6moL6WLrq51Nd9Qv/j65O49Im6+pEy87Hg0K2rjYH9i2VQ12VR3XVKgAn8zduUj4eFlJd0dq5c+O6ov97d1CBgwo8mKjBhAsDK7Dhw4IBEyYEHBiwYaGMamwVUYIIV7rwYcNGFQfp98fz/B3LMAzDMAzzNPFvAAAA///Odx8+ADAAAA=="
+
+func BaseLayerBCD() ([]byte, error) {
+    gzipBytes, err := base64.StdEncoding.DecodeString(encodedBytes)
+    if err != nil {
+        return nil, err
+    }
+
+    gzipReader, err := gzip.NewReader(bytes.NewBuffer(gzipBytes))
+    if err != nil {
+        return nil, err
+    }
+
+    decodedBytes, err := ioutil.ReadAll(gzipReader)
+    if err != nil {
+        return nil, err
+    }
+
+    return decodedBytes, nil
+}
diff --git a/vendor/github.com/buildpacks/imgutil/layer/pax_permissions.go b/vendor/github.com/buildpacks/imgutil/layer/pax_permissions.go
new file mode 100644
index 0000000000..7bbe5f7ca2
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/layer/pax_permissions.go
@@ -0,0 +1,12 @@
+package layer
+
+// $sddl = (ConvertFrom-SddlString $sddlValue)
+// $sddlBytes = [byte[]]::New($sddl.RawDescriptor.BinaryLength)
+// $sddl.RawDescriptor.GetBinaryForm($sddlBytes, 0)
+// [Convert]::ToBase64String($sddlBytes)
+
+// owner: BUILTIN/Administrators group: BUILTIN/Administrators ($sddlValue="O:BAG:BA")
+const AdministratratorOwnerAndGroupSID = "AQAAgBQAAAAkAAAAAAAAAAAAAAABAgAAAAAABSAAAAAgAgAAAQIAAAAAAAUgAAAAIAIAAA=="
+
+// owner: BUILTIN/Users group: BUILTIN/Users ($sddlValue="O:BUG:BU")
+const UserOwnerAndGroupSID = "AQAAgBQAAAAkAAAAAAAAAAAAAAABAgAAAAAABSAAAAAhAgAAAQIAAAAAAAUgAAAAIQIAAA=="
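[Editor's aside: the PowerShell transcript in the comments above shows how these raw security descriptors were produced. A hedged sketch of using one of them directly (the helper name is hypothetical): pre-setting the same PAX record on a tar header means ensureSecurityDescriptor in windows_writer.go below will leave it untouched.]

package example

import (
    "archive/tar"

    "github.com/buildpacks/imgutil/layer"
)

// ownerOverride stamps a header with the BUILTIN/Users descriptor so the
// default Administrators/Users selection in ensureSecurityDescriptor is bypassed.
func ownerOverride(h *tar.Header) {
    if h.PAXRecords == nil {
        h.PAXRecords = map[string]string{}
    }
    h.PAXRecords["MSWINDOWS.rawsd"] = layer.UserOwnerAndGroupSID
}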
diff --git a/vendor/github.com/buildpacks/imgutil/layer/windows_baselayer.go b/vendor/github.com/buildpacks/imgutil/layer/windows_baselayer.go
new file mode 100644
index 0000000000..d973bb74cc
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/layer/windows_baselayer.go
@@ -0,0 +1,84 @@
+package layer
+
+import (
+    "archive/tar"
+    "bytes"
+    "io"
+)
+
+// Generate using `make generate`
+//go:generate docker run --rm -v $PWD:/out/ bcdhive-generator -file=/out/layer/bcdhive_generated.go -package=layer -func=BaseLayerBCD
+
+// Windows base layers must follow this pattern:
+//  \-> UtilityVM/Files/EFI/Microsoft/Boot/BCD   (file must exist and be a valid BCD format - from bcdhive_gen)
+//  \-> Files/Windows/System32/config/DEFAULT    (file must exist but can be empty)
+//  \-> Files/Windows/System32/config/SAM        (file must exist but can be empty)
+//  \-> Files/Windows/System32/config/SECURITY   (file must exist but can be empty)
+//  \-> Files/Windows/System32/config/SOFTWARE   (file must exist but can be empty)
+//  \-> Files/Windows/System32/config/SYSTEM     (file must exist but can be empty)
+// Refs:
+// https://github.com/microsoft/hcsshim/blob/master/internal/wclayer/legacy.go
+// https://github.com/moby/moby/blob/master/daemon/graphdriver/windows/windows.go#L48
+func WindowsBaseLayer() (io.Reader, error) {
+    bcdBytes, err := BaseLayerBCD()
+    if err != nil {
+        return nil, err
+    }
+
+    layerBuffer := &bytes.Buffer{}
+    tw := tar.NewWriter(layerBuffer)
+
+    if err := tw.WriteHeader(&tar.Header{Name: "UtilityVM", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "UtilityVM/Files", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "UtilityVM/Files/EFI", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "UtilityVM/Files/EFI/Microsoft", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "UtilityVM/Files/EFI/Microsoft/Boot", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+
+    if err := tw.WriteHeader(&tar.Header{Name: "UtilityVM/Files/EFI/Microsoft/Boot/BCD", Size: int64(len(bcdBytes)), Mode: 0644}); err != nil {
+        return nil, err
+    }
+    if _, err := tw.Write(bcdBytes); err != nil {
+        return nil, err
+    }
+
+    if err := tw.WriteHeader(&tar.Header{Name: "Files", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows/System32", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows/System32/config", Typeflag: tar.TypeDir}); err != nil {
+        return nil, err
+    }
+
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows/System32/config/DEFAULT", Size: 0, Mode: 0644}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows/System32/config/SAM", Size: 0, Mode: 0644}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows/System32/config/SECURITY", Size: 0, Mode: 0644}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows/System32/config/SOFTWARE", Size: 0, Mode: 0644}); err != nil {
+        return nil, err
+    }
+    if err := tw.WriteHeader(&tar.Header{Name: "Files/Windows/System32/config/SYSTEM", Size: 0, Mode: 0644}); err != nil {
+        return nil, err
+    }
+
+    return layerBuffer, nil
+}
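[Editor's aside: WindowsBaseLayer returns the scaffold above as an in-memory tar stream. A minimal sketch of persisting it; the function name and output path are hypothetical.]

package example

import (
    "io"
    "os"

    "github.com/buildpacks/imgutil/layer"
)

// writeBaseLayer materializes the Windows base-layer scaffold into a tarball on disk.
func writeBaseLayer() error {
    base, err := layer.WindowsBaseLayer()
    if err != nil {
        return err
    }
    f, err := os.Create("windows-baselayer.tar")
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = io.Copy(f, base)
    return err
}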
tarWriter *tar.Writer + writtenParentPaths map[string]bool +} + +func NewWindowsWriter(fileWriter io.Writer) *WindowsWriter { + return &WindowsWriter{ + tarWriter: tar.NewWriter(fileWriter), + writtenParentPaths: map[string]bool{}, + } +} + +func (w *WindowsWriter) Write(content []byte) (int, error) { + return w.tarWriter.Write(content) +} + +func (w *WindowsWriter) WriteHeader(header *tar.Header) error { + if err := w.initializeLayer(); err != nil { + return err + } + + if err := w.validateHeader(header); err != nil { + return err + } + header.Name = layerFilesPath(header.Name) + + err := w.writeParentPaths(header.Name) + if err != nil { + return err + } + + header.Format = tar.FormatPAX + if header.PAXRecords == nil { + header.PAXRecords = map[string]string{} + } + ensureSecurityDescriptor(header) + + if header.Typeflag == tar.TypeDir { + return w.writeDirHeader(header) + } + return w.tarWriter.WriteHeader(header) +} + +func ensureSecurityDescriptor(header *tar.Header) { + if _, ok := header.PAXRecords["MSWINDOWS.rawsd"]; !ok { + if header.Uid == 0 && header.Gid == 0 { + header.PAXRecords["MSWINDOWS.rawsd"] = AdministratratorOwnerAndGroupSID + } else { + header.PAXRecords["MSWINDOWS.rawsd"] = UserOwnerAndGroupSID + } + } +} + +func (w *WindowsWriter) Close() (err error) { + defer func() { + closeErr := w.tarWriter.Close() + if err == nil { + err = closeErr + } + }() + return w.initializeLayer() +} + +func (w *WindowsWriter) Flush() error { + return w.tarWriter.Flush() +} + +func (w *WindowsWriter) writeParentPaths(childPath string) error { + var parentDir string + for _, pathPart := range strings.Split(path.Dir(childPath), "/") { + parentDir = path.Join(parentDir, pathPart) + + if err := w.writeDirHeader(&tar.Header{ + Name: parentDir, + Typeflag: tar.TypeDir, + }); err != nil { + return err + } + } + return nil +} + +func layerFilesPath(origPath string) string { + return path.Join("Files", origPath) +} + +func (w *WindowsWriter) initializeLayer() error { + if err := w.writeDirHeader(&tar.Header{ + Name: "Files", + Typeflag: tar.TypeDir, + }); err != nil { + return err + } + if err := w.writeDirHeader(&tar.Header{ + Name: "Hives", + Typeflag: tar.TypeDir, + }); err != nil { + return err + } + return nil +} + +func (w *WindowsWriter) writeDirHeader(header *tar.Header) error { + if w.writtenParentPaths[header.Name] { + return nil + } + + if err := w.tarWriter.WriteHeader(header); err != nil { + return err + } + w.writtenParentPaths[header.Name] = true + return nil +} + +func (w *WindowsWriter) validateHeader(header *tar.Header) error { + if !path.IsAbs(header.Name) { + return fmt.Errorf("invalid header name: must be absolute, posix path: %s", header.Name) + } + return nil +} diff --git a/vendor/github.com/buildpacks/imgutil/local/identifier.go b/vendor/github.com/buildpacks/imgutil/local/identifier.go new file mode 100644 index 0000000000..dd24fba1bb --- /dev/null +++ b/vendor/github.com/buildpacks/imgutil/local/identifier.go @@ -0,0 +1,9 @@ +package local + +type IDIdentifier struct { + ImageID string +} + +func (i IDIdentifier) String() string { + return i.ImageID +} diff --git a/vendor/github.com/buildpacks/imgutil/local/local.go b/vendor/github.com/buildpacks/imgutil/local/local.go new file mode 100644 index 0000000000..5751a5c839 --- /dev/null +++ b/vendor/github.com/buildpacks/imgutil/local/local.go @@ -0,0 +1,885 @@ +package local + +import ( + "archive/tar" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + 
"strings" + "sync" + "time" + + "github.com/buildpacks/imgutil/layer" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/pkg/errors" + + "github.com/buildpacks/imgutil" +) + +type Image struct { + docker client.CommonAPIClient + repoName string + inspect types.ImageInspect + layerPaths []string + prevImage *Image // reused layers will be fetched from prevImage + downloadBaseOnce *sync.Once +} + +type ImageOption func(*options) error + +type options struct { + platform imgutil.Platform + baseImageRepoName string + prevImageRepoName string +} + +//WithPreviousImage loads an existing image as a source for reusable layers. +//Use with ReuseLayer(). +//Ignored if image is not found. +func WithPreviousImage(imageName string) ImageOption { + return func(i *options) error { + i.prevImageRepoName = imageName + return nil + } +} + +//FromBaseImage loads an existing image as the config and layers for the new image. +//Ignored if image is not found. +func FromBaseImage(imageName string) ImageOption { + return func(i *options) error { + i.baseImageRepoName = imageName + return nil + } +} + +//WithDefaultPlatform provides Architecture/OS/OSVersion defaults for the new image. +//Defaults for a new image are ignored when FromBaseImage returns an image. +func WithDefaultPlatform(platform imgutil.Platform) ImageOption { + return func(i *options) error { + i.platform = platform + return nil + } +} + +//NewImage returns a new Image that can be modified and saved to a registry. +func NewImage(repoName string, dockerClient client.CommonAPIClient, ops ...ImageOption) (*Image, error) { + imageOpts := &options{} + for _, op := range ops { + if err := op(imageOpts); err != nil { + return nil, err + } + } + + platform, err := defaultPlatform(dockerClient) + if err != nil { + return nil, err + } + + if (imageOpts.platform != imgutil.Platform{}) { + if err := validatePlatformOption(platform, imageOpts.platform); err != nil { + return nil, err + } + platform = imageOpts.platform + } + + inspect := defaultInspect(platform) + + image := &Image{ + docker: dockerClient, + repoName: repoName, + inspect: inspect, + layerPaths: make([]string, len(inspect.RootFS.Layers)), + downloadBaseOnce: &sync.Once{}, + } + + if imageOpts.prevImageRepoName != "" { + if err := processPreviousImageOption(image, imageOpts.prevImageRepoName, platform, dockerClient); err != nil { + return nil, err + } + } + + if imageOpts.baseImageRepoName != "" { + if err := processBaseImageOption(image, imageOpts.baseImageRepoName, platform, dockerClient); err != nil { + return nil, err + } + } + + if image.inspect.Os == "windows" { + if err := prepareNewWindowsImage(image); err != nil { + return nil, err + } + } + + return image, nil +} + +func validatePlatformOption(defaultPlatform imgutil.Platform, optionPlatform imgutil.Platform) error { + if optionPlatform.OS != "" && optionPlatform.OS != defaultPlatform.OS { + return fmt.Errorf("invalid os: platform os %q must match the daemon os %q", optionPlatform.OS, defaultPlatform.OS) + } + + return nil +} + +func processPreviousImageOption(image *Image, prevImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { + if _, err := inspectOptionalImage(dockerClient, prevImageRepoName, platform); err != nil { + return err + } + + prevImage, err := 
NewImage(prevImageRepoName, dockerClient, FromBaseImage(prevImageRepoName)) + if err != nil { + return errors.Wrapf(err, "getting previous image %q", prevImageRepoName) + } + + image.prevImage = prevImage + + return nil +} + +func processBaseImageOption(image *Image, baseImageRepoName string, platform imgutil.Platform, dockerClient client.CommonAPIClient) error { + inspect, err := inspectOptionalImage(dockerClient, baseImageRepoName, platform) + if err != nil { + return err + } + + image.inspect = inspect + image.layerPaths = make([]string, len(image.inspect.RootFS.Layers)) + + return nil +} + +func prepareNewWindowsImage(image *Image) error { + // only append base layer to empty image + if len(image.inspect.RootFS.Layers) > 0 { + return nil + } + + layerReader, err := layer.WindowsBaseLayer() + if err != nil { + return err + } + + layerFile, err := ioutil.TempFile("", "imgutil.local.image.windowsbaselayer") + if err != nil { + return errors.Wrap(err, "creating temp file") + } + defer layerFile.Close() + + hasher := sha256.New() + + multiWriter := io.MultiWriter(layerFile, hasher) + + if _, err := io.Copy(multiWriter, layerReader); err != nil { + return errors.Wrap(err, "copying base layer") + } + + diffID := "sha256:" + hex.EncodeToString(hasher.Sum(nil)) + + if err := image.AddLayerWithDiffID(layerFile.Name(), diffID); err != nil { + return errors.Wrap(err, "adding base layer to image") + } + + return nil +} + +func (i *Image) Label(key string) (string, error) { + labels := i.inspect.Config.Labels + return labels[key], nil +} + +func (i *Image) Labels() (map[string]string, error) { + copiedLabels := make(map[string]string) + for i, l := range i.inspect.Config.Labels { + copiedLabels[i] = l + } + return copiedLabels, nil +} + +func (i *Image) Env(key string) (string, error) { + for _, envVar := range i.inspect.Config.Env { + parts := strings.Split(envVar, "=") + if parts[0] == key { + return parts[1], nil + } + } + return "", nil +} + +func (i *Image) Entrypoint() ([]string, error) { + return i.inspect.Config.Entrypoint, nil +} + +func (i *Image) OS() (string, error) { + return i.inspect.Os, nil +} + +func (i *Image) OSVersion() (string, error) { + return i.inspect.OsVersion, nil +} + +func (i *Image) Architecture() (string, error) { + return i.inspect.Architecture, nil +} + +func (i *Image) Rename(name string) { + i.repoName = name +} + +func (i *Image) Name() string { + return i.repoName +} + +func (i *Image) Found() bool { + return i.inspect.ID != "" +} + +func (i *Image) Identifier() (imgutil.Identifier, error) { + return IDIdentifier{ + ImageID: strings.TrimPrefix(i.inspect.ID, "sha256:"), + }, nil +} + +func (i *Image) CreatedAt() (time.Time, error) { + createdAtTime := i.inspect.Created + createdTime, err := time.Parse(time.RFC3339Nano, createdAtTime) + + if err != nil { + return time.Time{}, err + } + return createdTime, nil +} + +func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error { + ctx := context.Background() + + // FIND TOP LAYER + var keepLayersIdx int + for idx, diffID := range i.inspect.RootFS.Layers { + if diffID == baseTopLayer { + keepLayersIdx = idx + 1 + break + } + } + if keepLayersIdx == 0 { + return fmt.Errorf("%q not found in %q during rebase", baseTopLayer, i.repoName) + } + + // DOWNLOAD IMAGE + if err := i.downloadBaseLayersOnce(); err != nil { + return err + } + + // SWITCH BASE LAYERS + newBaseInspect, _, err := i.docker.ImageInspectWithRaw(ctx, newBase.Name()) + if err != nil { + return errors.Wrapf(err, "read config for new base image 
%q", newBase) + } + i.inspect.ID = newBaseInspect.ID + i.downloadBaseOnce = &sync.Once{} + i.inspect.RootFS.Layers = append(newBaseInspect.RootFS.Layers, i.inspect.RootFS.Layers[keepLayersIdx:]...) + i.layerPaths = append(make([]string, len(newBaseInspect.RootFS.Layers)), i.layerPaths[keepLayersIdx:]...) + return nil +} + +func (i *Image) SetLabel(key, val string) error { + if i.inspect.Config.Labels == nil { + i.inspect.Config.Labels = map[string]string{} + } + + i.inspect.Config.Labels[key] = val + return nil +} + +func (i *Image) SetOS(osVal string) error { + if osVal != i.inspect.Os { + return fmt.Errorf("invalid os: must match the daemon: %q", i.inspect.Os) + } + return nil +} + +func (i *Image) SetOSVersion(osVersion string) error { + i.inspect.OsVersion = osVersion + return nil +} + +func (i *Image) SetArchitecture(architecture string) error { + i.inspect.Architecture = architecture + return nil +} + +func (i *Image) RemoveLabel(key string) error { + delete(i.inspect.Config.Labels, key) + return nil +} + +func (i *Image) SetEnv(key, val string) error { + ignoreCase := i.inspect.Os == "windows" + for idx, kv := range i.inspect.Config.Env { + parts := strings.SplitN(kv, "=", 2) + foundKey := parts[0] + searchKey := key + if ignoreCase { + foundKey = strings.ToUpper(foundKey) + searchKey = strings.ToUpper(searchKey) + } + if foundKey == searchKey { + i.inspect.Config.Env[idx] = fmt.Sprintf("%s=%s", key, val) + return nil + } + } + i.inspect.Config.Env = append(i.inspect.Config.Env, fmt.Sprintf("%s=%s", key, val)) + return nil +} + +func (i *Image) SetWorkingDir(dir string) error { + i.inspect.Config.WorkingDir = dir + return nil +} + +func (i *Image) SetEntrypoint(ep ...string) error { + i.inspect.Config.Entrypoint = ep + return nil +} + +func (i *Image) SetCmd(cmd ...string) error { + i.inspect.Config.Cmd = cmd + return nil +} + +func (i *Image) TopLayer() (string, error) { + all := i.inspect.RootFS.Layers + + if len(all) == 0 { + return "", fmt.Errorf("image %q has no layers", i.repoName) + } + + topLayer := all[len(all)-1] + return topLayer, nil +} + +func (i *Image) GetLayer(diffID string) (io.ReadCloser, error) { + for l := range i.inspect.RootFS.Layers { + if i.inspect.RootFS.Layers[l] != diffID { + continue + } + if i.layerPaths[l] == "" { + if err := i.downloadBaseLayersOnce(); err != nil { + return nil, err + } + if i.layerPaths[l] == "" { + return nil, fmt.Errorf("fetching layer %q from daemon", diffID) + } + } + return os.Open(i.layerPaths[l]) + } + + return nil, fmt.Errorf("image %q does not contain layer with diff ID %q", i.repoName, diffID) +} + +func (i *Image) AddLayer(path string) error { + f, err := os.Open(filepath.Clean(path)) + if err != nil { + return errors.Wrapf(err, "AddLayer: open layer: %s", path) + } + defer f.Close() + hasher := sha256.New() + if _, err := io.Copy(hasher, f); err != nil { + return errors.Wrapf(err, "AddLayer: calculate checksum: %s", path) + } + diffID := "sha256:" + hex.EncodeToString(hasher.Sum(make([]byte, 0, hasher.Size()))) + return i.AddLayerWithDiffID(path, diffID) +} + +func (i *Image) AddLayerWithDiffID(path, diffID string) error { + i.inspect.RootFS.Layers = append(i.inspect.RootFS.Layers, diffID) + i.layerPaths = append(i.layerPaths, path) + return nil +} + +func (i *Image) ReuseLayer(diffID string) error { + if i.prevImage == nil { + return errors.New("failed to reuse layer because no previous image was provided") + } + if !i.prevImage.Found() { + return fmt.Errorf("failed to reuse layer because previous image %q was not found 
in daemon", i.prevImage.repoName) + } + + if err := i.prevImage.downloadBaseLayersOnce(); err != nil { + return err + } + + for l := range i.prevImage.inspect.RootFS.Layers { + if i.prevImage.inspect.RootFS.Layers[l] == diffID { + return i.AddLayerWithDiffID(i.prevImage.layerPaths[l], diffID) + } + } + return fmt.Errorf("SHA %s was not found in %s", diffID, i.prevImage.Name()) +} + +func (i *Image) Save(additionalNames ...string) error { + // during the first save attempt some layers may be excluded. The docker daemon allows this if the given set + // of layers already exists in the daemon in the given order + inspect, err := i.doSave() + if err != nil { + // populate all layer paths and try again without the above performance optimization. + if err := i.downloadBaseLayersOnce(); err != nil { + return err + } + + inspect, err = i.doSave() + if err != nil { + saveErr := imgutil.SaveError{} + for _, n := range append([]string{i.Name()}, additionalNames...) { + saveErr.Errors = append(saveErr.Errors, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) + } + return saveErr + } + } + i.inspect = inspect + + var errs []imgutil.SaveDiagnostic + for _, n := range append([]string{i.Name()}, additionalNames...) { + if err := i.docker.ImageTag(context.Background(), i.inspect.ID, n); err != nil { + errs = append(errs, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) + } + } + + if len(errs) > 0 { + return imgutil.SaveError{Errors: errs} + } + + return nil +} + +func (i *Image) doSave() (types.ImageInspect, error) { + ctx := context.Background() + done := make(chan error) + + t, err := name.NewTag(i.repoName, name.WeakValidation) + if err != nil { + return types.ImageInspect{}, err + } + + // returns valid 'name:tag' appending 'latest', if missing tag + repoName := t.Name() + + pr, pw := io.Pipe() + defer pw.Close() + go func() { + res, err := i.docker.ImageLoad(ctx, pr, true) + if err != nil { + done <- err + return + } + + // only return response error after response is drained and closed + responseErr := checkResponseError(res.Body) + drainCloseErr := ensureReaderClosed(res.Body) + if responseErr != nil { + done <- responseErr + return + } + if drainCloseErr != nil { + done <- drainCloseErr + } + + done <- nil + }() + + tw := tar.NewWriter(pw) + defer tw.Close() + + configFile, err := i.newConfigFile() + if err != nil { + return types.ImageInspect{}, errors.Wrap(err, "generating config file") + } + + id := fmt.Sprintf("%x", sha256.Sum256(configFile)) + if err := addTextToTar(tw, id+".json", configFile); err != nil { + return types.ImageInspect{}, err + } + + var blankIdx int + var layerPaths []string + for _, path := range i.layerPaths { + if path == "" { + layerName := fmt.Sprintf("blank_%d", blankIdx) + blankIdx++ + hdr := &tar.Header{Name: layerName, Mode: 0644, Size: 0} + if err := tw.WriteHeader(hdr); err != nil { + return types.ImageInspect{}, err + } + layerPaths = append(layerPaths, layerName) + } else { + layerName := fmt.Sprintf("/%x.tar", sha256.Sum256([]byte(path))) + f, err := os.Open(filepath.Clean(path)) + if err != nil { + return types.ImageInspect{}, err + } + defer f.Close() + if err := addFileToTar(tw, layerName, f); err != nil { + return types.ImageInspect{}, err + } + f.Close() + layerPaths = append(layerPaths, layerName) + } + } + + manifest, err := json.Marshal([]map[string]interface{}{ + { + "Config": id + ".json", + "RepoTags": []string{repoName}, + "Layers": layerPaths, + }, + }) + if err != nil { + return types.ImageInspect{}, err + } + + if err := addTextToTar(tw, 
"manifest.json", manifest); err != nil { + return types.ImageInspect{}, err + } + + tw.Close() + pw.Close() + err = <-done + if err != nil { + return types.ImageInspect{}, errors.Wrapf(err, "loading image %q. first error", i.repoName) + } + + inspect, _, err := i.docker.ImageInspectWithRaw(context.Background(), id) + if err != nil { + if client.IsErrNotFound(err) { + return types.ImageInspect{}, errors.Wrapf(err, "saving image %q", i.repoName) + } + return types.ImageInspect{}, err + } + + return inspect, nil +} + +func (i *Image) newConfigFile() ([]byte, error) { + cfg, err := v1Config(i.inspect) + if err != nil { + return nil, err + } + return json.Marshal(cfg) +} + +func (i *Image) Delete() error { + if !i.Found() { + return nil + } + options := types.ImageRemoveOptions{ + Force: true, + PruneChildren: true, + } + _, err := i.docker.ImageRemove(context.Background(), i.inspect.ID, options) + return err +} + +func (i *Image) ManifestSize() (int64, error) { + return 0, nil +} + +// downloadBaseLayersOnce exports the base image from the daemon and populates layerPaths the first time it is called. +// subsequent calls do nothing. +func (i *Image) downloadBaseLayersOnce() error { + var err error + if !i.Found() { + return nil + } + i.downloadBaseOnce.Do(func() { + err = i.downloadBaseLayers() + }) + if err != nil { + return errors.Wrap(err, "fetching base layers") + } + return err +} + +func (i *Image) downloadBaseLayers() error { + ctx := context.Background() + + imageReader, err := i.docker.ImageSave(ctx, []string{i.inspect.ID}) + if err != nil { + return errors.Wrapf(err, "saving base image with ID %q from the docker daemon", i.inspect.ID) + } + defer ensureReaderClosed(imageReader) + + tmpDir, err := ioutil.TempDir("", "imgutil.local.image.") + if err != nil { + return errors.Wrap(err, "failed to create temp dir") + } + + err = untar(imageReader, tmpDir) + if err != nil { + return err + } + + mf, err := os.Open(filepath.Clean(filepath.Join(tmpDir, "manifest.json"))) + if err != nil { + return err + } + defer mf.Close() + + var manifest []struct { + Config string + Layers []string + } + if err := json.NewDecoder(mf).Decode(&manifest); err != nil { + return err + } + + if len(manifest) != 1 { + return fmt.Errorf("manifest.json had unexpected number of entries: %d", len(manifest)) + } + + df, err := os.Open(filepath.Clean(filepath.Join(tmpDir, manifest[0].Config))) + if err != nil { + return err + } + defer df.Close() + + var details struct { + RootFS struct { + DiffIDs []string `json:"diff_ids"` + } `json:"rootfs"` + } + + if err = json.NewDecoder(df).Decode(&details); err != nil { + return err + } + + for l := range details.RootFS.DiffIDs { + i.layerPaths[l] = filepath.Join(tmpDir, manifest[0].Layers[l]) + } + + for l := range i.layerPaths { + if i.layerPaths[l] == "" { + return errors.New("failed to download all base layers from daemon") + } + } + + return nil +} + +func addTextToTar(tw *tar.Writer, name string, contents []byte) error { + hdr := &tar.Header{Name: name, Mode: 0644, Size: int64(len(contents))} + if err := tw.WriteHeader(hdr); err != nil { + return err + } + _, err := tw.Write(contents) + return err +} + +func addFileToTar(tw *tar.Writer, name string, contents *os.File) error { + fi, err := contents.Stat() + if err != nil { + return err + } + hdr := &tar.Header{Name: name, Mode: 0644, Size: fi.Size()} + if err := tw.WriteHeader(hdr); err != nil { + return err + } + _, err = io.Copy(tw, contents) + return err +} + +func untar(r io.Reader, dest string) error { + tr := 
tar.NewReader(r) + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + return nil + } + if err != nil { + return err + } + + path := filepath.Join(dest, hdr.Name) + + switch hdr.Typeflag { + case tar.TypeDir: + if err := os.MkdirAll(path, hdr.FileInfo().Mode()); err != nil { + return err + } + case tar.TypeReg, tar.TypeRegA: + _, err := os.Stat(filepath.Dir(path)) + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { + return err + } + } + + fh, err := os.OpenFile(filepath.Clean(path), os.O_CREATE|os.O_WRONLY, hdr.FileInfo().Mode()) + if err != nil { + return err + } + if _, err := io.Copy(fh, tr); err != nil { + fh.Close() + return err + } + fh.Close() + case tar.TypeSymlink: + _, err := os.Stat(filepath.Dir(path)) + if os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(path), 0750); err != nil { + return err + } + } + + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + default: + return fmt.Errorf("unknown file type in tar %d", hdr.Typeflag) + } + } +} + +func inspectOptionalImage(docker client.CommonAPIClient, imageName string, platform imgutil.Platform) (types.ImageInspect, error) { + var ( + err error + inspect types.ImageInspect + ) + + if inspect, _, err = docker.ImageInspectWithRaw(context.Background(), imageName); err != nil { + if client.IsErrNotFound(err) { + return defaultInspect(platform), nil + } + + return types.ImageInspect{}, errors.Wrapf(err, "verifying image %q", imageName) + } + + return inspect, nil +} + +func defaultInspect(platform imgutil.Platform) types.ImageInspect { + return types.ImageInspect{ + Os: platform.OS, + Architecture: platform.Architecture, + OsVersion: platform.OSVersion, + Config: &container.Config{}, + } +} + +func defaultPlatform(dockerClient client.CommonAPIClient) (imgutil.Platform, error) { + daemonInfo, err := dockerClient.Info(context.Background()) + if err != nil { + return imgutil.Platform{}, err + } + + return imgutil.Platform{ + OS: daemonInfo.OSType, + Architecture: "amd64", + }, nil +} + +func v1Config(inspect types.ImageInspect) (v1.ConfigFile, error) { + history := make([]v1.History, len(inspect.RootFS.Layers)) + for i := range history { + // zero history + history[i] = v1.History{ + Created: v1.Time{Time: imgutil.NormalizedDateTime}, + } + } + diffIDs := make([]v1.Hash, len(inspect.RootFS.Layers)) + for i, layer := range inspect.RootFS.Layers { + hash, err := v1.NewHash(layer) + if err != nil { + return v1.ConfigFile{}, err + } + diffIDs[i] = hash + } + exposedPorts := make(map[string]struct{}, len(inspect.Config.ExposedPorts)) + for key, val := range inspect.Config.ExposedPorts { + exposedPorts[string(key)] = val + } + var config v1.Config + if inspect.Config != nil { + var healthcheck *v1.HealthConfig + if inspect.Config.Healthcheck != nil { + healthcheck = &v1.HealthConfig{ + Test: inspect.Config.Healthcheck.Test, + Interval: inspect.Config.Healthcheck.Interval, + Timeout: inspect.Config.Healthcheck.Timeout, + StartPeriod: inspect.Config.Healthcheck.StartPeriod, + Retries: inspect.Config.Healthcheck.Retries, + } + } + config = v1.Config{ + AttachStderr: inspect.Config.AttachStderr, + AttachStdin: inspect.Config.AttachStdin, + AttachStdout: inspect.Config.AttachStdout, + Cmd: inspect.Config.Cmd, + Healthcheck: healthcheck, + Domainname: inspect.Config.Domainname, + Entrypoint: inspect.Config.Entrypoint, + Env: inspect.Config.Env, + Hostname: inspect.Config.Hostname, + Image: inspect.Config.Image, + Labels: inspect.Config.Labels, + OnBuild: 
inspect.Config.OnBuild,
+			OpenStdin:       inspect.Config.OpenStdin,
+			StdinOnce:       inspect.Config.StdinOnce,
+			Tty:             inspect.Config.Tty,
+			User:            inspect.Config.User,
+			Volumes:         inspect.Config.Volumes,
+			WorkingDir:      inspect.Config.WorkingDir,
+			ExposedPorts:    exposedPorts,
+			ArgsEscaped:     inspect.Config.ArgsEscaped,
+			NetworkDisabled: inspect.Config.NetworkDisabled,
+			MacAddress:      inspect.Config.MacAddress,
+			StopSignal:      inspect.Config.StopSignal,
+			Shell:           inspect.Config.Shell,
+		}
+	}
+	return v1.ConfigFile{
+		Architecture: inspect.Architecture,
+		Created:      v1.Time{Time: imgutil.NormalizedDateTime},
+		History:      history,
+		OS:           inspect.Os,
+		OSVersion:    inspect.OsVersion,
+		RootFS: v1.RootFS{
+			Type:    "layers",
+			DiffIDs: diffIDs,
+		},
+		Config: config,
+	}, nil
+}
+
+func checkResponseError(r io.Reader) error {
+	decoder := json.NewDecoder(r)
+	var jsonMessage jsonmessage.JSONMessage
+	if err := decoder.Decode(&jsonMessage); err != nil {
+		return errors.Wrapf(err, "parsing daemon response")
+	}
+
+	if jsonMessage.Error != nil {
+		return errors.Wrap(jsonMessage.Error, "embedded daemon response")
+	}
+	return nil
+}
+
+// ensureReaderClosed drains and closes a reader, returning the first error
+func ensureReaderClosed(r io.ReadCloser) error {
+	_, err := io.Copy(ioutil.Discard, r)
+	if closeErr := r.Close(); closeErr != nil && err == nil {
+		err = closeErr
+	}
+	return err
+}
diff --git a/vendor/github.com/buildpacks/imgutil/remote/identifier.go b/vendor/github.com/buildpacks/imgutil/remote/identifier.go
new file mode 100644
index 0000000000..5638905357
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/remote/identifier.go
@@ -0,0 +1,13 @@
+package remote
+
+import (
+	"github.com/google/go-containerregistry/pkg/name"
+)
+
+type DigestIdentifier struct {
+	Digest name.Digest
+}
+
+func (d DigestIdentifier) String() string {
+	return d.Digest.String()
+}
diff --git a/vendor/github.com/buildpacks/imgutil/remote/remote.go b/vendor/github.com/buildpacks/imgutil/remote/remote.go
new file mode 100644
index 0000000000..d861925aea
--- /dev/null
+++ b/vendor/github.com/buildpacks/imgutil/remote/remote.go
@@ -0,0 +1,734 @@
+package remote
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/buildpacks/imgutil/layer"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
+	"github.com/google/go-containerregistry/pkg/v1/empty"
+	"github.com/google/go-containerregistry/pkg/v1/mutate"
+	"github.com/google/go-containerregistry/pkg/v1/remote"
+	"github.com/google/go-containerregistry/pkg/v1/remote/transport"
+	"github.com/google/go-containerregistry/pkg/v1/tarball"
+	"github.com/google/go-containerregistry/pkg/v1/types"
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/imgutil"
+)
+
+const maxRetries = 2
+
+type Image struct {
+	keychain   authn.Keychain
+	repoName   string
+	image      v1.Image
+	prevLayers []v1.Layer
+}
+
+type options struct {
+	platform          imgutil.Platform
+	baseImageRepoName string
+	prevImageRepoName string
+}
+
+type ImageOption func(*options) error
+
+//WithPreviousImage loads an existing image as a source for reusable layers.
+//Use with ReuseLayer().
+//Ignored if image is not found.
+func WithPreviousImage(imageName string) ImageOption {
+	return func(opts *options) error {
+		opts.prevImageRepoName = imageName
+		return nil
+	}
+}
+
+//FromBaseImage loads an existing image as the config and layers for the new image.
+//Ignored if image is not found.
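+//
+// A minimal usage sketch (hypothetical image names; authn.DefaultKeychain comes
+// from github.com/google/go-containerregistry/pkg/authn, imported above):
+//
+//	img, err := NewImage(
+//		"registry.example.com/app:latest",
+//		authn.DefaultKeychain,
+//		FromBaseImage("registry.example.com/base:latest"),
+//	)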
+func FromBaseImage(imageName string) ImageOption {
+	return func(opts *options) error {
+		opts.baseImageRepoName = imageName
+		return nil
+	}
+}
+
+//WithDefaultPlatform provides Architecture/OS/OSVersion defaults for the new image.
+//Defaults for a new image are ignored when FromBaseImage returns an image.
+//FromBaseImage and WithPreviousImage will use the platform to choose an image from a manifest list.
+func WithDefaultPlatform(platform imgutil.Platform) ImageOption {
+	return func(opts *options) error {
+		opts.platform = platform
+		return nil
+	}
+}
+
+//NewImage returns a new Image that can be modified and saved to a registry.
+func NewImage(repoName string, keychain authn.Keychain, ops ...ImageOption) (*Image, error) {
+	imageOpts := &options{}
+	for _, op := range ops {
+		if err := op(imageOpts); err != nil {
+			return nil, err
+		}
+	}
+
+	platform := defaultPlatform()
+	if (imageOpts.platform != imgutil.Platform{}) {
+		platform = imageOpts.platform
+	}
+
+	image, err := emptyImage(platform)
+	if err != nil {
+		return nil, err
+	}
+
+	ri := &Image{
+		keychain: keychain,
+		repoName: repoName,
+		image:    image,
+	}
+
+	if imageOpts.prevImageRepoName != "" {
+		if err := processPreviousImageOption(ri, imageOpts.prevImageRepoName, platform); err != nil {
+			return nil, err
+		}
+	}
+
+	if imageOpts.baseImageRepoName != "" {
+		if err := processBaseImageOption(ri, imageOpts.baseImageRepoName, platform); err != nil {
+			return nil, err
+		}
+	}
+
+	imgOS, err := ri.OS()
+	if err != nil {
+		return nil, err
+	}
+	if imgOS == "windows" {
+		if err := prepareNewWindowsImage(ri); err != nil {
+			return nil, err
+		}
+	}
+
+	return ri, nil
+}
+
+func processPreviousImageOption(ri *Image, prevImageRepoName string, platform imgutil.Platform) error {
+	prevImage, err := newV1Image(ri.keychain, prevImageRepoName, platform)
+	if err != nil {
+		return err
+	}
+
+	prevLayers, err := prevImage.Layers()
+	if err != nil {
+		return errors.Wrapf(err, "getting layers for previous image with repo name %q", prevImageRepoName)
+	}
+
+	ri.prevLayers = prevLayers
+
+	return nil
+}
+
+func processBaseImageOption(ri *Image, baseImageRepoName string, platform imgutil.Platform) error {
+	baseImage, err := newV1Image(ri.keychain, baseImageRepoName, platform)
+	if err != nil {
+		return err
+	}
+
+	ri.image = baseImage
+
+	return nil
+}
+
+func prepareNewWindowsImage(ri *Image) error {
+	// only append base layer to empty image
+	cfgFile, err := ri.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	if len(cfgFile.RootFS.DiffIDs) > 0 {
+		return nil
+	}
+
+	layerBytes, err := layer.WindowsBaseLayer()
+	if err != nil {
+		return err
+	}
+
+	windowsBaseLayer, err := tarball.LayerFromReader(layerBytes)
+	if err != nil {
+		return err
+	}
+
+	image, err := mutate.AppendLayers(ri.image, windowsBaseLayer)
+	if err != nil {
+		return err
+	}
+
+	ri.image = image
+
+	return nil
+}
+
+func newV1Image(keychain authn.Keychain, repoName string, platform imgutil.Platform) (v1.Image, error) {
+	ref, auth, err := referenceForRepoName(keychain, repoName)
+	if err != nil {
+		return nil, err
+	}
+
+	v1Platform := v1.Platform{
+		Architecture: platform.Architecture,
+		OS:           platform.OS,
+		OSVersion:    platform.OSVersion,
+	}
+
+	var image v1.Image
+	for i := 0; i <= maxRetries; i++ {
+		time.Sleep(100 * time.Duration(i) * time.Millisecond) // wait if retrying
+		image, err = remote.Image(ref, remote.WithAuth(auth), remote.WithTransport(http.DefaultTransport), remote.WithPlatform(v1Platform))
+		if err != nil {
+			if err == io.EOF && i != maxRetries
{ + continue // retry if EOF + } + if transportErr, ok := err.(*transport.Error); ok && len(transportErr.Errors) > 0 { + switch transportErr.StatusCode { + case http.StatusNotFound, http.StatusUnauthorized: + return emptyImage(platform) + } + } + if strings.Contains(err.Error(), "no child with platform") { + return emptyImage(platform) + } + return nil, errors.Wrapf(err, "connect to repo store %q", repoName) + } + break + } + + return image, nil +} + +func emptyImage(platform imgutil.Platform) (v1.Image, error) { + cfg := &v1.ConfigFile{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + RootFS: v1.RootFS{ + Type: "layers", + DiffIDs: []v1.Hash{}, + }, + } + + return mutate.ConfigFile(empty.Image, cfg) +} + +func defaultPlatform() imgutil.Platform { + return imgutil.Platform{ + OS: "linux", + Architecture: "amd64", + } +} + +func referenceForRepoName(keychain authn.Keychain, ref string) (name.Reference, authn.Authenticator, error) { + var auth authn.Authenticator + r, err := name.ParseReference(ref, name.WeakValidation) + if err != nil { + return nil, nil, err + } + + auth, err = keychain.Resolve(r.Context().Registry) + if err != nil { + return nil, nil, err + } + return r, auth, nil +} + +func (i *Image) Label(key string) (string, error) { + cfg, err := i.image.ConfigFile() + if err != nil { + return "", errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return "", fmt.Errorf("missing config for image %q", i.repoName) + } + labels := cfg.Config.Labels + return labels[key], nil +} + +func (i *Image) Labels() (map[string]string, error) { + cfg, err := i.image.ConfigFile() + if err != nil { + return nil, errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return nil, fmt.Errorf("missing config for image %q", i.repoName) + } + return cfg.Config.Labels, nil +} + +func (i *Image) Env(key string) (string, error) { + cfg, err := i.image.ConfigFile() + if err != nil { + return "", errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return "", fmt.Errorf("missing config for image %q", i.repoName) + } + for _, envVar := range cfg.Config.Env { + parts := strings.Split(envVar, "=") + if parts[0] == key { + return parts[1], nil + } + } + return "", nil +} + +func (i *Image) Entrypoint() ([]string, error) { + cfg, err := i.image.ConfigFile() + if err != nil { + return nil, errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return nil, fmt.Errorf("missing config for image %q", i.repoName) + } + return cfg.Config.Entrypoint, nil +} + +func (i *Image) OS() (string, error) { + cfg, err := i.image.ConfigFile() + if err != nil { + return "", errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return "", fmt.Errorf("missing config for image %q", i.repoName) + } + if cfg.OS == "" { + return "", fmt.Errorf("missing OS for image %q", i.repoName) + } + return cfg.OS, nil +} + +func (i *Image) OSVersion() (string, error) { + cfg, err := i.image.ConfigFile() + if err != nil { + return "", errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return "", fmt.Errorf("missing config for image %q", i.repoName) + } + return cfg.OSVersion, nil +} + +func (i *Image) Architecture() (string, error) { + cfg, err := i.image.ConfigFile() + if err != nil { + return "", errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return 
"", fmt.Errorf("missing config for image %q", i.repoName) + } + if cfg.Architecture == "" { + return "", fmt.Errorf("missing Architecture for image %q", i.repoName) + } + return cfg.Architecture, nil +} + +func (i *Image) Rename(name string) { + i.repoName = name +} + +func (i *Image) Name() string { + return i.repoName +} + +func (i *Image) Found() bool { + _, err := i.found() + return err == nil +} + +func (i *Image) CheckReadWriteAccess() bool { + ref, _, err := referenceForRepoName(i.keychain, i.repoName) + if err != nil { + return false + } + return i.CheckReadAccess() && remote.CheckPushPermission(ref, i.keychain, http.DefaultTransport) == nil +} + +func (i *Image) CheckReadAccess() bool { + _, err := i.found() + if err != nil { + if transportErr, ok := err.(*transport.Error); ok { + return transportErr.StatusCode != http.StatusUnauthorized && + transportErr.StatusCode != http.StatusForbidden + } + return false + } + return true +} + +func (i *Image) found() (*v1.Descriptor, error) { + ref, auth, err := referenceForRepoName(i.keychain, i.repoName) + if err != nil { + return nil, err + } + return remote.Head(ref, remote.WithAuth(auth), remote.WithTransport(http.DefaultTransport)) +} + +func (i *Image) Identifier() (imgutil.Identifier, error) { + ref, err := name.ParseReference(i.repoName, name.WeakValidation) + if err != nil { + return nil, errors.Wrapf(err, "parsing reference for image %q", i.repoName) + } + + hash, err := i.image.Digest() + if err != nil { + return nil, errors.Wrapf(err, "getting digest for image %q", i.repoName) + } + + digestRef, err := name.NewDigest(fmt.Sprintf("%s@%s", ref.Context().Name(), hash.String()), name.WeakValidation) + if err != nil { + return nil, errors.Wrap(err, "creating digest reference") + } + + return DigestIdentifier{ + Digest: digestRef, + }, nil +} + +func (i *Image) CreatedAt() (time.Time, error) { + configFile, err := i.image.ConfigFile() + if err != nil { + return time.Time{}, errors.Wrapf(err, "getting createdAt time for image %q", i.repoName) + } + return configFile.Created.UTC(), nil +} + +func (i *Image) Rebase(baseTopLayer string, newBase imgutil.Image) error { + newBaseRemote, ok := newBase.(*Image) + if !ok { + return errors.New("expected new base to be a remote image") + } + + newImage, err := mutate.Rebase(i.image, &subImage{img: i.image, topDiffID: baseTopLayer}, newBaseRemote.image) + if err != nil { + return errors.Wrap(err, "rebase") + } + + newImageConfig, err := newImage.ConfigFile() + if err != nil { + return err + } + + newBaseRemoteConfig, err := newBaseRemote.image.ConfigFile() + if err != nil { + return err + } + + newImageConfig.Architecture = newBaseRemoteConfig.Architecture + newImageConfig.OS = newBaseRemoteConfig.OS + newImageConfig.OSVersion = newBaseRemoteConfig.OSVersion + + newImage, err = mutate.ConfigFile(newImage, newImageConfig) + if err != nil { + return err + } + + i.image = newImage + return nil +} + +func (i *Image) SetLabel(key, val string) error { + configFile, err := i.image.ConfigFile() + if err != nil { + return err + } + config := *configFile.Config.DeepCopy() + if config.Labels == nil { + config.Labels = map[string]string{} + } + config.Labels[key] = val + i.image, err = mutate.Config(i.image, config) + return err +} + +func (i *Image) RemoveLabel(key string) error { + cfg, err := i.image.ConfigFile() + if err != nil { + return errors.Wrapf(err, "getting config file for image %q", i.repoName) + } + if cfg == nil { + return fmt.Errorf("missing config for image %q", i.repoName) + } + config := 
*cfg.Config.DeepCopy()
+	delete(config.Labels, key)
+	i.image, err = mutate.Config(i.image, config)
+	return err
+}
+
+func (i *Image) SetEnv(key, val string) error {
+	configFile, err := i.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	config := *configFile.Config.DeepCopy()
+	ignoreCase := configFile.OS == "windows"
+	for idx, e := range config.Env {
+		parts := strings.Split(e, "=")
+		foundKey := parts[0]
+		searchKey := key
+		if ignoreCase {
+			foundKey = strings.ToUpper(foundKey)
+			searchKey = strings.ToUpper(searchKey)
+		}
+		if foundKey == searchKey {
+			config.Env[idx] = fmt.Sprintf("%s=%s", key, val)
+			i.image, err = mutate.Config(i.image, config)
+			return err
+		}
+	}
+	config.Env = append(config.Env, fmt.Sprintf("%s=%s", key, val))
+	i.image, err = mutate.Config(i.image, config)
+	return err
+}
+
+func (i *Image) SetWorkingDir(dir string) error {
+	configFile, err := i.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	config := *configFile.Config.DeepCopy()
+	config.WorkingDir = dir
+	i.image, err = mutate.Config(i.image, config)
+	return err
+}
+
+func (i *Image) SetEntrypoint(ep ...string) error {
+	configFile, err := i.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	config := *configFile.Config.DeepCopy()
+	config.Entrypoint = ep
+	i.image, err = mutate.Config(i.image, config)
+	return err
+}
+
+func (i *Image) SetCmd(cmd ...string) error {
+	configFile, err := i.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	config := *configFile.Config.DeepCopy()
+	config.Cmd = cmd
+	i.image, err = mutate.Config(i.image, config)
+	return err
+}
+
+func (i *Image) SetOS(osVal string) error {
+	configFile, err := i.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	configFile.OS = osVal
+	i.image, err = mutate.ConfigFile(i.image, configFile)
+	return err
+}
+
+func (i *Image) SetOSVersion(osVersion string) error {
+	configFile, err := i.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	configFile.OSVersion = osVersion
+	i.image, err = mutate.ConfigFile(i.image, configFile)
+	return err
+}
+
+func (i *Image) SetArchitecture(architecture string) error {
+	configFile, err := i.image.ConfigFile()
+	if err != nil {
+		return err
+	}
+	configFile.Architecture = architecture
+	i.image, err = mutate.ConfigFile(i.image, configFile)
+	return err
+}
+
+func (i *Image) TopLayer() (string, error) {
+	all, err := i.image.Layers()
+	if err != nil {
+		return "", err
+	}
+	if len(all) == 0 {
+		return "", fmt.Errorf("image %q has no layers", i.Name())
+	}
+	topLayer := all[len(all)-1]
+	hex, err := topLayer.DiffID()
+	if err != nil {
+		return "", err
+	}
+	return hex.String(), nil
+}
+
+func (i *Image) GetLayer(sha string) (io.ReadCloser, error) {
+	layers, err := i.image.Layers()
+	if err != nil {
+		return nil, err
+	}
+
+	layer, err := findLayerWithSha(layers, sha)
+	if err != nil {
+		return nil, err
+	}
+
+	return layer.Uncompressed()
+}
+
+func (i *Image) AddLayer(path string) error {
+	layer, err := tarball.LayerFromFile(path)
+	if err != nil {
+		return err
+	}
+	i.image, err = mutate.AppendLayers(i.image, layer)
+	if err != nil {
+		return errors.Wrap(err, "add layer")
+	}
+	return nil
+}
+
+func (i *Image) AddLayerWithDiffID(path, diffID string) error {
+	// this is equivalent to AddLayer in the remote case
+	// it exists to optimize performance for local images
+	return i.AddLayer(path)
+}
+
+func (i *Image) ReuseLayer(sha string) error {
+	layer, err := findLayerWithSha(i.prevLayers, sha)
+	if err != nil {
+		return err
+	}
+	i.image, err =
mutate.AppendLayers(i.image, layer) + return err +} + +func findLayerWithSha(layers []v1.Layer, diffID string) (v1.Layer, error) { + for _, layer := range layers { + dID, err := layer.DiffID() + if err != nil { + return nil, errors.Wrap(err, "get diff ID for previous image layer") + } + if diffID == dID.String() { + return layer, nil + } + } + return nil, fmt.Errorf("previous image did not have layer with diff id %q", diffID) +} + +func (i *Image) Save(additionalNames ...string) error { + var err error + + allNames := append([]string{i.repoName}, additionalNames...) + + i.image, err = mutate.CreatedAt(i.image, v1.Time{Time: imgutil.NormalizedDateTime}) + if err != nil { + return errors.Wrap(err, "set creation time") + } + + cfg, err := i.image.ConfigFile() + if err != nil { + return errors.Wrap(err, "get image config") + } + cfg = cfg.DeepCopy() + + layers, err := i.image.Layers() + if err != nil { + return errors.Wrap(err, "get image layers") + } + cfg.History = make([]v1.History, len(layers)) + for i := range cfg.History { + cfg.History[i] = v1.History{ + Created: v1.Time{Time: imgutil.NormalizedDateTime}, + } + } + + cfg.DockerVersion = "" + cfg.Container = "" + i.image, err = mutate.ConfigFile(i.image, cfg) + if err != nil { + return errors.Wrap(err, "zeroing history") + } + + var diagnostics []imgutil.SaveDiagnostic + for _, n := range allNames { + if err := i.doSave(n); err != nil { + diagnostics = append(diagnostics, imgutil.SaveDiagnostic{ImageName: n, Cause: err}) + } + } + if len(diagnostics) > 0 { + return imgutil.SaveError{Errors: diagnostics} + } + + return nil +} + +func (i *Image) doSave(imageName string) error { + ref, auth, err := referenceForRepoName(i.keychain, imageName) + if err != nil { + return err + } + return remote.Write(ref, i.image, remote.WithAuth(auth)) +} + +func (i *Image) Delete() error { + id, err := i.Identifier() + if err != nil { + return err + } + ref, auth, err := referenceForRepoName(i.keychain, id.String()) + if err != nil { + return err + } + return remote.Delete(ref, remote.WithAuth(auth)) +} + +func (i *Image) ManifestSize() (int64, error) { + return i.image.Size() +} + +type subImage struct { + img v1.Image + topDiffID string +} + +func (si *subImage) Layers() ([]v1.Layer, error) { + all, err := si.img.Layers() + if err != nil { + return nil, err + } + for i, l := range all { + d, err := l.DiffID() + if err != nil { + return nil, err + } + if d.String() == si.topDiffID { + return all[0 : i+1], nil + } + } + return nil, errors.New("could not find base layer in image") +} +func (si *subImage) ConfigFile() (*v1.ConfigFile, error) { return si.img.ConfigFile() } +func (si *subImage) BlobSet() (map[v1.Hash]struct{}, error) { panic("Not Implemented") } +func (si *subImage) MediaType() (types.MediaType, error) { panic("Not Implemented") } +func (si *subImage) ConfigName() (v1.Hash, error) { panic("Not Implemented") } +func (si *subImage) RawConfigFile() ([]byte, error) { panic("Not Implemented") } +func (si *subImage) Digest() (v1.Hash, error) { panic("Not Implemented") } +func (si *subImage) Manifest() (*v1.Manifest, error) { panic("Not Implemented") } +func (si *subImage) RawManifest() ([]byte, error) { panic("Not Implemented") } +func (si *subImage) LayerByDigest(v1.Hash) (v1.Layer, error) { panic("Not Implemented") } +func (si *subImage) LayerByDiffID(v1.Hash) (v1.Layer, error) { panic("Not Implemented") } +func (si *subImage) Size() (int64, error) { panic("Not Implemented") } diff --git a/vendor/github.com/buildpacks/lifecycle/.dockerignore 
b/vendor/github.com/buildpacks/lifecycle/.dockerignore new file mode 100644 index 0000000000..1fcb1529f8 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/.dockerignore @@ -0,0 +1 @@ +out diff --git a/vendor/github.com/buildpacks/lifecycle/.gitignore b/vendor/github.com/buildpacks/lifecycle/.gitignore new file mode 100644 index 0000000000..ebc79ed6d5 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/.gitignore @@ -0,0 +1,10 @@ +.DS_Store +.idea +*.coverprofile +*.test +*~ +/out + +acceptance/testdata/*/**/container/cnb/lifecycle/* +acceptance/testdata/*/**/container/docker-config/* +acceptance/testdata/exporter/container/layers/*analyzed.toml diff --git a/vendor/github.com/buildpacks/lifecycle/.gitpod.yml b/vendor/github.com/buildpacks/lifecycle/.gitpod.yml new file mode 100644 index 0000000000..098b5ca3e6 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/.gitpod.yml @@ -0,0 +1,19 @@ + +tasks: + # allow socket to be writable by all + # (necessary for acceptance tests / calls from within container) + - init: chmod ugo+w /var/run/docker.sock + # build linux to install dependencies + - init: make tidy build-linux +github: + prebuilds: + master: true + branches: true + pullRequests: true + pullRequestsFromForks: true + addCheck: true + +vscode: + extensions: + - golang.go + - ms-azuretools.vscode-docker diff --git a/vendor/github.com/buildpacks/lifecycle/CODEOWNERS b/vendor/github.com/buildpacks/lifecycle/CODEOWNERS new file mode 100644 index 0000000000..d3a11dc1d0 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/CODEOWNERS @@ -0,0 +1 @@ +* @buildpacks/implementation-maintainers diff --git a/vendor/github.com/buildpacks/lifecycle/CONTRIBUTING.md b/vendor/github.com/buildpacks/lifecycle/CONTRIBUTING.md new file mode 100644 index 0000000000..cf795ca2e0 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/CONTRIBUTING.md @@ -0,0 +1,42 @@ +## Policies + +This repository adheres to the following project policies: + +- [Code of Conduct][code-of-conduct] - How we should act with each other. +- [Contributing][contributing] - General contributing standards. +- [Security][security] - Reporting security concerns. +- [Support][support] - Getting support. + +## Contributing to this repository + +### Welcome + +We welcome contributions to this repository! To get a sense of what the team is currently focusing on, check out our [milestones](https://github.com/buildpacks/lifecycle/milestones). Issues labeled [good first issue](https://github.com/buildpacks/lifecycle/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22) and issues in our [docs repo](https://github.com/buildpacks/docs/issues?q=is%3Aissue+is%3Aopen+label%3Ateam%2Fimplementations) are great places to get started, but you are welcome to work on any issue that interests you. For issues requiring a greater degree of coordination, such as those labeled `status/needs-discussion` or that are part of larger epics, please reach out in the #implementation channel in [Slack](https://slack.buildpacks.io/). + +### Development + +Aside from the policies above, you may find [DEVELOPMENT.md](DEVELOPMENT.md) helpful in developing in this repository. 
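+
+As a quick orientation, a typical local development loop might look like the sketch below; the `make` targets are documented in [DEVELOPMENT.md](DEVELOPMENT.md):
+
+```bash
+# format, vet, and run the tests
+make test
+
+# build the lifecycle binaries into out/
+make build
+```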
+
+### Background
+
+Here are some topics that might be helpful in further understanding the lifecycle:
+
+* Cloud Native Buildpacks platform api spec
+  * Example platforms: [pack CLI](https://github.com/buildpack/pack), [Tekton](https://github.com/tektoncd/catalog/blob/master/task/buildpacks/0.1/README.md)
+* Cloud Native Buildpacks buildpack api spec
+  * Example buildpack providers: [Google](https://github.com/GoogleCloudPlatform/buildpacks), [Heroku](https://www.heroku.com/), [Paketo](https://paketo.io/)
+* The Open Container Initiative (OCI) and [OCI image spec](https://github.com/opencontainers/image-spec)
+* Questions to deepen understanding:
+  * What are the different [lifecycle phases](https://buildpacks.io/docs/concepts/components/lifecycle/)? What is the purpose of each phase?
+  * What is a [builder](https://buildpacks.io/docs/concepts/components/builder/)? Is it required to run the lifecycle?
+  * What is the [untrusted builder workflow](https://medium.com/buildpacks/faster-more-secure-builds-with-pack-0-11-0-4d0c633ca619)? Why do we have this flow?
+  * What is the [launcher](https://github.com/buildpacks/spec/blob/main/platform.md#launch)? Why do we have a launcher?
+  * What does a [buildpack](https://buildpacks.io/docs/concepts/components/buildpack/) do? Where does it write data? How does it communicate with the lifecycle?
+  * What does a [platform](https://buildpacks.io/docs/concepts/components/platform/) do? What things does it know about that the lifecycle does not? How does it communicate with the lifecycle?
+  * What is a [stack](https://buildpacks.io/docs/concepts/components/stack/)? Who produces stacks? Why is the stack concept important for the lifecycle?
+
+[code-of-conduct]: https://github.com/buildpacks/.github/blob/main/CODE_OF_CONDUCT.md
+[contributing]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTING.md
+[security]: https://github.com/buildpacks/.github/blob/main/SECURITY.md
+[support]: https://github.com/buildpacks/.github/blob/main/SUPPORT.md
+[pull-request-process]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTIONS.md#pull-request-process
diff --git a/vendor/github.com/buildpacks/lifecycle/DEVELOPMENT.md b/vendor/github.com/buildpacks/lifecycle/DEVELOPMENT.md
new file mode 100644
index 0000000000..56cd50d784
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/DEVELOPMENT.md
@@ -0,0 +1,92 @@
+# Development
+
+## Prerequisites
+
+* [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
+  * macOS: _(built-in)_
+  * Windows:
+    * `choco install git -y`
+    * `git config --global core.autocrlf false`
+* [Go](https://golang.org/doc/install)
+  * macOS: `brew install go`
+  * Windows: `choco install golang -y`
+* [Docker](https://www.docker.com/products/docker-desktop)
+* Make (and build tools)
+  * macOS: `xcode-select --install`
+  * Windows:
+    * `choco install cygwin make -y`
+    * `[Environment]::SetEnvironmentVariable("PATH", "C:\tools\cygwin\bin;$ENV:PATH", "MACHINE")`
+
+### Caveats
+
+* The acceptance tests require the docker daemon to be able to communicate with a local containerized insecure registry. On Docker Desktop 3.3.x, this may result in failures such as: `Expected nil: push response: : Get http://localhost:<port>/v2/: dial tcp [::1]:<port>: connect: connection refused`.
To fix these failures, it may be necessary to add the following to the Docker Desktop Engine config:
+  * macOS: Docker > Preferences > Docker Engine:
+```
+ "insecure-registries": [
+ "<host-ip>/32"
+ ]
+```
+
+### Testing GitHub actions on forks
+
+The lifecycle release process involves chaining a series of GitHub actions together such that:
+* The "build" workflow creates the artifacts
+  * .tgz files containing the lifecycle binaries, shasums for the .tgz files, a cosign public key, an SBOM, etc.
+  * OCI images containing the lifecycle binaries, tagged with their commit sha (for more information, see RELEASE.md)
+* The "draft-release" workflow finds the artifacts and downloads them, creating the draft release
+* The "post-release" workflow re-tags the OCI images that were created during the "build" workflow with the release version
+
+It can be rather cumbersome to test changes to these workflows, as they are heavily intertwined. Thus we recommend forking the buildpacks/lifecycle repository in GitHub and running through the entire release process end-to-end.
+For the fork, it is necessary to add the following secrets:
+* COSIGN_PASSWORD (see [cosign](https://github.com/sigstore/cosign#generate-a-keypair))
+* COSIGN_PRIVATE_KEY
+* DOCKER_PASSWORD (if not using ghcr.io)
+* DOCKER_USERNAME (if not using ghcr.io)
+
+The tools/test-fork.sh script can be used to update the source code to reflect the state of the fork.
+It can be invoked like so: `./tools/test-fork.sh <fork-owner>`
+
+## Tasks
+
+To test, build, and package binaries into an archive, simply run:
+
+```bash
+$ make all
+```
+This will create archives at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz` and `out/lifecycle-<LIFECYCLE_VERSION>+windows.x86-64.tgz`.
+
+`LIFECYCLE_VERSION` defaults to the value returned by `git describe --tags` if not on a release branch (for more information about the release process, see [RELEASE](RELEASE.md)). It can be changed by prepending `LIFECYCLE_VERSION=<version>` to the
+`make` command. For example:
+
+```bash
+$ LIFECYCLE_VERSION=1.2.3 make all
+```
+
+Steps can also be run individually as shown below.
+
+### Test
+
+Formats, vets, and tests the code.
+
+```bash
+$ make test
+```
+
+### Build
+
+Builds binaries to `out/linux/lifecycle/` and `out/windows/lifecycle/`.
+
+```bash
+$ make build
+```
+
+> To clean the `out/` directory, run `make clean`.
+
+### Package
+
+Creates archives at `out/lifecycle-<LIFECYCLE_VERSION>+linux.x86-64.tgz` and `out/lifecycle-<LIFECYCLE_VERSION>+windows.x86-64.tgz`, using the contents of the
+`out/linux/lifecycle/` directory, for the given (or default) `LIFECYCLE_VERSION`.
+
+```bash
+$ make package
+```
diff --git a/vendor/github.com/buildpacks/lifecycle/IMAGE.md b/vendor/github.com/buildpacks/lifecycle/IMAGE.md
new file mode 100644
index 0000000000..d3ab324c29
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/IMAGE.md
@@ -0,0 +1,39 @@
+# Quick reference
+
+This image is maintained by the [Cloud Native Buildpacks project](https://buildpacks.io/). The maintainers can be contacted via the [Cloud Native Buildpacks Slack](https://slack.buildpacks.io/), or by opening an issue on the `buildpacks/lifecycle` [GitHub repo](https://github.com/buildpacks/lifecycle).
+
+# Supported tags
+
+Supported tags are semver-versioned manifest lists - e.g., `0.12.0` or `0.12.0-rc.1`, pointing to one of the following os/architectures:
+* `linux/amd64`
+* `linux/arm64`
+* `windows/amd64`
+
+# About this image
+
+Images are built in [GitHub actions](https://github.com/buildpacks/lifecycle/actions) and signed with [`cosign`](https://github.com/sigstore/cosign).
To verify:
+* Locate the public key `lifecycle-v<version>-cosign.pub` on the [releases page](https://github.com/buildpacks/lifecycle/releases)
+* Run:
+```
+cosign verify -key lifecycle-v<version>-cosign.pub buildpacksio/lifecycle:<version>
+```
+
+A CycloneDX SBOM is "attached" to the image and signed with [`cosign`](https://github.com/sigstore/cosign). To verify:
+* Locate the public key `lifecycle-v<version>-cosign.pub` on the [releases page](https://github.com/buildpacks/lifecycle/releases)
+* Run:
+```
+cosign version # must be at least 1.2.0
+cosign verify -key lifecycle-v<version>-cosign.pub -a tag=<version> -attachment sbom buildpacksio/lifecycle:<version>
+cosign download sbom buildpacksio/lifecycle:<version>
+```
+
+# Using this image
+
+With [pack](https://github.com/buildpack/pack):
+* `pack build <image-name> --lifecycle-image buildpacksio/lifecycle:<version>`
+
+With [tekton](https://github.com/tektoncd/catalog/tree/main/task/buildpacks-phases/0.2):
+* Provide as param `LIFECYCLE_IMAGE` in taskrun
+
+***
+[Source](https://github.com/buildpacks/lifecycle/blob/main/IMAGE.md) for this page
\ No newline at end of file
diff --git a/vendor/github.com/buildpacks/lifecycle/LICENSE b/vendor/github.com/buildpacks/lifecycle/LICENSE
new file mode 100644
index 0000000000..a83b632218
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Stephen Levine + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
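The Makefile that follows defines per-platform build and package targets in addition to the aggregate `all` target. As a usage sketch (target names and output paths are taken from the Makefile itself; `BUILD_DIR` defaults to `./out`):

```bash
$ make build-linux-arm64 package-linux-arm64
$ ls out/linux-arm64/lifecycle   # lifecycle, launcher, and phase symlinks
```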
diff --git a/vendor/github.com/buildpacks/lifecycle/Makefile b/vendor/github.com/buildpacks/lifecycle/Makefile new file mode 100644 index 0000000000..aa41d27c02 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/Makefile @@ -0,0 +1,315 @@ +ifeq ($(OS),Windows_NT) +SHELL:=cmd.exe +PWD?=$(subst /,\,${CURDIR}) +LDFLAGS=-s -w +BLANK:= +/:=\$(BLANK) +else +/:=/ +endif + +PARSED_COMMIT:=$(shell git rev-parse --short HEAD) + +ifeq ($(LIFECYCLE_VERSION),) +LIFECYCLE_VERSION:=$(shell go run tools/version/main.go) +LIFECYCLE_IMAGE_TAG?=$(PARSED_COMMIT) +else +LIFECYCLE_IMAGE_TAG?=$(LIFECYCLE_VERSION) +endif + +ACCEPTANCE_TIMEOUT?=2400s +GOCMD?=go +GOENV=GOARCH=$(GOARCH) CGO_ENABLED=0 +LIFECYCLE_DESCRIPTOR_PATH?=lifecycle.toml +SCM_REPO?=github.com/buildpacks/lifecycle +SCM_COMMIT?=$(PARSED_COMMIT) +LDFLAGS=-s -w +LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.SCMRepository=$(SCM_REPO)' +LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.SCMCommit=$(SCM_COMMIT)' +LDFLAGS+=-X 'github.com/buildpacks/lifecycle/cmd.Version=$(LIFECYCLE_VERSION)' +GOBUILD:=go build $(GOFLAGS) -ldflags "$(LDFLAGS)" +GOTEST=$(GOCMD) test $(GOFLAGS) +BUILD_DIR?=$(PWD)$/out +LINUX_COMPILATION_IMAGE?=golang:1.16-alpine +WINDOWS_COMPILATION_IMAGE?=golang:1.16-windowsservercore-1809 +SOURCE_COMPILATION_IMAGE?=lifecycle-img +BUILD_CTR?=lifecycle-ctr +DOCKER_CMD?=make test + +GOFILES := $(shell $(GOCMD) run tools$/lister$/main.go) + +all: test build package + +build: build-linux-amd64 build-linux-arm64 build-windows-amd64 + +build-linux-amd64: build-linux-amd64-lifecycle build-linux-amd64-symlinks build-linux-amd64-launcher +build-linux-arm64: build-linux-arm64-lifecycle build-linux-arm64-symlinks build-linux-arm64-launcher +build-windows-amd64: build-windows-amd64-lifecycle build-windows-amd64-symlinks build-windows-amd64-launcher + +build-image-linux-amd64: build-linux-amd64 package-linux-amd64 +build-image-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.x86-64.tgz +build-image-linux-amd64: + $(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch amd64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG) + +build-image-linux-arm64: build-linux-arm64 package-linux-arm64 +build-image-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+linux.arm64.tgz +build-image-linux-arm64: + $(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os linux -arch arm64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG) + +build-image-windows-amd64: build-windows-amd64 package-windows-amd64 +build-image-windows-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+windows.x86-64.tgz +build-image-windows-amd64: + $(GOCMD) run ./tools/image/main.go -daemon -lifecyclePath $(ARCHIVE_PATH) -os windows -arch amd64 -tag lifecycle:$(LIFECYCLE_IMAGE_TAG) + +build-linux-amd64-lifecycle: $(BUILD_DIR)/linux-amd64/lifecycle/lifecycle + +build-linux-arm64-lifecycle: $(BUILD_DIR)/linux-arm64/lifecycle/lifecycle + +$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: export GOOS:=linux +$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: export GOARCH:=amd64 +$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: $(GOFILES) +$(BUILD_DIR)/linux-amd64/lifecycle/lifecycle: + @echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..." 
+ mkdir -p $(OUT_DIR) + $(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle + +$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: export GOOS:=linux +$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: export GOARCH:=arm64 +$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: $(GOFILES) +$(BUILD_DIR)/linux-arm64/lifecycle/lifecycle: + @echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..." + mkdir -p $(OUT_DIR) + $(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle + +build-linux-amd64-launcher: $(BUILD_DIR)/linux-amd64/lifecycle/launcher + +$(BUILD_DIR)/linux-amd64/lifecycle/launcher: export GOOS:=linux +$(BUILD_DIR)/linux-amd64/lifecycle/launcher: export GOARCH:=amd64 +$(BUILD_DIR)/linux-amd64/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/linux-amd64/lifecycle/launcher: $(GOFILES) +$(BUILD_DIR)/linux-amd64/lifecycle/launcher: + @echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..." + mkdir -p $(OUT_DIR) + $(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher + test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3 + +build-linux-arm64-launcher: $(BUILD_DIR)/linux-arm64/lifecycle/launcher + +$(BUILD_DIR)/linux-arm64/lifecycle/launcher: export GOOS:=linux +$(BUILD_DIR)/linux-arm64/lifecycle/launcher: export GOARCH:=arm64 +$(BUILD_DIR)/linux-arm64/lifecycle/launcher: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/linux-arm64/lifecycle/launcher: $(GOFILES) +$(BUILD_DIR)/linux-arm64/lifecycle/launcher: + @echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..." + mkdir -p $(OUT_DIR) + $(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher + test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 3 + +build-linux-amd64-symlinks: export GOOS:=linux +build-linux-amd64-symlinks: export GOARCH:=amd64 +build-linux-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +build-linux-amd64-symlinks: + @echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..." + ln -sf lifecycle $(OUT_DIR)/detector + ln -sf lifecycle $(OUT_DIR)/analyzer + ln -sf lifecycle $(OUT_DIR)/restorer + ln -sf lifecycle $(OUT_DIR)/builder + ln -sf lifecycle $(OUT_DIR)/exporter + ln -sf lifecycle $(OUT_DIR)/rebaser + ln -sf lifecycle $(OUT_DIR)/creator + +build-linux-arm64-symlinks: export GOOS:=linux +build-linux-arm64-symlinks: export GOARCH:=arm64 +build-linux-arm64-symlinks: OUT_DIR?=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +build-linux-arm64-symlinks: + @echo "> Creating phase symlinks for $(GOOS)/$(GOARCH)..." + ln -sf lifecycle $(OUT_DIR)/detector + ln -sf lifecycle $(OUT_DIR)/analyzer + ln -sf lifecycle $(OUT_DIR)/restorer + ln -sf lifecycle $(OUT_DIR)/builder + ln -sf lifecycle $(OUT_DIR)/exporter + ln -sf lifecycle $(OUT_DIR)/rebaser + ln -sf lifecycle $(OUT_DIR)/creator + +build-windows-amd64-lifecycle: $(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe + +$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: export GOOS:=windows +$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: export GOARCH:=amd64 +$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle +$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: $(GOFILES) +$(BUILD_DIR)/windows-amd64/lifecycle/lifecycle.exe: + @echo "> Building lifecycle/lifecycle for $(GOOS)/$(GOARCH)..." 
+ $(GOBUILD) -o $(OUT_DIR)$/lifecycle.exe -a .$/cmd$/lifecycle + +build-windows-amd64-launcher: $(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe + +$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: export GOOS:=windows +$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: export GOARCH:=amd64 +$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle +$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: $(GOFILES) +$(BUILD_DIR)/windows-amd64/lifecycle/launcher.exe: + @echo "> Building lifecycle/launcher for $(GOOS)/$(GOARCH)..." + $(GOBUILD) -o $(OUT_DIR)$/launcher.exe -a .$/cmd$/launcher + +build-windows-amd64-symlinks: export GOOS:=windows +build-windows-amd64-symlinks: export GOARCH:=amd64 +build-windows-amd64-symlinks: OUT_DIR?=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle +build-windows-amd64-symlinks: + @echo "> Creating phase symlinks for Windows..." +ifeq ($(OS),Windows_NT) + call del $(OUT_DIR)$/detector.exe + call del $(OUT_DIR)$/analyzer.exe + call del $(OUT_DIR)$/restorer.exe + call del $(OUT_DIR)$/builder.exe + call del $(OUT_DIR)$/exporter.exe + call del $(OUT_DIR)$/rebaser.exe + call del $(OUT_DIR)$/creator.exe + call mklink $(OUT_DIR)$/detector.exe lifecycle.exe + call mklink $(OUT_DIR)$/analyzer.exe lifecycle.exe + call mklink $(OUT_DIR)$/restorer.exe lifecycle.exe + call mklink $(OUT_DIR)$/builder.exe lifecycle.exe + call mklink $(OUT_DIR)$/exporter.exe lifecycle.exe + call mklink $(OUT_DIR)$/rebaser.exe lifecycle.exe + call mklink $(OUT_DIR)$/creator.exe lifecycle.exe +else + ln -sf lifecycle.exe $(OUT_DIR)$/detector.exe + ln -sf lifecycle.exe $(OUT_DIR)$/analyzer.exe + ln -sf lifecycle.exe $(OUT_DIR)$/restorer.exe + ln -sf lifecycle.exe $(OUT_DIR)$/builder.exe + ln -sf lifecycle.exe $(OUT_DIR)$/exporter.exe + ln -sf lifecycle.exe $(OUT_DIR)$/rebaser.exe + ln -sf lifecycle.exe $(OUT_DIR)$/creator.exe +endif + +build-darwin-amd64: build-darwin-amd64-lifecycle build-darwin-amd64-launcher + +build-darwin-amd64-lifecycle: $(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle +$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: export GOOS:=darwin +$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: export GOARCH:=amd64 +$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: OUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: $(GOFILES) +$(BUILD_DIR)/darwin-amd64/lifecycle/lifecycle: + @echo "> Building lifecycle for darwin/amd64..." + $(GOENV) $(GOBUILD) -o $(OUT_DIR)/lifecycle -a ./cmd/lifecycle + @echo "> Creating lifecycle symlinks for darwin/amd64..." + ln -sf lifecycle $(OUT_DIR)/detector + ln -sf lifecycle $(OUT_DIR)/analyzer + ln -sf lifecycle $(OUT_DIR)/restorer + ln -sf lifecycle $(OUT_DIR)/builder + ln -sf lifecycle $(OUT_DIR)/exporter + ln -sf lifecycle $(OUT_DIR)/rebaser + +build-darwin-amd64-launcher: $(BUILD_DIR)/darwin-amd64/lifecycle/launcher +$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: export GOOS:=darwin +$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: export GOARCH:=amd64 +$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: OUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: $(GOFILES) +$(BUILD_DIR)/darwin-amd64/lifecycle/launcher: + @echo "> Building launcher for darwin/amd64..." + mkdir -p $(OUT_DIR) + $(GOENV) $(GOBUILD) -o $(OUT_DIR)/launcher -a ./cmd/launcher + test $$(du -m $(OUT_DIR)/launcher|cut -f 1) -le 4 + +install-goimports: + @echo "> Installing goimports..." 
+ $(GOCMD) install golang.org/x/tools/cmd/goimports@v0.1.2 + +install-yj: + @echo "> Installing yj..." + $(GOCMD) install github.com/sclevine/yj@v0.0.0-20210612025309-737bdf40a5d1 + +install-mockgen: + @echo "> Installing mockgen..." + $(GOCMD) install github.com/golang/mock/mockgen@v1.5.0 + +install-golangci-lint: + @echo "> Installing golangci-lint..." + $(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint@v1.30.0 + +lint: install-golangci-lint + @echo "> Linting code..." + @golangci-lint run -c golangci.yaml + +generate: install-mockgen + @echo "> Generating..." + $(GOCMD) generate + $(GOCMD) generate ./launch + +format: install-goimports + @echo "> Formating code..." + $(if $(shell goimports -l -w -local github.com/buildpacks/lifecycle .), @echo Fixed formatting errors. Re-run && exit 1) + +tidy: + @echo "> Tidying modules..." + $(GOCMD) mod tidy + cd tools && $(GOCMD) mod tidy + +test: unit acceptance + +# append coverage arguments +ifeq ($(TEST_COVERAGE), 1) +unit: GOTESTFLAGS:=$(GOTESTFLAGS) -coverprofile=./out/tests/coverage-unit.txt -covermode=atomic +endif +unit: out +unit: UNIT_PACKAGES=$(shell $(GOCMD) list ./... | grep -v acceptance) +unit: format lint tidy install-yj + @echo "> Running unit tests..." + $(GOTEST) $(GOTESTFLAGS) -v -count=1 $(UNIT_PACKAGES) + +out: + @mkdir out || (exit 0) + mkdir out$/tests || (exit 0) + +acceptance: format tidy + @echo "> Running acceptance tests..." + $(GOTEST) -v -count=1 -tags=acceptance -timeout=$(ACCEPTANCE_TIMEOUT) ./acceptance/... + +clean: + @echo "> Cleaning workspace..." + rm -rf $(BUILD_DIR) + +package: package-linux-amd64 package-linux-arm64 package-windows-amd64 + +package-linux-amd64: GOOS:=linux +package-linux-amd64: GOARCH:=amd64 +package-linux-amd64: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +package-linux-amd64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz +package-linux-amd64: PACKAGER=./tools/packager/main.go +package-linux-amd64: + @echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..." + $(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION) + +package-linux-arm64: GOOS:=linux +package-linux-arm64: GOARCH:=arm64 +package-linux-arm64: INPUT_DIR:=$(BUILD_DIR)/$(GOOS)-$(GOARCH)/lifecycle +package-linux-arm64: ARCHIVE_PATH=$(BUILD_DIR)/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).arm64.tgz +package-linux-arm64: PACKAGER=./tools/packager/main.go +package-linux-arm64: + @echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..." + $(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION) + +package-windows-amd64: GOOS:=windows +package-windows-amd64: GOARCH:=amd64 +package-windows-amd64: INPUT_DIR:=$(BUILD_DIR)$/$(GOOS)-$(GOARCH)$/lifecycle +package-windows-amd64: ARCHIVE_PATH=$(BUILD_DIR)$/lifecycle-v$(LIFECYCLE_VERSION)+$(GOOS).x86-64.tgz +package-windows-amd64: PACKAGER=.$/tools$/packager$/main.go +package-windows-amd64: + @echo "> Packaging lifecycle for $(GOOS)/$(GOARCH)..." + $(GOCMD) run $(PACKAGER) --inputDir $(INPUT_DIR) -archivePath $(ARCHIVE_PATH) -descriptorPath $(LIFECYCLE_DESCRIPTOR_PATH) -version $(LIFECYCLE_VERSION) + +# Ensure workdir is clean and build image from .git +docker-build-source-image-windows: $(GOFILES) +docker-build-source-image-windows: + $(if $(shell git status --short), @echo Uncommitted changes. Refusing to run. 
&& exit 1) + docker build .git -f tools/Dockerfile.windows --tag $(SOURCE_COMPILATION_IMAGE) --build-arg image_tag=$(WINDOWS_COMPILATION_IMAGE) --cache-from=$(SOURCE_COMPILATION_IMAGE) --isolation=process --compress + +docker-run-windows: docker-build-source-image-windows +docker-run-windows: + @echo "> Running '$(DOCKER_CMD)' in docker windows..." + @docker volume rm -f lifecycle-out + docker run -v lifecycle-out:c:/lifecycle/out -e LIFECYCLE_VERSION -e PLATFORM_API -e BUILDPACK_API -v gopathcache:c:/gopath -v '\\.\pipe\docker_engine:\\.\pipe\docker_engine' --isolation=process --interactive --tty --rm $(SOURCE_COMPILATION_IMAGE) $(DOCKER_CMD) + docker run -v lifecycle-out:c:/lifecycle/out --rm $(SOURCE_COMPILATION_IMAGE) tar -cf- out | tar -xf- + @docker volume rm -f lifecycle-out + diff --git a/vendor/github.com/buildpacks/lifecycle/README.md b/vendor/github.com/buildpacks/lifecycle/README.md new file mode 100644 index 0000000000..138f53ee5d --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/README.md @@ -0,0 +1,61 @@ +# Lifecycle + +[![Build Status](https://github.com/buildpacks/lifecycle/workflows/build/badge.svg)](https://github.com/buildpacks/lifecycle/actions) +[![GoDoc](https://godoc.org/github.com/buildpacks/lifecycle?status.svg)](https://godoc.org/github.com/buildpacks/lifecycle) +[![codecov](https://codecov.io/gh/buildpacks/pack/branch/main/graph/badge.svg)](https://codecov.io/gh/buildpacks/pack) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4748/badge)](https://bestpractices.coreinfrastructure.org/projects/4748) + [![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/buildpacks/lifecycle) + +A reference implementation of the [Cloud Native Buildpacks specification](https://github.com/buildpacks/spec). 
+
+## Supported APIs
+Lifecycle Version | Platform APIs | Buildpack APIs |
+------------------|----------------------------------------------------------------------|----------------|
+0.12.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6], [0.7][p/0.7] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6]
+0.11.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5], [0.6][p/0.6] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5], [0.6][b/0.6]
+0.10.x | [0.3][p/0.3], [0.4][p/0.4], [0.5][p/0.5] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4], [0.5][b/0.5]
+0.9.x | [0.3][p/0.3], [0.4][p/0.4] | [0.2][b/0.2], [0.3][b/0.3], [0.4][b/0.4]
+0.8.x | [0.3][p/0.3] | [0.2][b/0.2]
+0.7.x | [0.2][p/0.2] | [0.2][b/0.2]
+0.6.x | [0.2][p/0.2] | [0.2][b/0.2]
+
+[b/0.2]: https://github.com/buildpacks/spec/blob/buildpack/v0.2/buildpack.md
+[b/0.3]: https://github.com/buildpacks/spec/tree/buildpack/v0.3/buildpack.md
+[b/0.4]: https://github.com/buildpacks/spec/tree/buildpack/v0.4/buildpack.md
+[b/0.5]: https://github.com/buildpacks/spec/tree/buildpack/v0.5/buildpack.md
+[b/0.6]: https://github.com/buildpacks/spec/tree/buildpack/v0.6/buildpack.md
+[p/0.2]: https://github.com/buildpacks/spec/blob/platform/v0.2/platform.md
+[p/0.3]: https://github.com/buildpacks/spec/blob/platform/v0.3/platform.md
+[p/0.4]: https://github.com/buildpacks/spec/blob/platform/v0.4/platform.md
+[p/0.5]: https://github.com/buildpacks/spec/blob/platform/v0.5/platform.md
+[p/0.6]: https://github.com/buildpacks/spec/blob/platform/v0.6/platform.md
+[p/0.7]: https://github.com/buildpacks/spec/blob/platform/v0.7/platform.md
+
+\* denotes unreleased version
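+
+For illustration, a minimal sketch of how these API version lines are compared, assuming the vendored `api` package added later in this patch is importable as `github.com/buildpacks/lifecycle/api`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/buildpacks/lifecycle/api"
+)
+
+func main() {
+	// api.Platform and api.Buildpack list the API versions this lifecycle supports.
+	requested := api.MustParse("0.7")
+	fmt.Println(api.Platform.IsSupported(requested)) // true
+	fmt.Println(api.Platform.Latest())               // 0.7
+	fmt.Println(requested.LessThan("0.6"))           // false
+}
+```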
+
+## Usage
+
+### Build
+
+Either:
+* `detector` - Chooses buildpacks (via `/bin/detect`) and produces a build plan.
+* `analyzer` - Restores layer metadata from the previous image and from the cache.
+* `restorer` - Restores cached layers.
+* `builder` - Executes buildpacks (via `/bin/build`).
+* `exporter` - Creates an image and caches layers.
+
+Or:
+* `creator` - Runs the five phases listed above in order.
+
+### Run
+
+* `launcher` - Invokes a chosen process.
+
+### Rebase
+
+* `rebaser` - Creates an image from a previous image with updated base layers.
+
+## Contributing
+- [CONTRIBUTING](CONTRIBUTING.md) - Information on how to contribute and grow your understanding of the lifecycle.
+- [DEVELOPMENT](DEVELOPMENT.md) - Further detail to help you during the development process.
+- [RELEASE](RELEASE.md) - Further details about our release process.
diff --git a/vendor/github.com/buildpacks/lifecycle/RELEASE.md b/vendor/github.com/buildpacks/lifecycle/RELEASE.md
new file mode 100644
index 0000000000..50b14a3c42
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/RELEASE.md
@@ -0,0 +1,22 @@
+## Release Finalization
+
+To cut a pre-release:
+1. If applicable, ensure the README is updated with the latest supported APIs (example PR: https://github.com/buildpacks/lifecycle/pull/550).
+1. Create a release branch in the format `release/0.99.0-rc.1`. New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit-sha>`.
+1. When ready to cut the release, manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/0.99.0-rc.1`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
+1. Edit the release notes as necessary.
+1. Perform any manual validation of the artifacts (see below).
+1. When ready to publish the release, edit the release page and click "Publish release". This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit-sha>` to `buildpacksio/lifecycle:0.99.0-rc.1` but will NOT update the `latest` tag.
+
+To cut a release:
+1. Ensure the relevant spec APIs have been released.
+1. Ensure the `lifecycle/0.99.0` milestone on the [docs repo](https://github.com/buildpacks/docs/blob/main/RELEASE.md#lump-changes) is complete, such that every new feature in the lifecycle is fully explained in the `release/lifecycle/0.99` branch on the docs repo, and [migration guides](https://github.com/buildpacks/docs/tree/main/content/docs/reference/spec/migration) (if relevant) are included.
+1. If applicable, ensure the README is updated with the latest supported APIs (example PR: https://github.com/buildpacks/lifecycle/pull/550).
+1. Create a release branch in the format `release/0.99.0`. New commits to this branch will trigger the `build` workflow and produce a lifecycle image: `buildpacksio/lifecycle:<commit-sha>`.
+1. When ready to cut the release, manually trigger the `draft-release` workflow: Actions -> draft-release -> Run workflow -> Use workflow from branch: `release/0.99.0`. This will create a draft release on GitHub using the artifacts from the `build` workflow run for the latest commit on the release branch.
+1. Edit the release notes as necessary.
+1. Perform any manual validation of the artifacts.
+1. When ready to publish the release, edit the release page and click "Publish release". This will trigger the `post-release` workflow that will re-tag the lifecycle image from `buildpacksio/lifecycle:<commit-sha>` to `buildpacksio/lifecycle:0.99.0` and `buildpacksio/lifecycle:latest`.
+1. Once released:
+- Update the `main` branch to remove the pre-release note in [README.md](https://github.com/buildpacks/lifecycle/blob/main/README.md) and/or merge `release/0.99.0` into `main`.
+- Ask the learning team to merge the `release/lifecycle/0.99` branch into `main` on the docs repo.
diff --git a/vendor/github.com/buildpacks/lifecycle/analyzer.go b/vendor/github.com/buildpacks/lifecycle/analyzer.go
new file mode 100644
index 0000000000..01969441e1
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/analyzer.go
@@ -0,0 +1,95 @@
+package lifecycle
+
+import (
+	"github.com/buildpacks/imgutil"
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/lifecycle/api"
+	"github.com/buildpacks/lifecycle/buildpack"
+	"github.com/buildpacks/lifecycle/platform"
+)
+
+type Platform interface {
+	API() string
+}
+
+type Analyzer struct {
+	PreviousImage imgutil.Image
+	RunImage      imgutil.Image
+	Logger        Logger
+	Platform      Platform
+
+	// Platform API < 0.7
+	Buildpacks            []buildpack.GroupBuildpack
+	Cache                 Cache
+	LayerMetadataRestorer LayerMetadataRestorer
+}
+
+// Analyze fetches the layers metadata from the previous image and writes analyzed.toml.
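+// The previous image is optional for Platform API >= 0.7; when it is absent, empty
+// layers metadata is recorded. For Platform API < 0.7, layer metadata is additionally
+// restored from the cache (see restoresLayerMetadata below).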
+func (a *Analyzer) Analyze() (platform.AnalyzedMetadata, error) {
+	var (
+		appMeta         platform.LayersMetadata
+		cacheMeta       platform.CacheMetadata
+		previousImageID *platform.ImageIdentifier
+		runImageID      *platform.ImageIdentifier
+		err             error
+	)
+
+	if a.PreviousImage != nil { // Previous image is optional in Platform API >= 0.7
+		previousImageID, err = a.getImageIdentifier(a.PreviousImage)
+		if err != nil {
+			return platform.AnalyzedMetadata{}, errors.Wrap(err, "retrieving image identifier")
+		}
+
+		// continue even if the label cannot be decoded
+		if err := DecodeLabel(a.PreviousImage, platform.LayerMetadataLabel, &appMeta); err != nil {
+			appMeta = platform.LayersMetadata{}
+		}
+	} else {
+		appMeta = platform.LayersMetadata{}
+	}
+
+	if a.RunImage != nil {
+		runImageID, err = a.getImageIdentifier(a.RunImage)
+		if err != nil {
+			return platform.AnalyzedMetadata{}, errors.Wrap(err, "retrieving image identifier")
+		}
+	}
+
+	if a.restoresLayerMetadata() {
+		cacheMeta, err = retrieveCacheMetadata(a.Cache, a.Logger)
+		if err != nil {
+			return platform.AnalyzedMetadata{}, err
+		}
+
+		useShaFiles := true
+		if err := a.LayerMetadataRestorer.Restore(a.Buildpacks, appMeta, cacheMeta, NewLayerSHAStore(useShaFiles)); err != nil {
+			return platform.AnalyzedMetadata{}, err
+		}
+	}
+
+	return platform.AnalyzedMetadata{
+		PreviousImage: previousImageID,
+		RunImage:      runImageID,
+		Metadata:      appMeta,
+	}, nil
+}
+
+func (a *Analyzer) restoresLayerMetadata() bool {
+	return api.MustParse(a.Platform.API()).LessThan("0.7")
+}
+
+func (a *Analyzer) getImageIdentifier(image imgutil.Image) (*platform.ImageIdentifier, error) {
+	if !image.Found() {
+		a.Logger.Infof("Previous image with name %q not found", image.Name())
+		return nil, nil
+	}
+	identifier, err := image.Identifier()
+	if err != nil {
+		return nil, err
+	}
+	a.Logger.Debugf("Analyzing image %q", identifier.String())
+	return &platform.ImageIdentifier{
+		Reference: identifier.String(),
+	}, nil
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/api/apis.go b/vendor/github.com/buildpacks/lifecycle/api/apis.go
new file mode 100644
index 0000000000..5b0cac88cf
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/api/apis.go
@@ -0,0 +1,87 @@
+package api
+
+import (
+	"github.com/pkg/errors"
+)
+
+var (
+	Platform  = newApisMustParse([]string{"0.3", "0.4", "0.5", "0.6", "0.7"}, nil)
+	Buildpack = newApisMustParse([]string{"0.2", "0.3", "0.4", "0.5", "0.6"}, nil)
+)
+
+type APIs struct {
+	Supported  []*Version
+	Deprecated []*Version
+}
+
+// newApisMustParse calls NewAPIs and panics on error
+func newApisMustParse(supported []string, deprecated []string) APIs {
+	apis, err := NewAPIs(supported, deprecated)
+	if err != nil {
+		panic(err)
+	}
+	return apis
+}
+
+// NewAPIs constructs an instance of APIs
+// supported must be a superset of deprecated
+// deprecated APIs greater than 1.0 should not include minor versions
+// supported APIs should always include minor versions
+// Examples:
+// deprecated API 1 implies all 1.x APIs are deprecated
+// supported API 1 implies only 1.0 is supported
+func NewAPIs(supported []string, deprecated []string) (APIs, error) {
+	apis := APIs{}
+	for _, api := range supported {
+		apis.Supported = append(apis.Supported, MustParse(api))
+	}
+	for _, d := range deprecated {
+		dAPI := MustParse(d)
+		if err := validateDeprecated(apis, dAPI); err != nil {
+			return APIs{}, errors.Wrapf(err, "invalid deprecated API '%s'", d)
+		}
+		apis.Deprecated = append(apis.Deprecated, dAPI)
+	}
+	return apis, nil
+}
+
+func
validateDeprecated(apis APIs, deprecated *Version) error { + if !apis.IsSupported(deprecated) { + return errors.New("all deprecated APIs must also be supported") + } + if deprecated.Major != 0 && deprecated.Minor != 0 { + return errors.New("deprecated APIs may only contain 0.x or a major version") + } + return nil +} + +// IsSupported returns true or false depending on whether the target API is supported +func (a APIs) IsSupported(target *Version) bool { + for _, sAPI := range a.Supported { + if sAPI.IsSupersetOf(target) { + return true + } + } + return false +} + +// IsDeprecated returns true or false depending on whether the target API is deprecated +func (a APIs) IsDeprecated(target *Version) bool { + for _, dAPI := range a.Deprecated { + if target.IsSupersetOf(dAPI) { + return true + } + } + return false +} + +// Latest returns the latest API that is supported +func (a APIs) Latest() *Version { + latest := a.Supported[0] + for _, sAPI := range a.Supported { + if sAPI.Compare(latest) > 0 { + latest = sAPI + } + } + return latest +} diff --git a/vendor/github.com/buildpacks/lifecycle/api/version.go b/vendor/github.com/buildpacks/lifecycle/api/version.go new file mode 100644 index 0000000000..b7727a5ad2 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/api/version.go @@ -0,0 +1,129 @@ +package api + +import ( + "fmt" + "regexp" + "strconv" + + "github.com/pkg/errors" +) + +var regex = regexp.MustCompile(`^v?(\d+)\.?(\d*)$`) + +type Version struct { + Major, + Minor uint64 +} + +func MustParse(v string) *Version { + version, err := NewVersion(v) + if err != nil { + panic(err) + } + + return version +} + +func NewVersion(v string) (*Version, error) { + matches := regex.FindAllStringSubmatch(v, -1) + if len(matches) == 0 { + return nil, errors.Errorf("could not parse '%s' as version", v) + } + + var ( + major, minor uint64 + err error + ) + if len(matches[0]) == 3 { + major, err = strconv.ParseUint(matches[0][1], 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing Major '%s'", matches[0][1]) + } + + if matches[0][2] == "" { + minor = 0 + } else { + minor, err = strconv.ParseUint(matches[0][2], 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "parsing Minor '%s'", matches[0][2]) + } + } + } else { + return nil, errors.Errorf("could not parse version '%s'", v) + } + + return &Version{Major: major, Minor: minor}, nil +} + +func (v *Version) String() string { + return fmt.Sprintf("%d.%d", v.Major, v.Minor) +} + +// MarshalText makes Version satisfy the encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.String()), nil +} + +// UnmarshalText makes Version satisfy the encoding.TextUnmarshaler interface. 
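+// It accepts the same version forms as NewVersion (e.g. "0.4" or "v0.4").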
+func (v *Version) UnmarshalText(text []byte) error { + s := string(text) + + parsedVersion, err := NewVersion(s) + if err != nil { + return errors.Wrapf(err, "invalid api version '%s'", s) + } + + v.Major = parsedVersion.Major + v.Minor = parsedVersion.Minor + + return nil +} + +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare returns one of the following results +// -1 is less than *Version o +// 0 is equal to *Version o +// 1 is greater than *Version o +func (v *Version) Compare(o *Version) int { + if v.Major != o.Major { + if v.Major < o.Major { + return -1 + } + + if v.Major > o.Major { + return 1 + } + } + + if v.Minor != o.Minor { + if v.Minor < o.Minor { + return -1 + } + + if v.Minor > o.Minor { + return 1 + } + } + + return 0 +} + +func (v *Version) IsSupersetOf(o *Version) bool { + if v.Major == 0 { + return v.Equal(o) + } + return v.Major == o.Major && v.Minor >= o.Minor +} + +func (v *Version) LessThan(other string) bool { + otherVersion := MustParse(other) + return v.Compare(otherVersion) < 0 +} + +func (v *Version) AtLeast(other string) bool { + otherVersion := MustParse(other) + return v.Compare(otherVersion) >= 0 +} diff --git a/vendor/github.com/buildpacks/lifecycle/archive/archive.go b/vendor/github.com/buildpacks/lifecycle/archive/archive.go new file mode 100644 index 0000000000..136190edc8 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/archive/archive.go @@ -0,0 +1,72 @@ +package archive + +import ( + "archive/tar" + "io" + "os" + "path/filepath" +) + +// PathInfo associates a path with an os.FileInfo +type PathInfo struct { + Path string + Info os.FileInfo +} + +// AddFilesToArchive writes entries describing all files to the provided TarWriter +func AddFilesToArchive(tw TarWriter, files []PathInfo) error { + for _, file := range files { + if err := AddFileToArchive(tw, file.Path, file.Info); err != nil { + return err + } + } + return nil +} + +// AddFileToArchive writes an entry describing the file at path with the given os.FileInfo to the provided TarWriter +func AddFileToArchive(tw TarWriter, path string, fi os.FileInfo) error { + if fi.Mode()&os.ModeSocket != 0 { + return nil + } + header, err := tar.FileInfoHeader(fi, "") + if err != nil { + return err + } + header.Name = path + + if fi.Mode()&os.ModeSymlink != 0 { + var err error + target, err := os.Readlink(path) + if err != nil { + return err + } + header.Linkname = target + } + addSysAttributes(header, fi) + if err := tw.WriteHeader(header); err != nil { + return err + } + if fi.Mode().IsRegular() { + f, err := os.Open(path) + if err != nil { + return err + } + defer f.Close() + if _, err := io.Copy(tw, f); err != nil { + return err + } + } + return nil +} + +// AddDirToArchive walks dir writes entries describing dir and all of its children files to the provided TarWriter +func AddDirToArchive(tw TarWriter, dir string) error { + dir = filepath.Clean(dir) + + return filepath.Walk(dir, func(file string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + return AddFileToArchive(tw, file, fi) + }) +} diff --git a/vendor/github.com/buildpacks/lifecycle/archive/extract.go b/vendor/github.com/buildpacks/lifecycle/archive/extract.go new file mode 100644 index 0000000000..56de8a4942 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/archive/extract.go @@ -0,0 +1,117 @@ +package archive + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path/filepath" + "sync" + + "github.com/pkg/errors" +) + +type PathMode struct { + Path string + Mode 
os.FileMode
+}
+
+var (
+	umaskLock      sync.Mutex
+	extractCounter int
+	originalUmask  int
+)
+
+func setUmaskIfNeeded() {
+	umaskLock.Lock()
+	defer umaskLock.Unlock()
+	extractCounter++
+	if extractCounter == 1 {
+		originalUmask = setUmask(0)
+	}
+}
+
+func unsetUmaskIfNeeded() {
+	umaskLock.Lock()
+	defer umaskLock.Unlock()
+	extractCounter--
+	if extractCounter == 0 {
+		_ = setUmask(originalUmask)
+	}
+}
+
+// Extract reads all entries from TarReader and extracts them to the filesystem.
+func Extract(tr TarReader) error {
+	setUmaskIfNeeded()
+	defer unsetUmaskIfNeeded()
+
+	buf := make([]byte, 32*32*1024)
+	dirsFound := make(map[string]bool)
+
+	var pathModes []PathMode
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			for _, pathMode := range pathModes { // directories that are newly created and for which there is a header in the tar should have the right permissions
+				if err := os.Chmod(pathMode.Path, pathMode.Mode); err != nil {
+					return err
+				}
+			}
+			return nil
+		}
+		if err != nil {
+			return errors.Wrap(err, "error extracting from archive")
+		}
+
+		switch hdr.Typeflag {
+		case tar.TypeDir:
+			if _, err := os.Stat(hdr.Name); os.IsNotExist(err) {
+				pathMode := PathMode{hdr.Name, hdr.FileInfo().Mode()}
+				pathModes = append(pathModes, pathMode)
+			}
+			if err := os.MkdirAll(hdr.Name, os.ModePerm); err != nil {
+				return errors.Wrapf(err, "failed to create directory %q", hdr.Name)
+			}
+			dirsFound[hdr.Name] = true
+
+		case tar.TypeReg, tar.TypeRegA:
+			dirPath := filepath.Dir(hdr.Name)
+			if !dirsFound[dirPath] {
+				if _, err := os.Stat(dirPath); os.IsNotExist(err) {
+					if err := os.MkdirAll(dirPath, applyUmask(os.ModePerm, originalUmask)); err != nil { // if there is no header for the parent directory in the tar, apply the provided umask
+						return errors.Wrapf(err, "failed to create parent dir %q for file %q", dirPath, hdr.Name)
+					}
+					dirsFound[dirPath] = true
+				}
+			}
+
+			if err := writeFile(tr, hdr.Name, hdr.FileInfo().Mode(), buf); err != nil {
+				return errors.Wrapf(err, "failed to write file %q", hdr.Name)
+			}
+		case tar.TypeSymlink:
+			if err := createSymlink(hdr); err != nil {
+				return errors.Wrapf(err, "failed to create symlink %q with target %q", hdr.Name, hdr.Linkname)
+			}
+		default:
+			return fmt.Errorf("unknown file type in tar %d", hdr.Typeflag)
+		}
+	}
+}
+
+func applyUmask(mode os.FileMode, umask int) os.FileMode {
+	return os.FileMode(int(mode) &^ umask)
+}
+
+func writeFile(in io.Reader, path string, mode os.FileMode, buf []byte) (err error) {
+	fh, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, mode)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		if closeErr := fh.Close(); err == nil {
+			err = closeErr
+		}
+	}()
+	_, err = io.CopyBuffer(fh, in, buf)
+	return err
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/archive/reader.go b/vendor/github.com/buildpacks/lifecycle/archive/reader.go
new file mode 100644
index 0000000000..3712a72e4c
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/archive/reader.go
@@ -0,0 +1,75 @@
+package archive
+
+import (
+	"archive/tar"
+	"path/filepath"
+	"strings"
+)
+
+type TarReader interface {
+	Next() (*tar.Header, error)
+	Read(b []byte) (int, error)
+}
+
+// NormalizingTarReader reads from the wrapped TarReader and normalizes headers before passing them through to the caller
+// NormalizingTarReader always normalizes header.Name so that path separators match the runtime OS
+// Other modifications can be enabled by invoking options on the NormalizingTarReader
+type NormalizingTarReader struct {
+	TarReader
+	headerOpts    []HeaderOpt
+	excludedPaths []string
+}
+
+// Strip removes leading directories for any subsequently read *tar.Header
+func (tr *NormalizingTarReader) Strip(prefix string) {
+	tr.headerOpts = append(tr.headerOpts, func(header *tar.Header) *tar.Header {
+		header.Name = strings.TrimPrefix(header.Name, prefix)
+		return header
+	})
+}
+
+// ExcludePaths configures an array of paths to be skipped on subsequent calls to Next
+// Children of the configured paths will also be skipped
+// paths should match the unmodified Name of the *tar.Header returned by the wrapped TarReader, not the normalized headers
+func (tr *NormalizingTarReader) ExcludePaths(paths []string) {
+	tr.excludedPaths = paths
+}
+
+// PrependDir will set the Name of any subsequently read *tar.Header to the result of filepath.Join of dir and the
+// original Name
+func (tr *NormalizingTarReader) PrependDir(dir string) {
+	tr.headerOpts = append(tr.headerOpts, func(hdr *tar.Header) *tar.Header {
+		hdr.Name = filepath.Join(dir, hdr.Name)
+		return hdr
+	})
+}
+
+// NewNormalizingTarReader creates a NormalizingTarReader that wraps the provided TarReader
+func NewNormalizingTarReader(tr TarReader) *NormalizingTarReader {
+	return &NormalizingTarReader{TarReader: tr}
+}
+
+// Next calls Next on the wrapped TarReader and applies modifications before returning the *tar.Header
+// If the wrapped TarReader returns a *tar.Header matching an excluded path Next will proceed to the next entry,
+// returning the first non-excluded entry
+// Modification options are applied in the order the options were invoked.
+// Standard modifications (path separator normalization) are applied last.
+func (tr *NormalizingTarReader) Next() (*tar.Header, error) {
+	hdr, err := tr.TarReader.Next()
+	if err != nil {
+		return nil, err
+	}
+	for _, excluded := range tr.excludedPaths {
+		if strings.HasPrefix(hdr.Name, excluded) {
+			return tr.Next() // If path is excluded move on to the next entry
+		}
+	}
+	for _, opt := range tr.headerOpts {
+		hdr = opt(hdr)
+	}
+	if hdr.Name == "" {
+		return tr.Next() // If entire path is stripped move on to the next entry
+	}
+	hdr.Name = filepath.FromSlash(hdr.Name)
+	return hdr, nil
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go b/vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go
new file mode 100644
index 0000000000..474dbd0f20
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/archive/tar_unix.go
@@ -0,0 +1,21 @@
+// +build linux darwin
+
+package archive
+
+import (
+	"archive/tar"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+func setUmask(newMask int) (oldMask int) {
+	return unix.Umask(newMask)
+}
+
+func createSymlink(hdr *tar.Header) error {
+	return os.Symlink(hdr.Linkname, hdr.Name)
+}
+
+func addSysAttributes(hdr *tar.Header, fi os.FileInfo) {
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/archive/tar_windows.go b/vendor/github.com/buildpacks/lifecycle/archive/tar_windows.go
new file mode 100644
index 0000000000..61f3cc8e3d
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/archive/tar_windows.go
@@ -0,0 +1,56 @@
+package archive
+
+import (
+	"archive/tar"
+	"os"
+	"strconv"
+	"syscall"
+
+	"github.com/pkg/errors"
+)
+
+const (
+	symbolicLinkFlagAllowUnprivilegedCreate = 0x2
+
+	// MSWINDOWS pax vendor extensions
+	hdrMSWindowsPrefix = "MSWINDOWS."
+ hdrFileAttributes = hdrMSWindowsPrefix + "fileattr" +) + +func setUmask(newMask int) (oldMask int) { + // Not implemented on Windows + return 0 +} + +// createSymlink uses the file attributes in the PAX records to decide whether to make a directory or file type symlink. +// We must use the syscall because we often create symlinks when the target does not exist and os.Symlink uses the mode +// of the target to create the appropriate type of symlink on windows https://github.com/golang/go/issues/39183 +func createSymlink(hdr *tar.Header) error { + var flags uint32 = symbolicLinkFlagAllowUnprivilegedCreate + if attrStr, ok := hdr.PAXRecords[hdrFileAttributes]; ok { + attr, err := strconv.ParseUint(attrStr, 10, 32) + if err != nil { + return errors.Wrapf(err, "failed to parse file attributes for header %q", hdr.Name) + } + if attr&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + flags |= syscall.SYMBOLIC_LINK_FLAG_DIRECTORY + } + } + + name, err := syscall.UTF16PtrFromString(hdr.Name) + if err != nil { + return err + } + target, err := syscall.UTF16PtrFromString(hdr.Linkname) + if err != nil { + return err + } + return syscall.CreateSymbolicLink(name, target, flags) +} + +// addSysAttributes adds PAXRecords containing file attributes +func addSysAttributes(hdr *tar.Header, fi os.FileInfo) { + attrs := fi.Sys().(*syscall.Win32FileAttributeData).FileAttributes + hdr.PAXRecords = map[string]string{} + hdr.PAXRecords[hdrFileAttributes] = strconv.FormatUint(uint64(attrs), 10) +} diff --git a/vendor/github.com/buildpacks/lifecycle/archive/writer.go b/vendor/github.com/buildpacks/lifecycle/archive/writer.go new file mode 100644 index 0000000000..431fec7bce --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/archive/writer.go @@ -0,0 +1,71 @@ +package archive + +import ( + "archive/tar" + "path/filepath" + "strings" + "time" +) + +var ( + // NormalizedModTime provides a valid "zero" value for ModTime + NormalizedModTime = time.Date(1980, time.January, 1, 0, 0, 1, 0, time.UTC) +) + +type TarWriter interface { + WriteHeader(hdr *tar.Header) error + Write(b []byte) (int, error) + Close() error +} + +// NormalizingTarWriter normalizes any written *tar.Header before passing it through to the wrapped TarWriter +// NormalizingTarWriter always normalizes ModTime, Uname, and Gname +// Other modifications can be enabled by invoking options on the NormalizingTarWriter +type NormalizingTarWriter struct { + TarWriter + headerOpts []HeaderOpt +} + +type HeaderOpt func(header *tar.Header) *tar.Header + +// WithUID sets Uid of any subsequently written *tar.Header to uid +func (tw *NormalizingTarWriter) WithUID(uid int) { + tw.headerOpts = append(tw.headerOpts, func(hdr *tar.Header) *tar.Header { + hdr.Uid = uid + return hdr + }) +} + +// WithGID sets Gid of any subsequently written *tar.Header to gid +func (tw *NormalizingTarWriter) WithGID(gid int) { + tw.headerOpts = append(tw.headerOpts, func(hdr *tar.Header) *tar.Header { + hdr.Gid = gid + return hdr + }) +} + +// WithModTime sets the ModTime of any subsequently written *tar.Header to modTime +func (tw *NormalizingTarWriter) WithModTime(modTime time.Time) { + tw.headerOpts = append(tw.headerOpts, func(hdr *tar.Header) *tar.Header { + hdr.ModTime = modTime + return hdr + }) +} + +// NewNormalizingTarWriter creates a NormalizingTarWriter that wraps the provided TarWriter +func NewNormalizingTarWriter(tw TarWriter) *NormalizingTarWriter { + return &NormalizingTarWriter{tw, []HeaderOpt{}} +} + +// WriteHeader writes the header to the wrapped TarWriter after 
applying standard and configured modifications
+// Modification options are applied in the order the options were invoked.
+// Standard modifications (ModTime, Uname, and Gname) are applied last.
+func (tw *NormalizingTarWriter) WriteHeader(hdr *tar.Header) error {
+	for _, opt := range tw.headerOpts {
+		hdr = opt(hdr)
+	}
+	hdr.Name = filepath.ToSlash(strings.TrimPrefix(hdr.Name, filepath.VolumeName(hdr.Name)))
+	hdr.Uname = ""
+	hdr.Gname = ""
+	return tw.TarWriter.WriteHeader(hdr)
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/auth/env_keychain.go b/vendor/github.com/buildpacks/lifecycle/auth/env_keychain.go
new file mode 100644
index 0000000000..d2f341f27c
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/auth/env_keychain.go
@@ -0,0 +1,196 @@
+package auth
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"os"
+	"regexp"
+
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/pkg/errors"
+)
+
+const EnvRegistryAuth = "CNB_REGISTRY_AUTH"
+
+// DefaultKeychain returns a keychain containing authentication configuration for the given images
+// from the following sources, if they exist, in order of precedence:
+// the provided environment variable
+// the docker config.json file
+func DefaultKeychain(images ...string) (authn.Keychain, error) {
+	envKeychain, err := EnvKeychain(EnvRegistryAuth)
+	if err != nil {
+		return nil, err
+	}
+
+	return authn.NewMultiKeychain(
+		envKeychain,
+		InMemoryKeychain(authn.DefaultKeychain, images...),
+	), nil
+}
+
+// ResolvedKeychain is an implementation of authn.Keychain that stores credentials in memory.
+type ResolvedKeychain struct {
+	Auths map[string]string
+}
+
+// EnvKeychain returns an authn.Keychain that uses the provided environment variable as a source of credentials.
+// The value of the environment variable should be a JSON object that maps OCI registry hostnames to Authorization headers.
+func EnvKeychain(envVar string) (authn.Keychain, error) {
+	authHeaders, err := ReadEnvVar(envVar)
+	if err != nil {
+		return nil, errors.Wrap(err, "reading auth env var")
+	}
+	return &ResolvedKeychain{Auths: authHeaders}, nil
+}
+
+// InMemoryKeychain resolves credentials for the given images from the given keychain and returns a new keychain
+// that stores the pre-resolved credentials in memory and returns them on demand. This is useful in cases where the
+// backing credential store may become inaccessible in the future.
+func InMemoryKeychain(keychain authn.Keychain, images ...string) authn.Keychain {
+	return &ResolvedKeychain{
+		Auths: buildAuthMap(keychain, images...),
+	}
+}
+
+func (k *ResolvedKeychain) Resolve(resource authn.Resource) (authn.Authenticator, error) {
+	header, ok := k.Auths[resource.RegistryStr()]
+	if ok {
+		authConfig, err := authHeaderToConfig(header)
+		if err != nil {
+			return nil, errors.Wrap(err, "parsing auth header")
+		}
+
+		return &providedAuth{config: authConfig}, nil
+	}
+
+	return authn.Anonymous, nil
+}
+
+type providedAuth struct {
+	config *authn.AuthConfig
+}
+
+func (p *providedAuth) Authorization() (*authn.AuthConfig, error) {
+	return p.config, nil
+}
+
+// ReadEnvVar parses an environment variable to produce a map of 'registry url' to 'authorization header'.
+//
+// Complementary to `BuildEnvVar`.
+// +// Example Input: +// {"gcr.io": "Bearer asdf=", "docker.io": "Basic qwerty="} +// +// Example Output: +// gcr.io -> Bearer asdf= +// docker.io -> Basic qwerty= +func ReadEnvVar(envVar string) (map[string]string, error) { + authMap := map[string]string{} + + env := os.Getenv(envVar) + if env != "" { + err := json.Unmarshal([]byte(env), &authMap) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s value", envVar) + } + } + + return authMap, nil +} + +func buildAuthMap(keychain authn.Keychain, images ...string) map[string]string { + registryAuths := map[string]string{} + + for _, image := range images { + reference, authenticator, err := ReferenceForRepoName(keychain, image) + if err != nil { + continue + } + + if authenticator == authn.Anonymous { + continue + } + + authConfig, err := authenticator.Authorization() + if err != nil { + continue + } + + header, err := authConfigToHeader(authConfig) + if err != nil { + continue + } + registryAuths[reference.Context().Registry.Name()] = header + } + + return registryAuths +} + +// BuildEnvVar creates the contents to use for authentication environment variable. +// +// Complementary to `ReadEnvVar`. +func BuildEnvVar(keychain authn.Keychain, images ...string) (string, error) { + registryAuths := buildAuthMap(keychain, images...) + + authData, err := json.Marshal(registryAuths) + if err != nil { + return "", err + } + return string(authData), nil +} + +func authConfigToHeader(config *authn.AuthConfig) (string, error) { + if config.Auth != "" { + return fmt.Sprintf("Basic %s", config.Auth), nil + } + + if config.RegistryToken != "" { + return fmt.Sprintf("Bearer %s", config.RegistryToken), nil + } + + if config.Username != "" && config.Password != "" { + delimited := fmt.Sprintf("%s:%s", config.Username, config.Password) + encoded := base64.StdEncoding.EncodeToString([]byte(delimited)) + return fmt.Sprintf("Basic %s", encoded), nil + } + + return "", nil +} + +var ( + basicAuthRegExp = regexp.MustCompile("(?i)^basic (.*)$") + bearerAuthRegExp = regexp.MustCompile("(?i)^bearer (.*)$") +) + +func authHeaderToConfig(header string) (*authn.AuthConfig, error) { + if matches := basicAuthRegExp.FindAllStringSubmatch(header, -1); len(matches) != 0 { + return &authn.AuthConfig{ + Auth: matches[0][1], + }, nil + } + + if matches := bearerAuthRegExp.FindAllStringSubmatch(header, -1); len(matches) != 0 { + return &authn.AuthConfig{ + RegistryToken: matches[0][1], + }, nil + } + + return nil, errors.New("unknown auth type from header") +} + +// ReferenceForRepoName returns a reference and an authenticator for a given image name and keychain. 
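+// The name is parsed with name.WeakValidation, and credentials are then resolved from
+// the keychain for the reference's registry.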
+func ReferenceForRepoName(keychain authn.Keychain, ref string) (name.Reference, authn.Authenticator, error) { + var auth authn.Authenticator + r, err := name.ParseReference(ref, name.WeakValidation) + if err != nil { + return nil, nil, err + } + + auth, err = keychain.Resolve(r.Context().Registry) + if err != nil { + return nil, nil, err + } + return r, auth, nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/builder.go b/vendor/github.com/buildpacks/lifecycle/builder.go new file mode 100644 index 0000000000..df61cd1b42 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/builder.go @@ -0,0 +1,202 @@ +package lifecycle + +import ( + "fmt" + "io" + "os" + "path/filepath" + "sort" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/buildpack" + "github.com/buildpacks/lifecycle/env" + "github.com/buildpacks/lifecycle/launch" + "github.com/buildpacks/lifecycle/layers" + "github.com/buildpacks/lifecycle/platform" +) + +type BuildEnv interface { + AddRootDir(baseDir string) error + AddEnvDir(envDir string, defaultAction env.ActionType) error + WithPlatform(platformDir string) ([]string, error) + List() []string +} + +type BuildpackStore interface { + Lookup(bpID, bpVersion string) (buildpack.Buildpack, error) +} + +type Buildpack interface { + Build(bpPlan buildpack.Plan, config buildpack.BuildConfig, bpEnv buildpack.BuildEnv) (buildpack.BuildResult, error) + ConfigFile() *buildpack.Descriptor + Detect(config *buildpack.DetectConfig, bpEnv buildpack.BuildEnv) buildpack.DetectRun +} + +type Builder struct { + AppDir string + LayersDir string + PlatformDir string + Platform Platform + PlatformAPI *api.Version // TODO: derive from platform + Group buildpack.Group + Plan platform.BuildPlan + Out, Err io.Writer + Logger Logger + BuildpackStore BuildpackStore +} + +func (b *Builder) Build() (*platform.BuildMetadata, error) { + b.Logger.Debug("Starting build") + + config, err := b.BuildConfig() + if err != nil { + return nil, err + } + + processMap := newProcessMap() + plan := b.Plan + var bom []buildpack.BOMEntry + var slices []layers.Slice + var labels []buildpack.Label + + bpEnv := env.NewBuildEnv(os.Environ()) + + for _, bp := range b.Group.Group { + b.Logger.Debugf("Running build for buildpack %s", bp) + + b.Logger.Debug("Looking up buildpack") + bpTOML, err := b.BuildpackStore.Lookup(bp.ID, bp.Version) + if err != nil { + return nil, err + } + + b.Logger.Debug("Finding plan") + bpPlan := plan.Find(bp.ID) + + br, err := bpTOML.Build(bpPlan, config, bpEnv) + if err != nil { + return nil, err + } + + b.Logger.Debug("Updating buildpack processes") + updateDefaultProcesses(br.Processes, api.MustParse(bp.API), b.PlatformAPI) + + bom = append(bom, br.BOM...) + labels = append(labels, br.Labels...) + plan = plan.Filter(br.MetRequires) + + b.Logger.Debug("Updating process list") + warning := processMap.add(br.Processes) + if warning != "" { + b.Logger.Warn(warning) + } + + slices = append(slices, br.Slices...) 
+ + b.Logger.Debugf("Finished running build for buildpack %s", bp) + } + + if b.PlatformAPI.LessThan("0.4") { + config.Logger.Debug("Updating BOM entries") + for i := range bom { + bom[i].ConvertMetadataToVersion() + } + } + + b.Logger.Debug("Listing processes") + procList := processMap.list() + + b.Logger.Debug("Finished build") + return &platform.BuildMetadata{ + BOM: bom, + Buildpacks: b.Group.Group, + Labels: labels, + Processes: procList, + Slices: slices, + BuildpackDefaultProcessType: processMap.defaultType, + }, nil +} + +// we set default = true for web processes when platformAPI >= 0.6 and buildpackAPI < 0.6 +func updateDefaultProcesses(processes []launch.Process, buildpackAPI *api.Version, platformAPI *api.Version) { + if platformAPI.LessThan("0.6") || buildpackAPI.AtLeast("0.6") { + return + } + + for i := range processes { + if processes[i].Type == "web" { + processes[i].Default = true + } + } +} + +func (b *Builder) BuildConfig() (buildpack.BuildConfig, error) { + appDir, err := filepath.Abs(b.AppDir) + if err != nil { + return buildpack.BuildConfig{}, err + } + platformDir, err := filepath.Abs(b.PlatformDir) + if err != nil { + return buildpack.BuildConfig{}, err + } + layersDir, err := filepath.Abs(b.LayersDir) + if err != nil { + return buildpack.BuildConfig{}, err + } + + return buildpack.BuildConfig{ + AppDir: appDir, + PlatformDir: platformDir, + LayersDir: layersDir, + Out: b.Out, + Err: b.Err, + Logger: b.Logger, + }, nil +} + +type processMap struct { + typeToProcess map[string]launch.Process + defaultType string +} + +func newProcessMap() processMap { + return processMap{ + typeToProcess: make(map[string]launch.Process), + defaultType: "", + } +} + +// add adds the processes from listToAdd to the processMap. +// It sets m.defaultType to the last default process. +// If a non-default process overrides a default process, it returns a warning and unsets m.defaultType. +func (m *processMap) add(listToAdd []launch.Process) string { + warning := "" + for _, procToAdd := range listToAdd { + if procToAdd.Default { + m.defaultType = procToAdd.Type + warning = "" + } else if procToAdd.Type == m.defaultType { + // non-default process overrides a default process + m.defaultType = "" + warning = fmt.Sprintf("Warning: redefining the following default process type with a process not marked as default: %s\n", procToAdd.Type) + } + m.typeToProcess[procToAdd.Type] = procToAdd + } + return warning +} + +// list returns a sorted array of processes. +// The array is sorted based on the process types. +// The list is sorted for reproducibility.
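+// For example (illustrative): processes added as "worker" then "web" are returned +// as [web worker], each with Default set to false via NoDefault().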
+func (m processMap) list() []launch.Process { + var keys []string + for proc := range m.typeToProcess { + keys = append(keys, proc) + } + sort.Strings(keys) + result := []launch.Process{} + for _, key := range keys { + result = append(result, m.typeToProcess[key].NoDefault()) // we set the default to false so it won't be part of metadata.toml + } + return result +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/build.go b/vendor/github.com/buildpacks/lifecycle/buildpack/build.go new file mode 100644 index 0000000000..aa0f5dafef --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/build.go @@ -0,0 +1,409 @@ +package buildpack + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/BurntSushi/toml" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/buildpack/layertypes" + "github.com/buildpacks/lifecycle/env" + "github.com/buildpacks/lifecycle/launch" + "github.com/buildpacks/lifecycle/layers" +) + +type BuildEnv interface { + AddRootDir(baseDir string) error + AddEnvDir(envDir string, defaultAction env.ActionType) error + WithPlatform(platformDir string) ([]string, error) + List() []string +} + +type BuildConfig struct { + AppDir string + PlatformDir string + LayersDir string + Out io.Writer + Err io.Writer + Logger Logger +} + +type BuildResult struct { + BOM []BOMEntry + Labels []Label + MetRequires []string + Processes []launch.Process + Slices []layers.Slice +} + +func (bom *BOMEntry) ConvertMetadataToVersion() { + if version, ok := bom.Metadata["version"]; ok { + metadataVersion := fmt.Sprintf("%v", version) + bom.Version = metadataVersion + } +} + +func (bom *BOMEntry) convertVersionToMetadata() { + if bom.Version != "" { + if bom.Metadata == nil { + bom.Metadata = make(map[string]interface{}) + } + bom.Metadata["version"] = bom.Version + bom.Version = "" + } +} + +func (b *Descriptor) Build(bpPlan Plan, config BuildConfig, bpEnv BuildEnv) (BuildResult, error) { + config.Logger.Debugf("Running build for buildpack %s", b) + + if api.MustParse(b.API).Equal(api.MustParse("0.2")) { + config.Logger.Debug("Updating buildpack plan entries") + + for i := range bpPlan.Entries { + bpPlan.Entries[i].convertMetadataToVersion() + } + } + + config.Logger.Debug("Creating plan directory") + planDir, err := ioutil.TempDir("", launch.EscapeID(b.Buildpack.ID)+"-") + if err != nil { + return BuildResult{}, err + } + defer os.RemoveAll(planDir) + + config.Logger.Debug("Preparing paths") + bpLayersDir, bpPlanPath, err := preparePaths(b.Buildpack.ID, bpPlan, config.LayersDir, planDir) + if err != nil { + return BuildResult{}, err + } + + config.Logger.Debug("Running build command") + if err := b.runBuildCmd(bpLayersDir, bpPlanPath, config, bpEnv); err != nil { + return BuildResult{}, err + } + + config.Logger.Debug("Processing layers") + pathToLayerMetadataFile, err := b.processLayers(bpLayersDir, config.Logger) + if err != nil { + return BuildResult{}, err + } + + config.Logger.Debug("Updating environment") + if err := b.setupEnv(pathToLayerMetadataFile, bpEnv); err != nil { + return BuildResult{}, err + } + + config.Logger.Debug("Reading output files") + return b.readOutputFiles(bpLayersDir, bpPlanPath, bpPlan, config.Logger) +} + +func renameLayerDirIfNeeded(layerMetadataFile layertypes.LayerMetadataFile, layerDir string) error { + // rename <layers>/<layer> to <layers>/<layer>.ignore if buildpack API >= 0.6 and all of the types flags are set to false + if !layerMetadataFile.Launch && !layerMetadataFile.Cache &&
!layerMetadataFile.Build { + if err := os.Rename(layerDir, layerDir+".ignore"); err != nil { + return err + } + } + return nil +} + +func (b *Descriptor) processLayers(layersDir string, logger Logger) (map[string]layertypes.LayerMetadataFile, error) { + if api.MustParse(b.API).LessThan("0.6") { + return eachDir(layersDir, b.API, func(path, buildpackAPI string) (layertypes.LayerMetadataFile, error) { + layerMetadataFile, msg, err := DecodeLayerMetadataFile(path+".toml", buildpackAPI) + if err != nil { + return layertypes.LayerMetadataFile{}, err + } + if msg != "" { + logger.Warn(msg) + } + return layerMetadataFile, nil + }) + } + return eachDir(layersDir, b.API, func(path, buildpackAPI string) (layertypes.LayerMetadataFile, error) { + layerMetadataFile, msg, err := DecodeLayerMetadataFile(path+".toml", buildpackAPI) + if err != nil { + return layertypes.LayerMetadataFile{}, err + } + if msg != "" { + return layertypes.LayerMetadataFile{}, errors.New(msg) + } + if err := renameLayerDirIfNeeded(layerMetadataFile, path); err != nil { + return layertypes.LayerMetadataFile{}, err + } + return layerMetadataFile, nil + }) +} + +func preparePaths(bpID string, bpPlan Plan, layersDir, planDir string) (string, string, error) { + bpDirName := launch.EscapeID(bpID) + bpLayersDir := filepath.Join(layersDir, bpDirName) + bpPlanDir := filepath.Join(planDir, bpDirName) + if err := os.MkdirAll(bpLayersDir, 0777); err != nil { + return "", "", err + } + if err := os.MkdirAll(bpPlanDir, 0777); err != nil { + return "", "", err + } + bpPlanPath := filepath.Join(bpPlanDir, "plan.toml") + if err := WriteTOML(bpPlanPath, bpPlan); err != nil { + return "", "", err + } + + return bpLayersDir, bpPlanPath, nil +} + +func WriteTOML(path string, data interface{}) error { + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return err + } + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + return toml.NewEncoder(f).Encode(data) +} + +func (b *Descriptor) runBuildCmd(bpLayersDir, bpPlanPath string, config BuildConfig, bpEnv BuildEnv) error { + cmd := exec.Command( + filepath.Join(b.Dir, "bin", "build"), + bpLayersDir, + config.PlatformDir, + bpPlanPath, + ) // #nosec G204 + cmd.Dir = config.AppDir + cmd.Stdout = config.Out + cmd.Stderr = config.Err + + var err error + if b.Buildpack.ClearEnv { + cmd.Env = bpEnv.List() + } else { + cmd.Env, err = bpEnv.WithPlatform(config.PlatformDir) + if err != nil { + return err + } + } + cmd.Env = append(cmd.Env, EnvBuildpackDir+"="+b.Dir) + + if err := cmd.Run(); err != nil { + return NewLifecycleError(err, ErrTypeBuildpack) + } + return nil +} + +func (b *Descriptor) setupEnv(pathToLayerMetadataFile map[string]layertypes.LayerMetadataFile, buildEnv BuildEnv) error { + bpAPI := api.MustParse(b.API) + for path, layerMetadataFile := range pathToLayerMetadataFile { + if !layerMetadataFile.Build { + continue + } + if err := buildEnv.AddRootDir(path); err != nil { + return err + } + if err := buildEnv.AddEnvDir(filepath.Join(path, "env"), env.DefaultActionType(bpAPI)); err != nil { + return err + } + if err := buildEnv.AddEnvDir(filepath.Join(path, "env.build"), env.DefaultActionType(bpAPI)); err != nil { + return err + } + } + return nil +} + +func eachDir(dir, buildpackAPI string, fn func(path, api string) (layertypes.LayerMetadataFile, error)) (map[string]layertypes.LayerMetadataFile, error) { + files, err := ioutil.ReadDir(dir) + if os.IsNotExist(err) { + return map[string]layertypes.LayerMetadataFile{}, nil + } else if err != nil { + return 
map[string]layertypes.LayerMetadataFile{}, err + } + pathToLayerMetadataFile := map[string]layertypes.LayerMetadataFile{} + for _, f := range files { + if !f.IsDir() { + continue + } + path := filepath.Join(dir, f.Name()) + layerMetadataFile, err := fn(path, buildpackAPI) + if err != nil { + return map[string]layertypes.LayerMetadataFile{}, err + } + pathToLayerMetadataFile[path] = layerMetadataFile + } + return pathToLayerMetadataFile, nil +} + +func (b *Descriptor) readOutputFiles(bpLayersDir, bpPlanPath string, bpPlanIn Plan, logger Logger) (BuildResult, error) { + br := BuildResult{} + bpFromBpInfo := GroupBuildpack{ID: b.Buildpack.ID, Version: b.Buildpack.Version} + + // setup launch.toml + var launchTOML LaunchTOML + launchPath := filepath.Join(bpLayersDir, "launch.toml") + + if api.MustParse(b.API).LessThan("0.5") { + // read buildpack plan + var bpPlanOut Plan + if _, err := toml.DecodeFile(bpPlanPath, &bpPlanOut); err != nil { + return BuildResult{}, err + } + + // set BOM and MetRequires + if err := validateBOM(bpPlanOut.toBOM(), b.API); err != nil { + return BuildResult{}, err + } + br.BOM = WithBuildpack(bpFromBpInfo, bpPlanOut.toBOM()) + for i := range br.BOM { + br.BOM[i].convertVersionToMetadata() + } + br.MetRequires = names(bpPlanOut.Entries) + + // read launch.toml, return if not exists + if _, err := toml.DecodeFile(launchPath, &launchTOML); os.IsNotExist(err) { + return br, nil + } else if err != nil { + return BuildResult{}, err + } + } else { + // read build.toml + var bpBuild BuildTOML + buildPath := filepath.Join(bpLayersDir, "build.toml") + if _, err := toml.DecodeFile(buildPath, &bpBuild); err != nil && !os.IsNotExist(err) { + return BuildResult{}, err + } + if err := validateBOM(bpBuild.BOM, b.API); err != nil { + return BuildResult{}, err + } + + // set MetRequires + if err := validateUnmet(bpBuild.Unmet, bpPlanIn); err != nil { + return BuildResult{}, err + } + br.MetRequires = names(bpPlanIn.filter(bpBuild.Unmet).Entries) + + // read launch.toml, return if not exists + if _, err := toml.DecodeFile(launchPath, &launchTOML); os.IsNotExist(err) { + return br, nil + } else if err != nil { + return BuildResult{}, err + } + + // set BOM + if err := validateBOM(launchTOML.BOM, b.API); err != nil { + return BuildResult{}, err + } + br.BOM = WithBuildpack(bpFromBpInfo, launchTOML.BOM) + } + + if err := overrideDefaultForOldBuildpacks(launchTOML.Processes, b.API, logger); err != nil { + return BuildResult{}, err + } + + if err := validateNoMultipleDefaults(launchTOML.Processes); err != nil { + return BuildResult{}, err + } + + // set data from launch.toml + br.Labels = append([]Label{}, launchTOML.Labels...) + for i := range launchTOML.Processes { + launchTOML.Processes[i].BuildpackID = b.Buildpack.ID + } + br.Processes = append([]launch.Process{}, launchTOML.Processes...) + br.Slices = append([]layers.Slice{}, launchTOML.Slices...) + + return br, nil +} + +func overrideDefaultForOldBuildpacks(processes []launch.Process, bpAPI string, logger Logger) error { + if api.MustParse(bpAPI).AtLeast("0.6") { + return nil + } + replacedDefaults := []string{} + for i := range processes { + if processes[i].Default { + replacedDefaults = append(replacedDefaults, processes[i].Type) + } + processes[i].Default = false + } + if len(replacedDefaults) > 0 { + logger.Warn(fmt.Sprintf("Warning: default processes aren't supported in this buildpack api version. 
Overriding the default value to false for the following processes: [%s]", strings.Join(replacedDefaults, ", "))) + } + return nil +} + +func validateNoMultipleDefaults(processes []launch.Process) error { + defaultType := "" + for _, process := range processes { + if process.Default && defaultType != "" { + return fmt.Errorf("multiple default process types aren't allowed") + } + if process.Default { + defaultType = process.Type + } + } + return nil +} + +func validateBOM(bom []BOMEntry, bpAPI string) error { + if api.MustParse(bpAPI).LessThan("0.5") { + for _, entry := range bom { + if version, ok := entry.Metadata["version"]; ok { + metadataVersion := fmt.Sprintf("%v", version) + if entry.Version != "" && entry.Version != metadataVersion { + return errors.New("top level version does not match metadata version") + } + } + } + } else { + for _, entry := range bom { + if entry.Version != "" { + return fmt.Errorf("bom entry '%s' has a top level version which is not allowed. The buildpack should instead set metadata.version", entry.Name) + } + } + } + return nil +} + +func validateUnmet(unmet []Unmet, bpPlan Plan) error { + for _, unmet := range unmet { + if unmet.Name == "" { + return errors.New("unmet.name is required") + } + found := false + for _, req := range bpPlan.Entries { + if unmet.Name == req.Name { + found = true + break + } + } + if !found { + return fmt.Errorf("unmet.name '%s' must match a requested dependency", unmet.Name) + } + } + return nil +} + +func names(requires []Require) []string { + var out []string + for _, req := range requires { + out = append(out, req.Name) + } + return out +} + +func WithBuildpack(bp GroupBuildpack, bom []BOMEntry) []BOMEntry { + var out []BOMEntry + for _, entry := range bom { + entry.Buildpack = bp.NoAPI().NoHomepage() + out = append(out, entry) + } + return out +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/descriptor.go b/vendor/github.com/buildpacks/lifecycle/buildpack/descriptor.go new file mode 100644 index 0000000000..460a979498 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/descriptor.go @@ -0,0 +1,72 @@ +// Buildpack descriptor file (https://github.com/buildpacks/spec/blob/main/buildpack.md#buildpacktoml-toml). + +package buildpack + +type Descriptor struct { + API string `toml:"api"` + Buildpack Info `toml:"buildpack"` + Order Order `toml:"order"` + Dir string `toml:"-"` +} + +func (b *Descriptor) ConfigFile() *Descriptor { + return b +} + +func (b *Descriptor) IsMetaBuildpack() bool { + return b.Order != nil +} + +func (b *Descriptor) String() string { + return b.Buildpack.Name + " " + b.Buildpack.Version +} + +type Info struct { + ClearEnv bool `toml:"clear-env,omitempty"` + Homepage string `toml:"homepage,omitempty"` + ID string `toml:"id"` + Name string `toml:"name"` + Version string `toml:"version"` +} + +type Order []Group + +type Group struct { + Group []GroupBuildpack `toml:"group"` +} + +func (bg Group) Append(group ...Group) Group { + for _, g := range group { + bg.Group = append(bg.Group, g.Group...) + } + return bg +} + +// A GroupBuildpack represents a buildpack referenced in a buildpack.toml's [[order.group]]. +// It may be a regular buildpack, or a meta buildpack. 
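+// +// An illustrative buildpack.toml fragment (assumed values) that decodes into this struct: +// +// [[order.group]] +// id = "example/node" +// version = "1.2.3" +// optional = true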
+type GroupBuildpack struct { + API string `toml:"api,omitempty" json:"-"` + Homepage string `toml:"homepage,omitempty" json:"homepage,omitempty"` + ID string `toml:"id" json:"id"` + Optional bool `toml:"optional,omitempty" json:"optional,omitempty"` + Version string `toml:"version" json:"version"` +} + +func (bp GroupBuildpack) String() string { + return bp.ID + "@" + bp.Version +} + +func (bp GroupBuildpack) NoOpt() GroupBuildpack { + bp.Optional = false + return bp +} + +func (bp GroupBuildpack) NoAPI() GroupBuildpack { + bp.API = "" + return bp +} + +func (bp GroupBuildpack) NoHomepage() GroupBuildpack { + bp.Homepage = "" + return bp +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/detect.go b/vendor/github.com/buildpacks/lifecycle/buildpack/detect.go new file mode 100644 index 0000000000..53adcaf3ae --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/detect.go @@ -0,0 +1,117 @@ +package buildpack + +import ( + "bytes" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "syscall" + + "github.com/BurntSushi/toml" + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/api" +) + +const EnvBuildpackDir = "CNB_BUILDPACK_DIR" + +type Logger interface { + Debug(msg string) + Debugf(fmt string, v ...interface{}) + + Info(msg string) + Infof(fmt string, v ...interface{}) + + Warn(msg string) + Warnf(fmt string, v ...interface{}) + + Error(msg string) + Errorf(fmt string, v ...interface{}) +} + +type DetectConfig struct { + AppDir string + PlatformDir string + Logger Logger +} + +func (b *Descriptor) Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun { + appDir, err := filepath.Abs(config.AppDir) + if err != nil { + return DetectRun{Code: -1, Err: err} + } + platformDir, err := filepath.Abs(config.PlatformDir) + if err != nil { + return DetectRun{Code: -1, Err: err} + } + planDir, err := ioutil.TempDir("", "plan.") + if err != nil { + return DetectRun{Code: -1, Err: err} + } + defer os.RemoveAll(planDir) + + planPath := filepath.Join(planDir, "plan.toml") + if err := ioutil.WriteFile(planPath, nil, 0600); err != nil { + return DetectRun{Code: -1, Err: err} + } + + out := &bytes.Buffer{} + cmd := exec.Command( + filepath.Join(b.Dir, "bin", "detect"), + platformDir, + planPath, + ) // #nosec G204 + cmd.Dir = appDir + cmd.Stdout = out + cmd.Stderr = out + + if b.Buildpack.ClearEnv { + cmd.Env = bpEnv.List() + } else { + cmd.Env, err = bpEnv.WithPlatform(config.PlatformDir) + if err != nil { + return DetectRun{Code: -1, Err: err} + } + } + cmd.Env = append(cmd.Env, EnvBuildpackDir+"="+b.Dir) + + if err := cmd.Run(); err != nil { + if err, ok := err.(*exec.ExitError); ok { + if status, ok := err.Sys().(syscall.WaitStatus); ok { + return DetectRun{Code: status.ExitStatus(), Output: out.Bytes()} + } + } + return DetectRun{Code: -1, Err: err, Output: out.Bytes()} + } + var t DetectRun + if _, err := toml.DecodeFile(planPath, &t); err != nil { + return DetectRun{Code: -1, Err: err, Output: out.Bytes()} + } + if api.MustParse(b.API).Equal(api.MustParse("0.2")) { + if t.hasInconsistentVersions() || t.Or.hasInconsistentVersions() { + t.Err = errors.Errorf(`buildpack %s has a "version" key that does not match "metadata.version"`, b.Buildpack.ID) + t.Code = -1 + } + } + if api.MustParse(b.API).AtLeast("0.3") { + if t.hasDoublySpecifiedVersions() || t.Or.hasDoublySpecifiedVersions() { + t.Err = errors.Errorf(`buildpack %s has a "version" key and a "metadata.version" which cannot be specified together. 
"metadata.version" should be used instead`, b.Buildpack.ID) + t.Code = -1 + } + } + if api.MustParse(b.API).AtLeast("0.3") { + if t.hasTopLevelVersions() || t.Or.hasTopLevelVersions() { + config.Logger.Warnf(`Warning: buildpack %s has a "version" key. This key is deprecated in build plan requirements in buildpack API 0.3. "metadata.version" should be used instead`, b.Buildpack.ID) + } + } + t.Output = out.Bytes() + return t +} + +type DetectRun struct { + BuildPlan + Output []byte `toml:"-"` + Code int `toml:"-"` + Err error `toml:"-"` +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/error.go b/vendor/github.com/buildpacks/lifecycle/buildpack/error.go new file mode 100644 index 0000000000..4405fc82fe --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/error.go @@ -0,0 +1,26 @@ +package buildpack + +type ErrorType string + +const ErrTypeBuildpack ErrorType = "ERR_BUILDPACK" +const ErrTypeFailedDetection ErrorType = "ERR_FAILED_DETECTION" + +type Error struct { + RootError error + Type ErrorType +} + +func (le *Error) Error() string { + if le.Cause() != nil { + return le.Cause().Error() + } + return string(le.Type) +} + +func (le *Error) Cause() error { + return le.RootError +} + +func NewLifecycleError(cause error, errType ErrorType) *Error { + return &Error{RootError: cause, Type: errType} +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/files.go b/vendor/github.com/buildpacks/lifecycle/buildpack/files.go new file mode 100644 index 0000000000..eca621e742 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/files.go @@ -0,0 +1,248 @@ +// Data Format Files for the buildpack api spec (https://github.com/buildpacks/spec/blob/main/buildpack.md#data-format). + +package buildpack + +import ( + "errors" + "fmt" + "os" + + "github.com/buildpacks/lifecycle/buildpack/layertypes" + api05 "github.com/buildpacks/lifecycle/buildpack/v05" + api06 "github.com/buildpacks/lifecycle/buildpack/v06" + "github.com/buildpacks/lifecycle/launch" + "github.com/buildpacks/lifecycle/layers" +) + +// launch.toml + +type LaunchTOML struct { + BOM []BOMEntry + Labels []Label + Processes []launch.Process `toml:"processes"` + Slices []layers.Slice `toml:"slices"` +} + +type BOMEntry struct { + Require + Buildpack GroupBuildpack `toml:"buildpack" json:"buildpack"` +} + +type Require struct { + Name string `toml:"name" json:"name"` + Version string `toml:"version,omitempty" json:"version,omitempty"` + Metadata map[string]interface{} `toml:"metadata" json:"metadata"` +} + +func (r *Require) convertMetadataToVersion() { + if version, ok := r.Metadata["version"]; ok { + r.Version = fmt.Sprintf("%v", version) + } +} + +func (r *Require) ConvertVersionToMetadata() { + if r.Version != "" { + if r.Metadata == nil { + r.Metadata = make(map[string]interface{}) + } + r.Metadata["version"] = r.Version + r.Version = "" + } +} + +func (r *Require) hasDoublySpecifiedVersions() bool { + if _, ok := r.Metadata["version"]; ok { + return r.Version != "" + } + return false +} + +func (r *Require) hasInconsistentVersions() bool { + if version, ok := r.Metadata["version"]; ok { + return r.Version != "" && r.Version != version + } + return false +} + +func (r *Require) hasTopLevelVersions() bool { + return r.Version != "" +} + +type Label struct { + Key string `toml:"key"` + Value string `toml:"value"` +} + +// build.toml + +type BuildTOML struct { + BOM []BOMEntry `toml:"bom"` + Unmet []Unmet `toml:"unmet"` +} + +type Unmet struct { + Name string `toml:"name"` +} + +// store.toml + 
+type StoreTOML struct { + Data map[string]interface{} `json:"metadata" toml:"metadata"` +} + +// build plan + +type BuildPlan struct { + PlanSections + Or planSectionsList `toml:"or"` +} + +func (p *PlanSections) hasInconsistentVersions() bool { + for _, req := range p.Requires { + if req.hasInconsistentVersions() { + return true + } + } + return false +} + +func (p *PlanSections) hasDoublySpecifiedVersions() bool { + for _, req := range p.Requires { + if req.hasDoublySpecifiedVersions() { + return true + } + } + return false +} + +func (p *PlanSections) hasTopLevelVersions() bool { + for _, req := range p.Requires { + if req.hasTopLevelVersions() { + return true + } + } + return false +} + +type planSectionsList []PlanSections + +func (p *planSectionsList) hasInconsistentVersions() bool { + for _, planSection := range *p { + if planSection.hasInconsistentVersions() { + return true + } + } + return false +} + +func (p *planSectionsList) hasDoublySpecifiedVersions() bool { + for _, planSection := range *p { + if planSection.hasDoublySpecifiedVersions() { + return true + } + } + return false +} + +func (p *planSectionsList) hasTopLevelVersions() bool { + for _, planSection := range *p { + if planSection.hasTopLevelVersions() { + return true + } + } + return false +} + +type PlanSections struct { + Requires []Require `toml:"requires"` + Provides []Provide `toml:"provides"` +} + +type Provide struct { + Name string `toml:"name"` +} + +// buildpack plan + +type Plan struct { + Entries []Require `toml:"entries"` +} + +func (p Plan) filter(unmet []Unmet) Plan { + var out []Require + for _, entry := range p.Entries { + if !containsName(unmet, entry.Name) { + out = append(out, entry) + } + } + return Plan{Entries: out} +} + +func (p Plan) toBOM() []BOMEntry { + var bom []BOMEntry + for _, entry := range p.Entries { + bom = append(bom, BOMEntry{Require: entry}) + } + return bom +} + +func containsName(unmet []Unmet, name string) bool { + for _, u := range unmet { + if u.Name == name { + return true + } + } + return false +} + +// layer content metadata + +type EncoderDecoder interface { + IsSupported(buildpackAPI string) bool + Encode(file *os.File, lmf layertypes.LayerMetadataFile) error + Decode(path string) (layertypes.LayerMetadataFile, string, error) +} + +func defaultEncodersDecoders() []EncoderDecoder { + return []EncoderDecoder{ + // TODO: it's weird that api05 is relevant for buildpack APIs 0.2-0.5 and api06 is relevant for buildpack API 0.6 and up. We should work on it. 
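+ // Note: the two codecs are mutually exclusive; IsSupported is LessThan("0.6") + // for the v05 codec and AtLeast("0.6") for the v06 codec, so exactly one matches.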
+ api05.NewEncoderDecoder(), + api06.NewEncoderDecoder(), + } +} + +func EncodeLayerMetadataFile(lmf layertypes.LayerMetadataFile, path, buildpackAPI string) error { + fh, err := os.Create(path) + if err != nil { + return err + } + defer fh.Close() + + encoders := defaultEncodersDecoders() + + for _, encoder := range encoders { + if encoder.IsSupported(buildpackAPI) { + return encoder.Encode(fh, lmf) + } + } + return errors.New("couldn't find an encoder") +} + +func DecodeLayerMetadataFile(path, buildpackAPI string) (layertypes.LayerMetadataFile, string, error) { // TODO: pass the logger and print the warning inside (instead of returning a message) + fh, err := os.Open(path) + if os.IsNotExist(err) { + return layertypes.LayerMetadataFile{}, "", nil + } else if err != nil { + return layertypes.LayerMetadataFile{}, "", err + } + defer fh.Close() + + decoders := defaultEncodersDecoders() + + for _, decoder := range decoders { + if decoder.IsSupported(buildpackAPI) { + return decoder.Decode(path) + } + } + return layertypes.LayerMetadataFile{}, "", errors.New("couldn't find a decoder") +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/layertypes/types.go b/vendor/github.com/buildpacks/lifecycle/buildpack/layertypes/types.go new file mode 100644 index 0000000000..93ecda9339 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/layertypes/types.go @@ -0,0 +1,8 @@ +package layertypes + +type LayerMetadataFile struct { + Data interface{} `json:"data" toml:"metadata"` + Build bool `json:"build" toml:"build"` + Launch bool `json:"launch" toml:"launch"` + Cache bool `json:"cache" toml:"cache"` +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/store.go b/vendor/github.com/buildpacks/lifecycle/buildpack/store.go new file mode 100644 index 0000000000..e1aef4c34b --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/store.go @@ -0,0 +1,38 @@ +package buildpack + +import ( + "path/filepath" + + "github.com/BurntSushi/toml" + + "github.com/buildpacks/lifecycle/launch" +) + +type Buildpack interface { + Build(bpPlan Plan, config BuildConfig, bpEnv BuildEnv) (BuildResult, error) + ConfigFile() *Descriptor + Detect(config *DetectConfig, bpEnv BuildEnv) DetectRun +} + +type DirBuildpackStore struct { + Dir string +} + +func NewBuildpackStore(dir string) (*DirBuildpackStore, error) { + dir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + return &DirBuildpackStore{Dir: dir}, nil +} + +func (f *DirBuildpackStore) Lookup(bpID, bpVersion string) (Buildpack, error) { + bpTOML := Descriptor{} + bpPath := filepath.Join(f.Dir, launch.EscapeID(bpID), bpVersion) + tomlPath := filepath.Join(bpPath, "buildpack.toml") + if _, err := toml.DecodeFile(tomlPath, &bpTOML); err != nil { + return nil, err + } + bpTOML.Dir = bpPath + return &bpTOML, nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/v05/encoderdecoder.go b/vendor/github.com/buildpacks/lifecycle/buildpack/v05/encoderdecoder.go new file mode 100644 index 0000000000..bb6fb09587 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/v05/encoderdecoder.go @@ -0,0 +1,42 @@ +package v05 + +import ( + "os" + + "github.com/BurntSushi/toml" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/buildpack/layertypes" +) + +type EncoderDecoder05 struct { +} + +func NewEncoderDecoder() *EncoderDecoder05 { + return &EncoderDecoder05{} +} + +func (d *EncoderDecoder05) IsSupported(buildpackAPI string) bool { + return 
api.MustParse(buildpackAPI).LessThan("0.6") +} + +func (d *EncoderDecoder05) Encode(file *os.File, lmf layertypes.LayerMetadataFile) error { + return toml.NewEncoder(file).Encode(lmf) +} + +func (d *EncoderDecoder05) Decode(path string) (layertypes.LayerMetadataFile, string, error) { + var lmf layertypes.LayerMetadataFile + md, err := toml.DecodeFile(path, &lmf) + if err != nil { + return layertypes.LayerMetadataFile{}, "", err + } + msg := "" + if isWrongFormat := typesInTypesTable(md); isWrongFormat { + msg = "Types table isn't supported in this buildpack api version. The launch, build and cache flags should be in the top level. Ignoring the values in the types table." + } + return lmf, msg, nil +} + +func typesInTypesTable(md toml.MetaData) bool { + return md.IsDefined("types") +} diff --git a/vendor/github.com/buildpacks/lifecycle/buildpack/v06/encoderdecoder.go b/vendor/github.com/buildpacks/lifecycle/buildpack/v06/encoderdecoder.go new file mode 100644 index 0000000000..557bb26db6 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/buildpack/v06/encoderdecoder.go @@ -0,0 +1,58 @@ +package v06 + +import ( + "fmt" + "os" + + "github.com/BurntSushi/toml" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/buildpack/layertypes" +) + +type EncoderDecoder06 struct { +} + +func NewEncoderDecoder() *EncoderDecoder06 { + return &EncoderDecoder06{} +} + +func (d *EncoderDecoder06) IsSupported(buildpackAPI string) bool { + return api.MustParse(buildpackAPI).AtLeast("0.6") +} + +func (d *EncoderDecoder06) Encode(file *os.File, lmf layertypes.LayerMetadataFile) error { + // omit the types table - all the flags are set to false + type dataTomlFile struct { + Data interface{} `toml:"metadata"` + } + dtf := dataTomlFile{Data: lmf.Data} + return toml.NewEncoder(file).Encode(dtf) +} + +func (d *EncoderDecoder06) Decode(path string) (layertypes.LayerMetadataFile, string, error) { + type typesTable struct { + Build bool `toml:"build"` + Launch bool `toml:"launch"` + Cache bool `toml:"cache"` + } + type layerMetadataTomlFile struct { + Data interface{} `toml:"metadata"` + Types typesTable `toml:"types"` + } + + var lmtf layerMetadataTomlFile + md, err := toml.DecodeFile(path, &lmtf) + if err != nil { + return layertypes.LayerMetadataFile{}, "", err + } + msg := "" + if isWrongFormat := typesInTopLevel(md); isWrongFormat { + msg = fmt.Sprintf("the launch, cache and build flags should be in the types table of %s", path) + } + return layertypes.LayerMetadataFile{Data: lmtf.Data, Build: lmtf.Types.Build, Launch: lmtf.Types.Launch, Cache: lmtf.Types.Cache}, msg, nil +} + +func typesInTopLevel(md toml.MetaData) bool { + return md.IsDefined("build") || md.IsDefined("launch") || md.IsDefined("cache") +} diff --git a/vendor/github.com/buildpacks/lifecycle/cache.go b/vendor/github.com/buildpacks/lifecycle/cache.go new file mode 100644 index 0000000000..6bac9d5c75 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cache.go @@ -0,0 +1,75 @@ +package lifecycle + +import ( + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/platform" +) + +func (e *Exporter) Cache(layersDir string, cacheStore Cache) error { + var err error + if !cacheStore.Exists() { + e.Logger.Info("Layer cache not found") + } + origMeta, err := cacheStore.RetrieveMetadata() + if err != nil { + return errors.Wrap(err, "metadata for previous cache") + } + meta := platform.CacheMetadata{} + + for _, bp := range e.Buildpacks { + bpDir, err := readBuildpackLayersDir(layersDir, bp, e.Logger) + if err != 
nil { + return errors.Wrapf(err, "reading layers for buildpack '%s'", bp.ID) + } + + bpMD := platform.BuildpackLayersMetadata{ + ID: bp.ID, + Version: bp.Version, + Layers: map[string]platform.BuildpackLayerMetadata{}, + } + for _, layer := range bpDir.findLayers(forCached) { + layer := layer + if !layer.hasLocalContents() { + e.Logger.Warnf("Failed to cache layer '%s' because it has no contents", layer.Identifier()) + continue + } + lmd, err := layer.read() + if err != nil { + e.Logger.Warnf("Failed to cache layer '%s' because of error reading metadata: %s", layer.Identifier(), err) + continue + } + origLayerMetadata := origMeta.MetadataForBuildpack(bp.ID).Layers[layer.name()] + if lmd.SHA, err = e.addOrReuseCacheLayer(cacheStore, &layer, origLayerMetadata.SHA); err != nil { + e.Logger.Warnf("Failed to cache layer '%s': %s", layer.Identifier(), err) + continue + } + bpMD.Layers[layer.name()] = lmd + } + meta.Buildpacks = append(meta.Buildpacks, bpMD) + } + + if err := cacheStore.SetMetadata(meta); err != nil { + return errors.Wrap(err, "setting cache metadata") + } + if err := cacheStore.Commit(); err != nil { + return errors.Wrap(err, "committing cache") + } + + return nil +} + +func (e *Exporter) addOrReuseCacheLayer(cache Cache, layerDir layerDir, previousSHA string) (string, error) { + layer, err := e.LayerFactory.DirLayer(layerDir.Identifier(), layerDir.Path()) + if err != nil { + return "", errors.Wrapf(err, "creating layer '%s'", layerDir.Identifier()) + } + if layer.Digest == previousSHA { + e.Logger.Infof("Reusing cache layer '%s'\n", layer.ID) + e.Logger.Debugf("Layer '%s' SHA: %s\n", layer.ID, layer.Digest) + return layer.Digest, cache.ReuseLayer(previousSHA) + } + e.Logger.Infof("Adding cache layer '%s'\n", layer.ID) + e.Logger.Debugf("Layer '%s' SHA: %s\n", layer.ID, layer.Digest) + return layer.Digest, cache.AddLayerFile(layer.TarPath, layer.Digest) +} diff --git a/vendor/github.com/buildpacks/lifecycle/cmd/command.go b/vendor/github.com/buildpacks/lifecycle/cmd/command.go new file mode 100644 index 0000000000..6d55ae3516 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cmd/command.go @@ -0,0 +1,62 @@ +package cmd + +import ( + "io/ioutil" + "log" + "os" +) + +// Command defines the interface for running the lifecycle phases +type Command interface { + // DefineFlags defines flags + DefineFlags() + + // Args validates arguments and flags + Args(nargs int, args []string) error + + // Privileges validates the needed privileges + Privileges() error + + // Exec executes the command + Exec() error +} + +func Run(c Command, asSubcommand bool) { + var ( + printVersion bool + logLevel string + noColor bool + ) + + log.SetOutput(ioutil.Discard) + FlagVersion(&printVersion) + FlagLogLevel(&logLevel) + FlagNoColor(&noColor) + c.DefineFlags() + if asSubcommand { + if err := flagSet.Parse(os.Args[2:]); err != nil { + //flagSet exits on error, we shouldn't get here + Exit(err) + } + } else { + if err := flagSet.Parse(os.Args[1:]); err != nil { + //flagSet exits on error, we shouldn't get here + Exit(err) + } + } + DisableColor(noColor) + + if printVersion { + ExitWithVersion() + } + if err := SetLogLevel(logLevel); err != nil { + Exit(err) + } + if err := c.Args(flagSet.NArg(), flagSet.Args()); err != nil { + Exit(err) + } + if err := c.Privileges(); err != nil { + Exit(err) + } + Exit(c.Exec()) +} diff --git a/vendor/github.com/buildpacks/lifecycle/cmd/exit.go b/vendor/github.com/buildpacks/lifecycle/cmd/exit.go new file mode 100644 index 0000000000..7c4013e36e --- /dev/null 
+++ b/vendor/github.com/buildpacks/lifecycle/cmd/exit.go @@ -0,0 +1,87 @@ +package cmd + +import ( + "fmt" + "os" + "strings" +) + +const ( + // lifecycle errors not specific to any phase: 1-99 + CodeFailed = 1 // CodeFailed indicates generic lifecycle error + // 2: reserved + CodeInvalidArgs = 3 + // 4: CodeInvalidEnv + // 5: CodeNotFound + // 9: CodeFailedUpdate + + // API errors + CodeIncompatiblePlatformAPI = 11 + CodeIncompatibleBuildpackAPI = 12 +) + +type LifecycleExitError int + +const ( + FailedDetect LifecycleExitError = iota + FailedDetectWithErrors // no buildpacks detected + DetectError // no buildpacks detected and at least one errored + AnalyzeError // generic analyze error + RestoreError // generic restore error + FailedBuildWithErrors // buildpack error during /bin/build + BuildError // generic build error + ExportError // generic export error + RebaseError // generic rebase error + LaunchError // generic launch error +) + +type Platform interface { + API() string + CodeFor(errType LifecycleExitError) int +} + +type ErrorFail struct { + Err error + Code int + Action []string +} + +func (e *ErrorFail) Error() string { + message := "failed to " + strings.Join(e.Action, " ") + if e.Err == nil { + return message + } + return fmt.Sprintf("%s: %s", message, e.Err) +} + +func FailCode(code int, action ...string) *ErrorFail { + return FailErrCode(nil, code, action...) +} + +func FailErr(err error, action ...string) *ErrorFail { + code := CodeFailed + if err, ok := err.(*ErrorFail); ok { + code = err.Code + } + return FailErrCode(err, code, action...) +} + +func FailErrCode(err error, code int, action ...string) *ErrorFail { + return &ErrorFail{Err: err, Code: code, Action: action} +} + +func Exit(err error) { + if err == nil { + os.Exit(0) + } + DefaultLogger.Errorf("%s\n", err) + if err, ok := err.(*ErrorFail); ok { + os.Exit(err.Code) + } + os.Exit(CodeFailed) +} + +func ExitWithVersion() { + DefaultLogger.Infof(buildVersion()) + os.Exit(0) +} diff --git a/vendor/github.com/buildpacks/lifecycle/cmd/flags.go b/vendor/github.com/buildpacks/lifecycle/cmd/flags.go new file mode 100644 index 0000000000..43fbc89aa1 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cmd/flags.go @@ -0,0 +1,262 @@ +package cmd + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strconv" + + "github.com/buildpacks/lifecycle/api" +) + +var ( + DefaultAppDir = filepath.Join(rootDir, "workspace") + DefaultBuildpacksDir = filepath.Join(rootDir, "cnb", "buildpacks") + DefaultDeprecationMode = DeprecationModeWarn + DefaultLauncherPath = filepath.Join(rootDir, "cnb", "lifecycle", "launcher"+execExt) + DefaultLayersDir = filepath.Join(rootDir, "layers") + DefaultLogLevel = "info" + DefaultPlatformAPI = "0.3" + DefaultPlatformDir = filepath.Join(rootDir, "platform") + DefaultProcessType = "web" + DefaultStackPath = filepath.Join(rootDir, "cnb", "stack.toml") + + DefaultAnalyzedFile = "analyzed.toml" + DefaultGroupFile = "group.toml" + DefaultOrderFile = "order.toml" + DefaultPlanFile = "plan.toml" + DefaultProjectMetadataFile = "project-metadata.toml" + DefaultReportFile = "report.toml" + + PlaceholderAnalyzedPath = filepath.Join("", DefaultAnalyzedFile) + PlaceholderGroupPath = filepath.Join("", DefaultGroupFile) + PlaceholderPlanPath = filepath.Join("", DefaultPlanFile) + PlaceholderProjectMetadataPath = filepath.Join("", DefaultProjectMetadataFile) + PlaceholderReportPath = filepath.Join("", DefaultReportFile) + PlaceholderOrderPath = filepath.Join("", DefaultOrderFile) +) + +const ( + 
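// These variables supply defaults for the corresponding flags and settings, so + // an explicitly passed flag takes precedence over its environment variable. +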
EnvAnalyzedPath = "CNB_ANALYZED_PATH" + EnvAppDir = "CNB_APP_DIR" + EnvBuildpacksDir = "CNB_BUILDPACKS_DIR" + EnvCacheDir = "CNB_CACHE_DIR" + EnvCacheImage = "CNB_CACHE_IMAGE" + EnvDeprecationMode = "CNB_DEPRECATION_MODE" + EnvGID = "CNB_GROUP_ID" + EnvGroupPath = "CNB_GROUP_PATH" + EnvLaunchCacheDir = "CNB_LAUNCH_CACHE_DIR" + EnvLayersDir = "CNB_LAYERS_DIR" + EnvLogLevel = "CNB_LOG_LEVEL" + EnvNoColor = "CNB_NO_COLOR" // defaults to false + EnvOrderPath = "CNB_ORDER_PATH" + EnvPlanPath = "CNB_PLAN_PATH" + EnvPlatformAPI = "CNB_PLATFORM_API" + EnvPlatformDir = "CNB_PLATFORM_DIR" + EnvPreviousImage = "CNB_PREVIOUS_IMAGE" + EnvProcessType = "CNB_PROCESS_TYPE" + EnvProjectMetadataPath = "CNB_PROJECT_METADATA_PATH" + EnvReportPath = "CNB_REPORT_PATH" + EnvRunImage = "CNB_RUN_IMAGE" + EnvSkipLayers = "CNB_ANALYZE_SKIP_LAYERS" // defaults to false + EnvSkipRestore = "CNB_SKIP_RESTORE" // defaults to false + EnvStackPath = "CNB_STACK_PATH" + EnvUID = "CNB_USER_ID" + EnvUseDaemon = "CNB_USE_DAEMON" // defaults to false +) + +var flagSet = flag.NewFlagSet("lifecycle", flag.ExitOnError) + +func FlagAnalyzedPath(analyzedPath *string) { + flagSet.StringVar(analyzedPath, "analyzed", EnvOrDefault(EnvAnalyzedPath, PlaceholderAnalyzedPath), "path to analyzed.toml") +} + +func DefaultAnalyzedPath(platformAPI, layersDir string) string { + return defaultPath(DefaultAnalyzedFile, platformAPI, layersDir) +} + +func FlagAppDir(appDir *string) { + flagSet.StringVar(appDir, "app", EnvOrDefault(EnvAppDir, DefaultAppDir), "path to app directory") +} + +func FlagBuildpacksDir(buildpacksDir *string) { + flagSet.StringVar(buildpacksDir, "buildpacks", EnvOrDefault(EnvBuildpacksDir, DefaultBuildpacksDir), "path to buildpacks directory") +} + +func FlagCacheDir(cacheDir *string) { + flagSet.StringVar(cacheDir, "cache-dir", os.Getenv(EnvCacheDir), "path to cache directory") +} + +func FlagCacheImage(cacheImage *string) { + flagSet.StringVar(cacheImage, "cache-image", os.Getenv(EnvCacheImage), "cache image tag name") +} + +func FlagGID(gid *int) { + flagSet.IntVar(gid, "gid", intEnv(EnvGID), "GID of user's group in the stack's build and run images") +} + +func FlagGroupPath(groupPath *string) { + flagSet.StringVar(groupPath, "group", EnvOrDefault(EnvGroupPath, PlaceholderGroupPath), "path to group.toml") +} + +func DefaultGroupPath(platformAPI, layersDir string) string { + return defaultPath(DefaultGroupFile, platformAPI, layersDir) +} + +func FlagLaunchCacheDir(launchCacheDir *string) { + flagSet.StringVar(launchCacheDir, "launch-cache", os.Getenv(EnvLaunchCacheDir), "path to launch cache directory") +} + +func FlagLauncherPath(launcherPath *string) { + flagSet.StringVar(launcherPath, "launcher", DefaultLauncherPath, "path to launcher binary") +} + +func FlagLayersDir(layersDir *string) { + flagSet.StringVar(layersDir, "layers", EnvOrDefault(EnvLayersDir, DefaultLayersDir), "path to layers directory") +} + +func FlagNoColor(skip *bool) { + flagSet.BoolVar(skip, "no-color", BoolEnv(EnvNoColor), "disable color output") +} + +func FlagOrderPath(orderPath *string) { + flagSet.StringVar(orderPath, "order", EnvOrDefault(EnvOrderPath, PlaceholderOrderPath), "path to order.toml") +} + +func DefaultOrderPath(platformAPI, layersDir string) string { + cnbOrderPath := filepath.Join(rootDir, "cnb", "order.toml") + + // prior to Platform API 0.6, the default is /cnb/order.toml + if api.MustParse(platformAPI).LessThan("0.6") { + return cnbOrderPath + } + + // the default is <layers>/order.toml or /cnb/order.toml if not present +
layersOrderPath := filepath.Join(layersDir, "order.toml") + if _, err := os.Stat(layersOrderPath); os.IsNotExist(err) { + return cnbOrderPath + } + return layersOrderPath +} + +func FlagPlanPath(planPath *string) { + flagSet.StringVar(planPath, "plan", EnvOrDefault(EnvPlanPath, PlaceholderPlanPath), "path to plan.toml") +} + +func DefaultPlanPath(platformAPI, layersDir string) string { + return defaultPath(DefaultPlanFile, platformAPI, layersDir) +} + +func FlagPlatformDir(platformDir *string) { + flagSet.StringVar(platformDir, "platform", EnvOrDefault(EnvPlatformDir, DefaultPlatformDir), "path to platform directory") +} + +func FlagPreviousImage(image *string) { + flagSet.StringVar(image, "previous-image", os.Getenv(EnvPreviousImage), "reference to previous image") +} + +func FlagReportPath(reportPath *string) { + flagSet.StringVar(reportPath, "report", EnvOrDefault(EnvReportPath, PlaceholderReportPath), "path to report.toml") +} + +func DefaultReportPath(platformAPI, layersDir string) string { + return defaultPath(DefaultReportFile, platformAPI, layersDir) +} + +func FlagRunImage(runImage *string) { + flagSet.StringVar(runImage, "run-image", os.Getenv(EnvRunImage), "reference to run image") +} + +func FlagSkipLayers(skip *bool) { + flagSet.BoolVar(skip, "skip-layers", BoolEnv(EnvSkipLayers), "do not provide layer metadata to buildpacks") +} + +func FlagSkipRestore(skip *bool) { + flagSet.BoolVar(skip, "skip-restore", BoolEnv(EnvSkipRestore), "do not restore layers or layer metadata") +} + +func FlagStackPath(stackPath *string) { + flagSet.StringVar(stackPath, "stack", EnvOrDefault(EnvStackPath, DefaultStackPath), "path to stack.toml") +} + +func FlagTags(tags *StringSlice) { + flagSet.Var(tags, "tag", "additional tags") +} + +func FlagUID(uid *int) { + flagSet.IntVar(uid, "uid", intEnv(EnvUID), "UID of user in the stack's build and run images") +} + +func FlagUseDaemon(use *bool) { + flagSet.BoolVar(use, "daemon", BoolEnv(EnvUseDaemon), "export to docker daemon") +} + +func FlagVersion(version *bool) { + flagSet.BoolVar(version, "version", false, "show version") +} + +func FlagLogLevel(level *string) { + flagSet.StringVar(level, "log-level", EnvOrDefault(EnvLogLevel, DefaultLogLevel), "logging level") +} + +func FlagProjectMetadataPath(projectMetadataPath *string) { + flagSet.StringVar(projectMetadataPath, "project-metadata", EnvOrDefault(EnvProjectMetadataPath, PlaceholderProjectMetadataPath), "path to project-metadata.toml") +} + +func DefaultProjectMetadataPath(platformAPI, layersDir string) string { + return defaultPath(DefaultProjectMetadataFile, platformAPI, layersDir) +} + +func FlagProcessType(processType *string) { + flagSet.StringVar(processType, "process-type", os.Getenv(EnvProcessType), "default process type") +} + +func DeprecatedFlagRunImage(image *string) { + flagSet.StringVar(image, "image", "", "reference to run image") +} + +type StringSlice []string + +func (s *StringSlice) String() string { + return fmt.Sprintf("%+v", *s) +} + +func (s *StringSlice) Set(value string) error { + *s = append(*s, value) + return nil +} + +func intEnv(k string) int { + v := os.Getenv(k) + d, err := strconv.Atoi(v) + if err != nil { + return 0 + } + return d +} + +func BoolEnv(k string) bool { + v := os.Getenv(k) + b, err := strconv.ParseBool(v) + if err != nil { + return false + } + return b +} + +func EnvOrDefault(key string, defaultVal string) string { + if envVal := os.Getenv(key); envVal != "" { + return envVal + } + return defaultVal +} + +func defaultPath(fileName, platformAPI, 
layersDir string) string { + if (api.MustParse(platformAPI).LessThan("0.5")) || (layersDir == "") { + // prior to platform api 0.5, the default directory was the working dir. + // layersDir is unset when this call comes from the rebaser - will be fixed as part of https://github.com/buildpacks/spec/issues/156 + return filepath.Join(".", fileName) + } + return filepath.Join(layersDir, fileName) // starting from platform api 0.5, the default directory is the layers dir. +} diff --git a/vendor/github.com/buildpacks/lifecycle/cmd/flags_unix.go b/vendor/github.com/buildpacks/lifecycle/cmd/flags_unix.go new file mode 100644 index 0000000000..1b1412ed64 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cmd/flags_unix.go @@ -0,0 +1,8 @@ +// +build linux darwin + +package cmd + +const ( + rootDir = "/" + execExt = "" +) diff --git a/vendor/github.com/buildpacks/lifecycle/cmd/flags_windows.go b/vendor/github.com/buildpacks/lifecycle/cmd/flags_windows.go new file mode 100644 index 0000000000..b426acdc1b --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cmd/flags_windows.go @@ -0,0 +1,6 @@ +package cmd + +const ( + rootDir = `c:\` + execExt = ".exe" +) diff --git a/vendor/github.com/buildpacks/lifecycle/cmd/logs.go b/vendor/github.com/buildpacks/lifecycle/cmd/logs.go new file mode 100644 index 0000000000..00c2d710b3 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cmd/logs.go @@ -0,0 +1,87 @@ +package cmd + +import ( + "io" + "os" + "sync" + + "github.com/apex/log" + "github.com/heroku/color" +) + +const ( + errorLevelText = "ERROR: " + warnLevelText = "Warning: " +) + +func init() { + // TODO: uncomment when buildpacks/pack#493 (lifecycle containers with a tty) is implemented + //color.Disable(!terminal.IsTerminal(int(os.Stdout.Fd()))) +} + +var ( + DefaultLogger = &Logger{ + &log.Logger{ + Handler: &handler{ + writer: Stdout, + }, + }, + } + Stdout = color.NewConsole(os.Stdout) + Stderr = color.NewConsole(os.Stderr) + warnStyle = color.New(color.FgYellow, color.Bold).SprintfFunc() + errorStyle = color.New(color.FgRed, color.Bold).SprintfFunc() + phaseStyle = color.New(color.FgCyan).SprintfFunc() +) + +type Logger struct { + *log.Logger +} + +func (l *Logger) Phase(name string) { + l.Infof(phaseStyle("===> %s", name)) +} + +func SetLogLevel(level string) *ErrorFail { + var err error + DefaultLogger.Level, err = log.ParseLevel(level) + if err != nil { + return FailErrCode(err, CodeInvalidArgs, "parse log level") + } + + return nil +} + +func DisableColor(noColor bool) { + Stdout.DisableColors(noColor) + Stderr.DisableColors(noColor) +} + +type handler struct { + mu sync.Mutex + writer io.Writer +} + +func (h *handler) HandleLog(entry *log.Entry) error { + h.mu.Lock() + defer h.mu.Unlock() + + var err error + switch entry.Level { + case log.WarnLevel: + _, err = h.writer.Write([]byte(warnStyle(warnLevelText) + appendMissingLineFeed(entry.Message))) + case log.ErrorLevel: + _, err = h.writer.Write([]byte(errorStyle(errorLevelText) + appendMissingLineFeed(entry.Message))) + default: + _, err = h.writer.Write([]byte(appendMissingLineFeed(entry.Message))) + } + return err +} + +func appendMissingLineFeed(msg string) string { + buff := []byte(msg) + if buff[len(buff)-1] != '\n' { + buff = append(buff, '\n') + } + return string(buff) +} diff --git a/vendor/github.com/buildpacks/lifecycle/cmd/version.go b/vendor/github.com/buildpacks/lifecycle/cmd/version.go new file mode 100644 index 0000000000..a17dcb7020 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cmd/version.go @@ 
-0,0 +1,106 @@ +package cmd + +import ( + "fmt" + "strings" + + "github.com/buildpacks/lifecycle/api" +) + +// The following variables are injected at compile time. +var ( + // Version is the version of the lifecycle and all produced binaries. + Version = "0.0.0" + // SCMCommit is the commit information provided by SCM. + SCMCommit = "" + // SCMRepository is the source repository. + SCMRepository = "" + + DeprecationMode = EnvOrDefault(EnvDeprecationMode, DefaultDeprecationMode) +) + +const ( + DeprecationModeQuiet = "quiet" + DeprecationModeWarn = "warn" + DeprecationModeError = "error" +) + +// buildVersion is a display format of the version and build metadata in compliance with semver. +func buildVersion() string { + // noinspection GoBoolExpressions + if SCMCommit == "" || strings.Contains(Version, SCMCommit) { + return Version + } + + return fmt.Sprintf("%s+%s", Version, SCMCommit) +} + +func VerifyPlatformAPI(requested string) error { + requestedAPI, err := api.NewVersion(requested) + if err != nil { + return FailErrCode( + fmt.Errorf("parse platform API '%s'", requested), + CodeIncompatiblePlatformAPI, + ) + } + if api.Platform.IsSupported(requestedAPI) { + if api.Platform.IsDeprecated(requestedAPI) { + switch DeprecationMode { + case DeprecationModeQuiet: + break + case DeprecationModeError: + DefaultLogger.Errorf("Platform requested deprecated API '%s'", requested) + DefaultLogger.Errorf("Deprecated APIs are disabled by %s=%s", EnvDeprecationMode, DeprecationModeError) + return platformAPIError(requested) + case DeprecationModeWarn: + DefaultLogger.Warnf("Platform requested deprecated API '%s'", requested) + default: + DefaultLogger.Warnf("Platform requested deprecated API '%s'", requested) + } + } + return nil + } + return platformAPIError(requested) +} + +func VerifyBuildpackAPI(bp string, requested string) error { + requestedAPI, err := api.NewVersion(requested) + if err != nil { + return FailErrCode( + fmt.Errorf("parse buildpack API '%s' for buildpack '%s'", requested, bp), + CodeIncompatibleBuildpackAPI, + ) + } + if api.Buildpack.IsSupported(requestedAPI) { + if api.Buildpack.IsDeprecated(requestedAPI) { + switch DeprecationMode { + case DeprecationModeQuiet: + break + case DeprecationModeError: + DefaultLogger.Errorf("Buildpack '%s' requests deprecated API '%s'", bp, requested) + DefaultLogger.Errorf("Deprecated APIs are disabled by %s=%s", EnvDeprecationMode, DeprecationModeError) + return buildpackAPIError(bp, requested) + case DeprecationModeWarn: + DefaultLogger.Warnf("Buildpack '%s' requests deprecated API '%s'", bp, requested) + default: + DefaultLogger.Warnf("Buildpack '%s' requests deprecated API '%s'", bp, requested) + } + } + return nil + } + return buildpackAPIError(bp, requested) +} + +func platformAPIError(requested string) error { + return FailErrCode( + fmt.Errorf("set platform API: platform API version '%s' is incompatible with the lifecycle", requested), + CodeIncompatiblePlatformAPI, + ) +} + +func buildpackAPIError(bp string, requested string) error { + return FailErrCode( + fmt.Errorf("set API for buildpack '%s': buildpack API version '%s' is incompatible with the lifecycle", bp, requested), + CodeIncompatibleBuildpackAPI, + ) +} diff --git a/vendor/github.com/buildpacks/lifecycle/codecov.yml b/vendor/github.com/buildpacks/lifecycle/codecov.yml new file mode 100644 index 0000000000..d1cc37613e --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/codecov.yml @@ -0,0 +1,18 @@ +codecov: + notify: + after_n_builds: 2 + +coverage: + round: up +
status: + project: + default: + threshold: 1% + patch: + default: + threshold: 10% + +comment: + layout: "reach,diff,flags" + require_changes: yes + after_n_builds: 2 diff --git a/vendor/github.com/buildpacks/lifecycle/cosign.pub b/vendor/github.com/buildpacks/lifecycle/cosign.pub new file mode 100644 index 0000000000..32522007e1 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/cosign.pub @@ -0,0 +1,5 @@ +-----BEGIN PUBLIC KEY----- +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEVHkU6XfpwHxNLpqvPOWrGBvwQuN5 +YftzwwYnvdxnu5aoCnkfOpLks+3r+tQQRbcjHsgZr/HjSFFiia5WAwNq4Q== +-----END PUBLIC KEY----- + diff --git a/vendor/github.com/buildpacks/lifecycle/detector.go b/vendor/github.com/buildpacks/lifecycle/detector.go new file mode 100644 index 0000000000..8b3c4d237a --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/detector.go @@ -0,0 +1,415 @@ +package lifecycle + +import ( + "fmt" + "os" + "sync" + + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/buildpack" + "github.com/buildpacks/lifecycle/env" + "github.com/buildpacks/lifecycle/platform" +) + +const ( + CodeDetectPass = 0 + CodeDetectFail = 100 +) + +var ( + ErrFailedDetection = errors.New("no buildpacks participating") + ErrBuildpack = errors.New("buildpack(s) failed with err") +) + +type Resolver interface { + Resolve(done []buildpack.GroupBuildpack, detectRuns *sync.Map) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) +} + +type Detector struct { + buildpack.DetectConfig + Platform Platform + Resolver Resolver + Runs *sync.Map + Store BuildpackStore +} + +func NewDetector(config buildpack.DetectConfig, buildpacksDir string, platform Platform) (*Detector, error) { + resolver := &DefaultResolver{ + Logger: config.Logger, + } + store, err := buildpack.NewBuildpackStore(buildpacksDir) + if err != nil { + return nil, err + } + return &Detector{ + DetectConfig: config, + Platform: platform, + Resolver: resolver, + Runs: &sync.Map{}, + Store: store, + }, nil +} + +func (d *Detector) Detect(order buildpack.Order) (buildpack.Group, platform.BuildPlan, error) { + return d.DetectOrder(order) +} + +func (d *Detector) DetectOrder(order buildpack.Order) (buildpack.Group, platform.BuildPlan, error) { + bps, entries, err := d.detectOrder(order, nil, nil, false, &sync.WaitGroup{}) + if err == ErrBuildpack { + err = buildpack.NewLifecycleError(err, buildpack.ErrTypeBuildpack) + } else if err == ErrFailedDetection { + err = buildpack.NewLifecycleError(err, buildpack.ErrTypeFailedDetection) + } + for i := range entries { + for j := range entries[i].Requires { + entries[i].Requires[j].ConvertVersionToMetadata() + } + } + return buildpack.Group{Group: bps}, platform.BuildPlan{Entries: entries}, err +} + +func (d *Detector) detectOrder(order buildpack.Order, done, next []buildpack.GroupBuildpack, optional bool, wg *sync.WaitGroup) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) { + ngroup := buildpack.Group{Group: next} + buildpackErr := false + for _, group := range order { + // FIXME: double-check slice safety here + found, plan, err := d.detectGroup(group.Append(ngroup), done, wg) + if err == ErrBuildpack { + buildpackErr = true + } + if err == ErrFailedDetection || err == ErrBuildpack { + wg = &sync.WaitGroup{} + continue + } + return found, plan, err + } + if optional { + return d.detectGroup(ngroup, done, wg) + } + + if buildpackErr { + return nil, nil, ErrBuildpack + } + return nil, nil, ErrFailedDetection +} + +func (d *Detector) detectGroup(group buildpack.Group, done []buildpack.GroupBuildpack, wg 
*sync.WaitGroup) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) { + for i, groupBp := range group.Group { + key := groupBp.String() + if hasID(done, groupBp.ID) { + continue + } + + bp, err := d.Store.Lookup(groupBp.ID, groupBp.Version) + if err != nil { + return nil, nil, err + } + + bpDesc := bp.ConfigFile() + groupBp.API = bpDesc.API + groupBp.Homepage = bpDesc.Buildpack.Homepage + + if bpDesc.IsMetaBuildpack() { + // TODO: double-check slice safety here + // FIXME: cyclical references lead to infinite recursion + return d.detectOrder(bpDesc.Order, done, group.Group[i+1:], groupBp.Optional, wg) + } + + bpEnv := env.NewBuildEnv(os.Environ()) + + done = append(done, groupBp) + wg.Add(1) + go func(key string, bp Buildpack) { + if _, ok := d.Runs.Load(key); !ok { + d.Runs.Store(key, bp.Detect(&d.DetectConfig, bpEnv)) + } + wg.Done() + }(key, bp) + } + + wg.Wait() + + return d.Resolver.Resolve(done, d.Runs) +} + +func hasID(bps []buildpack.GroupBuildpack, id string) bool { + for _, bp := range bps { + if bp.ID == id { + return true + } + } + return false +} + +type DefaultResolver struct { + Logger Logger +} + +// Resolve aggregates the detect output for a group of buildpacks and tries to resolve a build plan for the group. +// If any required buildpack in the group failed detection or a build plan cannot be resolved, it returns an error. +func (r *DefaultResolver) Resolve(done []buildpack.GroupBuildpack, detectRuns *sync.Map) ([]buildpack.GroupBuildpack, []platform.BuildPlanEntry, error) { + var groupRuns []buildpack.DetectRun + for _, bp := range done { + t, ok := detectRuns.Load(bp.String()) + if !ok { + return nil, nil, errors.Errorf("missing detection of '%s'", bp) + } + run := t.(buildpack.DetectRun) + outputLogf := r.Logger.Debugf + + switch run.Code { + case CodeDetectPass, CodeDetectFail: + default: + outputLogf = r.Logger.Infof + } + + if len(run.Output) > 0 { + outputLogf("======== Output: %s ========", bp) + outputLogf(string(run.Output)) + } + if run.Err != nil { + outputLogf("======== Error: %s ========", bp) + outputLogf(run.Err.Error()) + } + groupRuns = append(groupRuns, run) + } + + r.Logger.Debugf("======== Results ========") + + results := detectResults{} + detected := true + buildpackErr := false + for i, bp := range done { + run := groupRuns[i] + switch run.Code { + case CodeDetectPass: + r.Logger.Debugf("pass: %s", bp) + results = append(results, detectResult{bp, run}) + case CodeDetectFail: + if bp.Optional { + r.Logger.Debugf("skip: %s", bp) + } else { + r.Logger.Debugf("fail: %s", bp) + } + detected = detected && bp.Optional + case -1: + r.Logger.Infof("err: %s", bp) + buildpackErr = true + detected = detected && bp.Optional + default: + r.Logger.Infof("err: %s (%d)", bp, run.Code) + buildpackErr = true + detected = detected && bp.Optional + } + } + if !detected { + if buildpackErr { + return nil, nil, ErrBuildpack + } + return nil, nil, ErrFailedDetection + } + + i := 0 + deps, trial, err := results.runTrials(func(trial detectTrial) (depMap, detectTrial, error) { + i++ + return r.runTrial(i, trial) + }) + if err != nil { + return nil, nil, err + } + + if len(done) != len(trial) { + r.Logger.Infof("%d of %d buildpacks participating", len(trial), len(done)) + } + + maxLength := 0 + for _, t := range trial { + l := len(t.ID) + if l > maxLength { + maxLength = l + } + } + + f := fmt.Sprintf("%%-%ds %%s", maxLength) + + for _, t := range trial { + r.Logger.Infof(f, t.ID, t.Version) + } + + var found []buildpack.GroupBuildpack + for _, r := range trial { 
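+		// Editor's note (illustrative comment, not in the vendored source):
+		// NoOpt clears the Optional flag, since every buildpack that survives
+		// plan resolution is required in the final group.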
+ found = append(found, r.GroupBuildpack.NoOpt()) + } + var plan []platform.BuildPlanEntry + for _, dep := range deps { + plan = append(plan, dep.BuildPlanEntry.NoOpt()) + } + return found, plan, nil +} + +func (r *DefaultResolver) runTrial(i int, trial detectTrial) (depMap, detectTrial, error) { + r.Logger.Debugf("Resolving plan... (try #%d)", i) + + var deps depMap + retry := true + for retry { + retry = false + deps = newDepMap(trial) + + if err := deps.eachUnmetRequire(func(name string, bp buildpack.GroupBuildpack) error { + retry = true + if !bp.Optional { + r.Logger.Debugf("fail: %s requires %s", bp, name) + return ErrFailedDetection + } + r.Logger.Debugf("skip: %s requires %s", bp, name) + trial = trial.remove(bp) + return nil + }); err != nil { + return nil, nil, err + } + + if err := deps.eachUnmetProvide(func(name string, bp buildpack.GroupBuildpack) error { + retry = true + if !bp.Optional { + r.Logger.Debugf("fail: %s provides unused %s", bp, name) + return ErrFailedDetection + } + r.Logger.Debugf("skip: %s provides unused %s", bp, name) + trial = trial.remove(bp) + return nil + }); err != nil { + return nil, nil, err + } + } + + if len(trial) == 0 { + r.Logger.Debugf("fail: no viable buildpacks in group") + return nil, nil, ErrFailedDetection + } + return deps, trial, nil +} + +type detectResult struct { + buildpack.GroupBuildpack + buildpack.DetectRun +} + +func (r *detectResult) options() []detectOption { + var out []detectOption + for i, sections := range append([]buildpack.PlanSections{r.PlanSections}, r.Or...) { + bp := r.GroupBuildpack + bp.Optional = bp.Optional && i == len(r.Or) + out = append(out, detectOption{bp, sections}) + } + return out +} + +type detectResults []detectResult +type trialFunc func(detectTrial) (depMap, detectTrial, error) + +func (rs detectResults) runTrials(f trialFunc) (depMap, detectTrial, error) { + return rs.runTrialsFrom(nil, f) +} + +func (rs detectResults) runTrialsFrom(prefix detectTrial, f trialFunc) (depMap, detectTrial, error) { + if len(rs) == 0 { + deps, trial, err := f(prefix) + return deps, trial, err + } + + var lastErr error + for _, option := range rs[0].options() { + deps, trial, err := rs[1:].runTrialsFrom(append(prefix, option), f) + if err == nil { + return deps, trial, nil + } + lastErr = err + } + return nil, nil, lastErr +} + +type detectOption struct { + buildpack.GroupBuildpack + buildpack.PlanSections +} + +type detectTrial []detectOption + +func (ts detectTrial) remove(bp buildpack.GroupBuildpack) detectTrial { + var out detectTrial + for _, t := range ts { + if t.GroupBuildpack != bp { + out = append(out, t) + } + } + return out +} + +type depEntry struct { + platform.BuildPlanEntry + earlyRequires []buildpack.GroupBuildpack + extraProvides []buildpack.GroupBuildpack +} + +type depMap map[string]depEntry + +func newDepMap(trial detectTrial) depMap { + m := depMap{} + for _, option := range trial { + for _, p := range option.Provides { + m.provide(option.GroupBuildpack, p) + } + for _, r := range option.Requires { + m.require(option.GroupBuildpack, r) + } + } + return m +} + +func (m depMap) provide(bp buildpack.GroupBuildpack, provide buildpack.Provide) { + entry := m[provide.Name] + entry.extraProvides = append(entry.extraProvides, bp) + m[provide.Name] = entry +} + +func (m depMap) require(bp buildpack.GroupBuildpack, require buildpack.Require) { + entry := m[require.Name] + entry.Providers = append(entry.Providers, entry.extraProvides...) 
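+	// Editor's note (illustrative comment, not in the vendored source):
+	// provides recorded so far were just promoted to real Providers above;
+	// clearing extraProvides below marks them consumed, so any provide not
+	// followed by a require is later reported as unused by eachUnmetProvide.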
+	entry.extraProvides = nil
+
+	if len(entry.Providers) == 0 {
+		entry.earlyRequires = append(entry.earlyRequires, bp)
+	} else {
+		entry.Requires = append(entry.Requires, require)
+	}
+	m[require.Name] = entry
+}
+
+func (m depMap) eachUnmetProvide(f func(name string, bp buildpack.GroupBuildpack) error) error {
+	for name, entry := range m {
+		if len(entry.extraProvides) != 0 {
+			for _, bp := range entry.extraProvides {
+				if err := f(name, bp); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (m depMap) eachUnmetRequire(f func(name string, bp buildpack.GroupBuildpack) error) error {
+	for name, entry := range m {
+		if len(entry.earlyRequires) != 0 {
+			for _, bp := range entry.earlyRequires {
+				if err := f(name, bp); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/env/build.go b/vendor/github.com/buildpacks/lifecycle/env/build.go
new file mode 100644
index 0000000000..efa26795f1
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/env/build.go
@@ -0,0 +1,79 @@
+package env
+
+import (
+	"runtime"
+	"strings"
+)
+
+var BuildEnvIncludelist = []string{
+	"CNB_STACK_ID",
+	"HOSTNAME",
+	"HOME",
+	"HTTPS_PROXY",
+	"https_proxy",
+	"HTTP_PROXY",
+	"http_proxy",
+	"NO_PROXY",
+	"no_proxy",
+}
+
+var ignoreEnvVarCase = runtime.GOOS == "windows"
+
+// NewBuildEnv returns a build-time Env from the given environment.
+//
+// Keys in the BuildEnvIncludelist will be added to the Env.
+func NewBuildEnv(environ []string) *Env {
+	envFilter := isNotMember(BuildEnvIncludelist, flattenMap(POSIXBuildEnv))
+
+	return &Env{
+		RootDirMap: POSIXBuildEnv,
+		Vars:       varsFromEnv(environ, ignoreEnvVarCase, envFilter),
+	}
+}
+
+func matches(k1, k2 string) bool {
+	if ignoreEnvVarCase {
+		k1 = strings.ToUpper(k1)
+		k2 = strings.ToUpper(k2)
+	}
+	return k1 == k2
+}
+
+var POSIXBuildEnv = map[string][]string{
+	"bin": {
+		"PATH",
+	},
+	"lib": {
+		"LD_LIBRARY_PATH",
+		"LIBRARY_PATH",
+	},
+	"include": {
+		"CPATH",
+	},
+	"pkgconfig": {
+		"PKG_CONFIG_PATH",
+	},
+}
+
+func isNotMember(lists ...[]string) func(string) bool {
+	return func(key string) bool {
+		for _, list := range lists {
+			for _, wk := range list {
+				if matches(wk, key) {
+					// keep in env
+					return false
+				}
+			}
+		}
+		return true
+	}
+}
+
+func flattenMap(m map[string][]string) []string {
+	result := make([]string, 0)
+	for _, subList := range m {
+		result = append(result, subList...)
+	}
+
+	return result
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/env/env.go b/vendor/github.com/buildpacks/lifecycle/env/env.go
new file mode 100644
index 0000000000..ff99551d2f
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/env/env.go
@@ -0,0 +1,195 @@
+package env
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/lifecycle/api"
+)
+
+// Env is used to modify and return environment variables
+type Env struct {
+	// RootDirMap maps directories in a posix root filesystem to a slice of environment variables that
+	// should be modified when the keyed directory exists (see AddRootDir).
+	RootDirMap map[string][]string
+	Vars       *Vars
+}
+
+// AddRootDir modifies the environment given a root dir. If the root dir contains a directory that matches a key in
+// the Env RootDirMap, the absolute path to the keyed directory will be prepended to all the associated environment variables
+// using the OS path list separator as a delimiter.
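+// For example (editor's illustration, not part of the vendored source): with
+// RootDirMap{"bin": {"PATH"}} and an existing <dir>/bin, AddRootDir("/layers/x")
+// turns PATH=/usr/bin into PATH=/layers/x/bin:/usr/bin on POSIX, where ':' is
+// os.PathListSeparator.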
+func (p *Env) AddRootDir(dir string) error {
+	absDir, err := filepath.Abs(dir)
+	if err != nil {
+		return err
+	}
+	for dir, vars := range p.RootDirMap {
+		childDir := filepath.Join(absDir, dir)
+		if _, err := os.Stat(childDir); os.IsNotExist(err) {
+			continue
+		} else if err != nil {
+			return err
+		}
+		for _, key := range vars {
+			p.Vars.Set(key, childDir+prefix(p.Vars.Get(key), os.PathListSeparator))
+		}
+	}
+	return nil
+}
+
+func (p *Env) isRootEnv(name string) bool {
+	for _, m := range p.RootDirMap {
+		for _, k := range m {
+			if k == name {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+type ActionType string
+
+const (
+	ActionTypePrepend     ActionType = "prepend"
+	ActionTypeAppend      ActionType = "append"
+	ActionTypeOverride    ActionType = "override"
+	ActionTypeDefault     ActionType = "default"
+	ActionTypePrependPath ActionType = ""
+)
+
+// DefaultActionType returns the default action to perform for an unsuffixed env file as specified for the given
+// buildpack API.
+func DefaultActionType(bpAPI *api.Version) ActionType {
+	if bpAPI != nil && bpAPI.LessThan("0.5") {
+		return ActionTypePrependPath
+	}
+	return ActionTypeOverride
+}
+
+// AddEnvDir modifies the Env given a directory containing env files. For each file in the envDir, if the file has
+// a period-delimited suffix, the action matching the given suffix will be performed. If the file has no suffix,
+// the default action will be performed. If the suffix does not match a known type, AddEnvDir will ignore the file.
+func (p *Env) AddEnvDir(envDir string, defaultAction ActionType) error {
+	if err := eachEnvFile(envDir, func(k, v string) error {
+		parts := strings.SplitN(k, ".", 2)
+		name := parts[0]
+		var action ActionType
+		if len(parts) > 1 {
+			action = ActionType(parts[1])
+		} else {
+			action = defaultAction
+		}
+		switch action {
+		case ActionTypePrepend:
+			p.Vars.Set(name, v+prefix(p.Vars.Get(name), delim(envDir, name)...))
+		case ActionTypeAppend:
+			p.Vars.Set(name, suffix(p.Vars.Get(name), delim(envDir, name)...)+v)
+		case ActionTypeOverride:
+			p.Vars.Set(name, v)
+		case ActionTypeDefault:
+			if p.Vars.Get(name) != "" {
+				return nil
+			}
+			p.Vars.Set(name, v)
+		case ActionTypePrependPath:
+			p.Vars.Set(name, v+prefix(p.Vars.Get(name), delim(envDir, name, os.PathListSeparator)...))
+		}
+		return nil
+	}); err != nil {
+		return errors.Wrapf(err, "apply env files from dir '%s'", envDir)
+	}
+	return nil
+}
+
+// Set sets the environment variable with the given name to the given value.
+func (p *Env) Set(name, v string) {
+	p.Vars.Set(name, v)
+}
+
+// WithPlatform returns the environment after applying modifications from the given platform dir.
+// For each file in the platformDir, if the name of the file does not match an environment variable name in the
+// RootDirMap, the given variable will be set to the contents of the file. If the name does match an environment
+// variable name in the RootDirMap, the contents of the file will be prepended to the environment variable value
+// using the OS path list separator as a delimiter.
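+// For example (editor's illustration, not part of the vendored source): a file
+// <platformDir>/env/BP_LOG_LEVEL containing "debug" yields BP_LOG_LEVEL=debug,
+// while <platformDir>/env/PATH is prepended to the current PATH because PATH
+// appears in the RootDirMap.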
+func (p *Env) WithPlatform(platformDir string) (out []string, err error) { + vars := NewVars(p.Vars.vals, p.Vars.ignoreCase) + + if err := eachEnvFile(filepath.Join(platformDir, "env"), func(k, v string) error { + if p.isRootEnv(k) { + vars.Set(k, v+prefix(vars.Get(k), os.PathListSeparator)) + return nil + } + vars.Set(k, v) + return nil + }); err != nil { + return nil, err + } + return vars.List(), nil +} + +func prefix(s string, prefix ...byte) string { + if s == "" { + return "" + } + return string(prefix) + s +} + +func suffix(s string, suffix ...byte) string { + if s == "" { + return "" + } + return s + string(suffix) +} + +func delim(dir, name string, def ...byte) []byte { + value, err := ioutil.ReadFile(filepath.Join(dir, name+".delim")) + if err != nil { + return def + } + return value +} + +func eachEnvFile(dir string, fn func(k, v string) error) error { + files, err := ioutil.ReadDir(dir) + if os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + for _, f := range files { + if f.IsDir() { + continue + } + if f.Mode()&os.ModeSymlink != 0 { + lnFile, err := os.Stat(filepath.Join(dir, f.Name())) + if err != nil { + return err + } + if lnFile.IsDir() { + continue + } + } + value, err := ioutil.ReadFile(filepath.Join(dir, f.Name())) + if err != nil { + return err + } + if err := fn(f.Name(), string(value)); err != nil { + return err + } + } + return nil +} + +// List returns the environment +func (p *Env) List() []string { + return p.Vars.List() +} + +// Get returns the value for the given key +func (p *Env) Get(k string) string { + return p.Vars.Get(k) +} diff --git a/vendor/github.com/buildpacks/lifecycle/env/launch.go b/vendor/github.com/buildpacks/lifecycle/env/launch.go new file mode 100644 index 0000000000..c4d255acc2 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/env/launch.go @@ -0,0 +1,51 @@ +package env + +import ( + "os" + "strings" +) + +var LaunchEnvExcludelist = []string{ + "CNB_LAYERS_DIR", + "CNB_APP_DIR", + "CNB_PROCESS_TYPE", + "CNB_PLATFORM_API", + "CNB_DEPRECATION_MODE", +} + +// NewLaunchEnv returns an Env for process launch from the given environ. +// +// Keys in the LaunchEnvExcludelist shall be removed. +// processDir will be removed from the beginning of PATH if present. 
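+// For example (editor's illustration, not part of the vendored source): given
+// PATH=/cnb/process:/cnb/lifecycle:/usr/bin with processDir=/cnb/process and
+// lifecycleDir=/cnb/lifecycle, the launch PATH becomes /usr/bin; note that the
+// code strips lifecycleDir as well as processDir, and drops every key in
+// LaunchEnvExcludelist.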
+func NewLaunchEnv(environ []string, processDir string, lifecycleDir string) *Env { + vars := varsFromEnv(environ, ignoreEnvVarCase, isExcluded) + if path, ok := vars.vals["PATH"]; ok { + parts := strings.Split(path, string(os.PathListSeparator)) + var stripped []string + for _, part := range parts { + if part == processDir || part == lifecycleDir { + continue + } + stripped = append(stripped, part) + } + vars.vals["PATH"] = strings.Join(stripped, string(os.PathListSeparator)) + } + return &Env{ + RootDirMap: POSIXLaunchEnv, + Vars: vars, + } +} + +func isExcluded(k string) bool { + for _, wk := range LaunchEnvExcludelist { + if matches(wk, k) { + return true + } + } + return false +} + +var POSIXLaunchEnv = map[string][]string{ + "bin": {"PATH"}, + "lib": {"LD_LIBRARY_PATH"}, +} diff --git a/vendor/github.com/buildpacks/lifecycle/env/vars.go b/vendor/github.com/buildpacks/lifecycle/env/vars.go new file mode 100644 index 0000000000..d3ebac2277 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/env/vars.go @@ -0,0 +1,59 @@ +package env + +import ( + "strings" +) + +type Vars struct { + vals map[string]string + ignoreCase bool +} + +func varsFromEnv(env []string, ignoreCase bool, removeKey func(string) bool) *Vars { + vars := NewVars(nil, ignoreCase) + for _, kv := range env { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + continue + } + if removeKey(parts[0]) { + continue + } + vars.Set(parts[0], parts[1]) + } + return vars +} + +func NewVars(vars map[string]string, ignoreCase bool) *Vars { + s := &Vars{ + vals: map[string]string{}, + ignoreCase: ignoreCase, + } + for k, v := range vars { + s.Set(k, v) + } + return s +} + +func (s *Vars) Get(key string) string { + return s.vals[s.key(key)] +} + +func (s *Vars) Set(key, value string) { + s.vals[s.key(key)] = value +} + +func (s *Vars) key(k string) string { + if s.ignoreCase { + return strings.ToUpper(k) + } + return k +} + +func (s *Vars) List() []string { + var result []string + for k, v := range s.vals { + result = append(result, k+"="+v) + } + return result +} diff --git a/vendor/github.com/buildpacks/lifecycle/exporter.go b/vendor/github.com/buildpacks/lifecycle/exporter.go new file mode 100644 index 0000000000..abf930a756 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/exporter.go @@ -0,0 +1,470 @@ +package lifecycle + +import ( + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/BurntSushi/toml" + "github.com/buildpacks/imgutil" + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/buildpack" + "github.com/buildpacks/lifecycle/cmd" + "github.com/buildpacks/lifecycle/launch" + "github.com/buildpacks/lifecycle/layers" + "github.com/buildpacks/lifecycle/platform" +) + +type Cache interface { + Exists() bool + Name() string + SetMetadata(metadata platform.CacheMetadata) error + RetrieveMetadata() (platform.CacheMetadata, error) + AddLayerFile(tarPath string, sha string) error + ReuseLayer(sha string) error + RetrieveLayer(sha string) (io.ReadCloser, error) + Commit() error +} + +type Exporter struct { + Buildpacks []buildpack.GroupBuildpack + LayerFactory LayerFactory + Logger Logger + PlatformAPI *api.Version +} + +//go:generate mockgen -package testmock -destination testmock/layer_factory.go github.com/buildpacks/lifecycle LayerFactory +type LayerFactory interface { + DirLayer(id string, dir string) (layers.Layer, error) + LauncherLayer(path string) (layers.Layer, error) + ProcessTypesLayer(metadata launch.Metadata) 
(layers.Layer, error) + SliceLayers(dir string, slices []layers.Slice) ([]layers.Layer, error) +} + +type LauncherConfig struct { + Path string + Metadata platform.LauncherMetadata +} + +type ExportOptions struct { + LayersDir string + AppDir string + WorkingImage imgutil.Image + RunImageRef string + OrigMetadata platform.LayersMetadata + AdditionalNames []string + LauncherConfig LauncherConfig + Stack platform.StackMetadata + Project platform.ProjectMetadata + DefaultProcessType string +} + +func (e *Exporter) Export(opts ExportOptions) (platform.ExportReport, error) { + var err error + + opts.LayersDir, err = filepath.Abs(opts.LayersDir) + if err != nil { + return platform.ExportReport{}, errors.Wrapf(err, "layers dir absolute path") + } + + opts.AppDir, err = filepath.Abs(opts.AppDir) + if err != nil { + return platform.ExportReport{}, errors.Wrapf(err, "app dir absolute path") + } + + meta := platform.LayersMetadata{} + meta.RunImage.TopLayer, err = opts.WorkingImage.TopLayer() + if err != nil { + return platform.ExportReport{}, errors.Wrap(err, "get run image top layer SHA") + } + + meta.RunImage.Reference = opts.RunImageRef + meta.Stack = opts.Stack + + buildMD := &platform.BuildMetadata{} + if _, err := toml.DecodeFile(launch.GetMetadataFilePath(opts.LayersDir), buildMD); err != nil { + return platform.ExportReport{}, errors.Wrap(err, "read build metadata") + } + + // buildpack-provided layers + if err := e.addBuildpackLayers(opts, &meta); err != nil { + return platform.ExportReport{}, err + } + + // app layers (split into 1 or more slices) + if err := e.addAppLayers(opts, buildMD.Slices, &meta); err != nil { + return platform.ExportReport{}, errors.Wrap(err, "exporting app layers") + } + + // launcher layers (launcher binary, launcher config, process symlinks) + if err := e.addLauncherLayers(opts, buildMD, &meta); err != nil { + return platform.ExportReport{}, err + } + + if err := e.setLabels(opts, meta, buildMD); err != nil { + return platform.ExportReport{}, err + } + + if err := e.setEnv(opts, buildMD.ToLaunchMD()); err != nil { + return platform.ExportReport{}, err + } + + if e.PlatformAPI.AtLeast("0.6") { + e.Logger.Debugf("Setting WORKDIR: '%s'", opts.AppDir) + if err := e.setWorkingDir(opts); err != nil { + return platform.ExportReport{}, errors.Wrap(err, "setting workdir") + } + } + + entrypoint, err := e.entrypoint(buildMD.ToLaunchMD(), opts.DefaultProcessType, buildMD.BuildpackDefaultProcessType) + if err != nil { + return platform.ExportReport{}, errors.Wrap(err, "determining entrypoint") + } + e.Logger.Debugf("Setting ENTRYPOINT: '%s'", entrypoint) + if err = opts.WorkingImage.SetEntrypoint(entrypoint); err != nil { + return platform.ExportReport{}, errors.Wrap(err, "setting entrypoint") + } + + if err = opts.WorkingImage.SetCmd(); err != nil { // Note: Command intentionally empty + return platform.ExportReport{}, errors.Wrap(err, "setting cmd") + } + + report := platform.ExportReport{} + report.Build, err = e.makeBuildReport(opts.LayersDir) + if err != nil { + return platform.ExportReport{}, err + } + report.Image, err = saveImage(opts.WorkingImage, opts.AdditionalNames, e.Logger) + if err != nil { + return platform.ExportReport{}, err + } + if !e.supportsManifestSize() { + // unset manifest size in report.toml for old platform API versions + report.Image.ManifestSize = 0 + } + + return report, nil +} + +func (e *Exporter) addBuildpackLayers(opts ExportOptions, meta *platform.LayersMetadata) error { + for _, bp := range e.Buildpacks { + bpDir, err := 
readBuildpackLayersDir(opts.LayersDir, bp, e.Logger) + if err != nil { + return errors.Wrapf(err, "reading layers for buildpack '%s'", bp.ID) + } + bpMD := platform.BuildpackLayersMetadata{ + ID: bp.ID, + Version: bp.Version, + Layers: map[string]platform.BuildpackLayerMetadata{}, + Store: bpDir.store, + } + for _, fsLayer := range bpDir.findLayers(forLaunch) { + fsLayer := fsLayer + lmd, err := fsLayer.read() + if err != nil { + return errors.Wrapf(err, "reading '%s' metadata", fsLayer.Identifier()) + } + + if fsLayer.hasLocalContents() { + layer, err := e.LayerFactory.DirLayer(fsLayer.Identifier(), fsLayer.path) + if err != nil { + return errors.Wrapf(err, "creating layer") + } + origLayerMetadata := opts.OrigMetadata.MetadataForBuildpack(bp.ID).Layers[fsLayer.name()] + lmd.SHA, err = e.addOrReuseLayer(opts.WorkingImage, layer, origLayerMetadata.SHA) + if err != nil { + return err + } + } else { + if lmd.Cache { + return fmt.Errorf("layer '%s' is cache=true but has no contents", fsLayer.Identifier()) + } + origLayerMetadata, ok := opts.OrigMetadata.MetadataForBuildpack(bp.ID).Layers[fsLayer.name()] + if !ok { + return fmt.Errorf("cannot reuse '%s', previous image has no metadata for layer '%s'", fsLayer.Identifier(), fsLayer.Identifier()) + } + + e.Logger.Infof("Reusing layer '%s'\n", fsLayer.Identifier()) + e.Logger.Debugf("Layer '%s' SHA: %s\n", fsLayer.Identifier(), origLayerMetadata.SHA) + if err := opts.WorkingImage.ReuseLayer(origLayerMetadata.SHA); err != nil { + return errors.Wrapf(err, "reusing layer: '%s'", fsLayer.Identifier()) + } + lmd.SHA = origLayerMetadata.SHA + } + bpMD.Layers[fsLayer.name()] = lmd + } + meta.Buildpacks = append(meta.Buildpacks, bpMD) + + if malformedLayers := bpDir.findLayers(forMalformed); len(malformedLayers) > 0 { + ids := make([]string, 0, len(malformedLayers)) + for _, ml := range malformedLayers { + ids = append(ids, ml.Identifier()) + } + return fmt.Errorf("failed to parse metadata for layers '%s'", ids) + } + } + return nil +} + +func (e *Exporter) addLauncherLayers(opts ExportOptions, buildMD *platform.BuildMetadata, meta *platform.LayersMetadata) error { + launcherLayer, err := e.LayerFactory.LauncherLayer(opts.LauncherConfig.Path) + if err != nil { + return errors.Wrap(err, "creating launcher layers") + } + meta.Launcher.SHA, err = e.addOrReuseLayer(opts.WorkingImage, launcherLayer, opts.OrigMetadata.Launcher.SHA) + if err != nil { + return errors.Wrap(err, "exporting launcher configLayer") + } + configLayer, err := e.LayerFactory.DirLayer("config", filepath.Join(opts.LayersDir, "config")) + if err != nil { + return errors.Wrapf(err, "creating layer '%s'", configLayer.ID) + } + meta.Config.SHA, err = e.addOrReuseLayer(opts.WorkingImage, configLayer, opts.OrigMetadata.Config.SHA) + if err != nil { + return errors.Wrap(err, "exporting config layer") + } + + if err := e.launcherConfig(opts, buildMD, meta); err != nil { + return err + } + return nil +} + +func (e *Exporter) addAppLayers(opts ExportOptions, slices []layers.Slice, meta *platform.LayersMetadata) error { + // creating app layers (slices + app dir) + sliceLayers, err := e.LayerFactory.SliceLayers(opts.AppDir, slices) + if err != nil { + return errors.Wrap(err, "creating app layers") + } + + var numberOfReusedLayers int + for _, slice := range sliceLayers { + var err error + + found := false + for _, previous := range opts.OrigMetadata.App { + if slice.Digest == previous.SHA { + found = true + break + } + } + if found { + err = opts.WorkingImage.ReuseLayer(slice.Digest) + 
+			numberOfReusedLayers++
+		} else {
+			err = opts.WorkingImage.AddLayerWithDiffID(slice.TarPath, slice.Digest)
+		}
+		if err != nil {
+			return err
+		}
+		e.Logger.Debugf("Layer '%s' SHA: %s\n", slice.ID, slice.Digest)
+		meta.App = append(meta.App, platform.LayerMetadata{SHA: slice.Digest})
+	}
+
+	delta := len(sliceLayers) - numberOfReusedLayers
+	if numberOfReusedLayers > 0 {
+		e.Logger.Infof("Reusing %d/%d app layer(s)\n", numberOfReusedLayers, len(sliceLayers))
+	}
+	if delta != 0 {
+		e.Logger.Infof("Adding %d/%d app layer(s)\n", delta, len(sliceLayers))
+	}
+	return nil
+}
+
+func (e *Exporter) setLabels(opts ExportOptions, meta platform.LayersMetadata, buildMD *platform.BuildMetadata) error {
+	data, err := json.Marshal(meta)
+	if err != nil {
+		return errors.Wrap(err, "marshal metadata")
+	}
+
+	e.Logger.Infof("Adding label '%s'", platform.LayerMetadataLabel)
+	if err = opts.WorkingImage.SetLabel(platform.LayerMetadataLabel, string(data)); err != nil {
+		return errors.Wrap(err, "set app image metadata label")
+	}
+
+	buildMD.Launcher = opts.LauncherConfig.Metadata
+	buildJSON, err := json.Marshal(buildMD)
+	if err != nil {
+		return errors.Wrap(err, "marshal build metadata")
+	}
+
+	e.Logger.Infof("Adding label '%s'", platform.BuildMetadataLabel)
+	if err := opts.WorkingImage.SetLabel(platform.BuildMetadataLabel, string(buildJSON)); err != nil {
+		return errors.Wrap(err, "set build image metadata label")
+	}
+
+	projectJSON, err := json.Marshal(opts.Project)
+	if err != nil {
+		return errors.Wrap(err, "marshal project metadata")
+	}
+
+	e.Logger.Infof("Adding label '%s'", platform.ProjectMetadataLabel)
+	if err := opts.WorkingImage.SetLabel(platform.ProjectMetadataLabel, string(projectJSON)); err != nil {
+		return errors.Wrap(err, "set project metadata label")
+	}
+
+	for _, label := range buildMD.Labels {
+		e.Logger.Infof("Adding label '%s'", label.Key)
+		if err := opts.WorkingImage.SetLabel(label.Key, label.Value); err != nil {
+			return errors.Wrapf(err, "set buildpack-provided label '%s'", label.Key)
+		}
+	}
+	return nil
+}
+
+func (e *Exporter) setEnv(opts ExportOptions, launchMD launch.Metadata) error {
+	e.Logger.Debugf("Setting %s=%s", cmd.EnvLayersDir, opts.LayersDir)
+	if err := opts.WorkingImage.SetEnv(cmd.EnvLayersDir, opts.LayersDir); err != nil {
+		return errors.Wrapf(err, "set app image env %s", cmd.EnvLayersDir)
+	}
+
+	e.Logger.Debugf("Setting %s=%s", cmd.EnvAppDir, opts.AppDir)
+	if err := opts.WorkingImage.SetEnv(cmd.EnvAppDir, opts.AppDir); err != nil {
+		return errors.Wrapf(err, "set app image env %s", cmd.EnvAppDir)
+	}
+
+	e.Logger.Debugf("Setting %s=%s", cmd.EnvPlatformAPI, e.PlatformAPI.String())
+	if err := opts.WorkingImage.SetEnv(cmd.EnvPlatformAPI, e.PlatformAPI.String()); err != nil {
+		return errors.Wrapf(err, "set app image env %s", cmd.EnvPlatformAPI)
+	}
+
+	e.Logger.Debugf("Setting %s=%s", cmd.EnvDeprecationMode, cmd.DeprecationModeQuiet)
+	if err := opts.WorkingImage.SetEnv(cmd.EnvDeprecationMode, cmd.DeprecationModeQuiet); err != nil {
+		return errors.Wrapf(err, "set app image env %s", cmd.EnvDeprecationMode)
+	}
+
+	if e.supportsMulticallLauncher() {
+		path, err := opts.WorkingImage.Env("PATH")
+		if err != nil {
+			return errors.Wrap(err, "failed to get PATH from app image")
+		}
+		path = strings.Join([]string{launch.ProcessDir, launch.LifecycleDir, path}, string(os.PathListSeparator))
+		e.Logger.Debugf("Prepending %s and %s to PATH", launch.ProcessDir, launch.LifecycleDir)
+		if err := opts.WorkingImage.SetEnv("PATH", path); err != nil {
+			return errors.Wrap(err, "set app image env PATH")
+		}
+	} else if opts.DefaultProcessType != "" {
+		if _, ok := launchMD.FindProcessType(opts.DefaultProcessType); !ok {
+			return processTypeError(launchMD, opts.DefaultProcessType)
+		}
+		e.Logger.Debugf("Setting %s=%s", cmd.EnvProcessType, opts.DefaultProcessType)
+		if err := opts.WorkingImage.SetEnv(cmd.EnvProcessType, opts.DefaultProcessType); err != nil {
+			return errors.Wrapf(err, "set app image env %s", cmd.EnvProcessType)
+		}
+	}
+	return nil
+}
+
+func (e *Exporter) setWorkingDir(opts ExportOptions) error {
+	return opts.WorkingImage.SetWorkingDir(opts.AppDir)
+}
+
+func (e *Exporter) entrypoint(launchMD launch.Metadata, userDefaultProcessType, buildpackDefaultProcessType string) (string, error) {
+	if !e.supportsMulticallLauncher() {
+		return launch.LauncherPath, nil
+	}
+
+	if userDefaultProcessType == "" && e.PlatformAPI.LessThan("0.6") && len(launchMD.Processes) == 1 {
+		// if there is only one process, we set it to the default for platform API < 0.6
+		e.Logger.Infof("Setting default process type '%s'", launchMD.Processes[0].Type)
+		return launch.ProcessPath(launchMD.Processes[0].Type), nil
+	}
+
+	if userDefaultProcessType != "" {
+		defaultProcess, ok := launchMD.FindProcessType(userDefaultProcessType)
+		if !ok {
+			if e.PlatformAPI.LessThan("0.6") {
+				e.Logger.Warn(processTypeWarning(launchMD, userDefaultProcessType))
+				return launch.LauncherPath, nil
+			}
+			return "", fmt.Errorf("tried to set %s to default but it doesn't exist", userDefaultProcessType)
+		}
+		e.Logger.Infof("Setting default process type '%s'", defaultProcess.Type)
+		return launch.ProcessPath(defaultProcess.Type), nil
+	}
+	if buildpackDefaultProcessType == "" {
+		e.Logger.Info("no default process type")
+		return launch.LauncherPath, nil
+	}
+	e.Logger.Infof("Setting default process type '%s'", buildpackDefaultProcessType)
+	return launch.ProcessPath(buildpackDefaultProcessType), nil
+}
+
+// launcherConfig adds a process-types layer to the image when the platform supports the multicall launcher.
+func (e *Exporter) launcherConfig(opts ExportOptions, buildMD *platform.BuildMetadata, meta *platform.LayersMetadata) error {
+	if e.supportsMulticallLauncher() {
+		launchMD := launch.Metadata{
+			Processes: buildMD.Processes,
+		}
+		if len(buildMD.Processes) > 0 {
+			processTypesLayer, err := e.LayerFactory.ProcessTypesLayer(launchMD)
+			if err != nil {
+				return errors.Wrapf(err, "creating layer '%s'", processTypesLayer.ID)
+			}
+			meta.ProcessTypes.SHA, err = e.addOrReuseLayer(opts.WorkingImage, processTypesLayer, opts.OrigMetadata.ProcessTypes.SHA)
+			if err != nil {
+				return errors.Wrapf(err, "exporting layer '%s'", processTypesLayer.ID)
+			}
+		}
+	}
+	return nil
+}
+
+func (e *Exporter) supportsMulticallLauncher() bool {
+	return e.PlatformAPI.AtLeast("0.4")
+}
+
+func (e *Exporter) supportsManifestSize() bool {
+	return e.PlatformAPI.AtLeast("0.6")
+}
+
+func processTypeError(launchMD launch.Metadata, defaultProcessType string) error {
+	return fmt.Errorf(processTypeWarning(launchMD, defaultProcessType))
+}
+
+func processTypeWarning(launchMD launch.Metadata, defaultProcessType string) string {
+	var typeList []string
+	for _, p := range launchMD.Processes {
+		typeList = append(typeList, p.Type)
+	}
+	return fmt.Sprintf("default process type '%s' not present in list %+v", defaultProcessType, typeList)
+}
+
+func (e *Exporter) addOrReuseLayer(image imgutil.Image, layer layers.Layer, previousSHA string) (string, error) {
+	layer, err := e.LayerFactory.DirLayer(layer.ID, layer.TarPath)
+	if err != nil {
+		return "", errors.Wrapf(err, "creating layer '%s'", layer.ID)
+	}
+	if layer.Digest ==
previousSHA { + e.Logger.Infof("Reusing layer '%s'\n", layer.ID) + e.Logger.Debugf("Layer '%s' SHA: %s\n", layer.ID, layer.Digest) + return layer.Digest, image.ReuseLayer(previousSHA) + } + e.Logger.Infof("Adding layer '%s'\n", layer.ID) + e.Logger.Debugf("Layer '%s' SHA: %s\n", layer.ID, layer.Digest) + return layer.Digest, image.AddLayerWithDiffID(layer.TarPath, layer.Digest) +} + +func (e *Exporter) makeBuildReport(layersDir string) (platform.BuildReport, error) { + if e.PlatformAPI.LessThan("0.5") { + return platform.BuildReport{}, nil + } + var out []buildpack.BOMEntry + for _, bp := range e.Buildpacks { + if api.MustParse(bp.API).LessThan("0.5") { + continue + } + var bpBuildReport platform.BuildReport + bpBuildTOML := filepath.Join(layersDir, launch.EscapeID(bp.ID), "build.toml") + if _, err := toml.DecodeFile(bpBuildTOML, &bpBuildReport); err != nil && !os.IsNotExist(err) { + return platform.BuildReport{}, err + } + out = append(out, buildpack.WithBuildpack(bp, bpBuildReport.BOM)...) + } + return platform.BuildReport{BOM: out}, nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/go.mod b/vendor/github.com/buildpacks/lifecycle/go.mod new file mode 100644 index 0000000000..2ef8b7e0f6 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/go.mod @@ -0,0 +1,25 @@ +module github.com/buildpacks/lifecycle + +require ( + github.com/BurntSushi/toml v0.4.1 + github.com/apex/log v1.9.0 + github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771 + github.com/docker/docker v20.10.8+incompatible + github.com/docker/docker-credential-helpers v0.6.4 // indirect + github.com/golang/mock v1.6.0 + github.com/google/go-cmp v0.5.6 + github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9 + github.com/heroku/color v0.0.6 + github.com/mattn/go-colorable v0.1.8 // indirect + github.com/mattn/go-isatty v0.0.13 // indirect + github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect + github.com/pkg/errors v0.9.1 + github.com/sclevine/spec v1.4.0 + golang.org/x/net v0.0.0-20210610132358-84b48f89b13b // indirect + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20210608053332-aa57babbf139 + google.golang.org/genproto v0.0.0-20210610141715-e7a9b787a5a4 // indirect + gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 // indirect +) + +go 1.16 diff --git a/vendor/github.com/buildpacks/lifecycle/go.sum b/vendor/github.com/buildpacks/lifecycle/go.sum new file mode 100644 index 0000000000..5eb17eb302 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/go.sum @@ -0,0 +1,1219 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go 
v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795 h1:q4kDoSrHgRoD6okimjwWJOVKyxEUNS2JIuwt+EqcIqQ= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= 
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= 
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0= +github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA= +github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= +github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= +github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod 
h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771 h1:QhUkvEeYhnyj80genY7ktXVLttVc52zstwPTUDrImvE= +github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771/go.mod h1:ZfCbsFruoxG0vbEVMsX/afizEo4vn2e88Km7rJ1M2D8= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= 
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.2 h1:MG/Bg1pbmMb61j3wHCFWPxESXHieiKr2xG64px/k8zQ= +github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= 
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.6.4 h1:lhJppIf4ULGQXbcjUJIy1sq79UegNTEebDTtfU8MlcA= +github.com/containerd/stargz-snapshotter/estargz v0.6.4/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= 
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v20.10.7+incompatible h1:pv/3NqibQKphWZiAskMzdz8w0PRbtTaEB+f6NwdU7Is=
+github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.8+incompatible h1:RVqD337BgQicVCzYrrlhLDWhq6OAD2PJDUg2LsEUvKM=
+github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9 h1:YbairAn5rtnnWrmxe1BQRXl2EY8VptU+moIayUu+PMQ=
+github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9/go.mod h1:R5WRYyTdQqTchlBhX4q+WICGh8HQIL5wDFoFZv7Jq6Q=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/heroku/color v0.0.6 h1:UTFFMrmMLFcL3OweqP1lAdp8i1y/9oHqkeHjQ/b/Ny0=
+github.com/heroku/color v0.0.6/go.mod h1:ZBvOcx7cTF2QKOv4LbmoBtNl5uB17qWxGuzZrsi1wLU=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs=
+github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
+github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
+github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
+github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
+github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
+github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
+github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
+github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk=
+github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk=
+github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc=
+github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0=
+github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao=
+github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210608053332-aa57babbf139 h1:C+AwYEtBp/VQwoLntUmQ/yx3MS9vmZaKNdw5eOpoQe8= +golang.org/x/sys v0.0.0-20210608053332-aa57babbf139/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod 
h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= 
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210603172842-58e84a565dcf/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210610141715-e7a9b787a5a4 h1:Kgs5nmbQVuUAug2PXQ27hu9MSCv8uJTnrmxZj9Lj5lc= +google.golang.org/genproto v0.0.0-20210610141715-e7a9b787a5a4/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod 
h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod 
h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/buildpacks/lifecycle/golangci.yaml b/vendor/github.com/buildpacks/lifecycle/golangci.yaml new file mode 100644 index 0000000000..a619556880 --- /dev/null +++ 
b/vendor/github.com/buildpacks/lifecycle/golangci.yaml @@ -0,0 +1,34 @@ +run: + timeout: 6m + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - dogsled + - errcheck + - gocritic + - goimports + - golint + - gosec + - gosimple + - govet + - ineffassign + - maligned + - misspell + - nakedret + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unused + - varcheck + - whitespace + + +linters-settings: + goimports: + local-prefixes: github.com/buildpacks/lifecycle diff --git a/vendor/github.com/buildpacks/lifecycle/launch/bash.go b/vendor/github.com/buildpacks/lifecycle/launch/bash.go new file mode 100644 index 0000000000..653acf523d --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/bash.go @@ -0,0 +1,59 @@ +package launch + +import ( + "fmt" + + "github.com/pkg/errors" +) + +var ( + bashCommandWithScript = `exec bash -c "$@"` // for processes w/o arguments +) + +type BashShell struct { + Exec ExecFunc +} + +// Launch launches the given ShellProcess with Bash +// +// It shall execute a Bash command that sources profile scripts and then executes the process in a nested Bash command +// When ShellProcess.Script is true the nested Bash script shall be proc.Command with proc.Args provided as arguments to Bash +// When ShellProcess.Script is false a Bash command shall be constructed from proc.Command and proc.Args +func (b *BashShell) Launch(proc ShellProcess) error { + launcher := "" + for _, profile := range proc.Profiles { + launcher += fmt.Sprintf("source \"%s\"\n", profile) + } + var bashCommand string + if proc.Script { + bashCommand = bashCommandWithScript + } else { + bashCommand = bashCommandWithTokens(len(proc.Args) + 1) + } + launcher += bashCommand + if err := b.Exec("/bin/bash", append([]string{ + "bash", "-c", + launcher, proc.Caller, proc.Command, + }, proc.Args...), proc.Env); err != nil { + return errors.Wrap(err, "bash exec") + } + return nil +} + +// bashCommandWithTokens returns a bash script that should be executed with nTokens number of bash arguments +// Each argument to bash is evaluated by the shell before becoming a token in the resulting script +// Example: +// Given nTokens=2 the returned script will contain `"$(eval echo \"$0\")" "$(eval echo \"$1\")"` +// and should be evaluated with `bash -c '"$(eval echo \"$0\")" "$(eval echo \"$1\")"' ' +// Token evaluation example: +// "$(eval echo \"$0\")" // given $0='$FOO' and $FOO='arg with spaces" && quotes' +// -> "$(eval echo \"'$FOO'\")" +// -> "$(echo \"'arg with spaces" && quotes'\")" +// -> "arg with spaces\" && quotes" // this is an evaluated and properly quoted token +func bashCommandWithTokens(nTokens int) string { + commandScript := `"$(eval echo \"$0\")"` + for i := 1; i < nTokens; i++ { + commandScript += fmt.Sprintf(` "$(eval echo \"${%d}\")"`, i) + } + return fmt.Sprintf(`exec bash -c '%s' "${@:1}"`, commandScript) +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/cmd.go b/vendor/github.com/buildpacks/lifecycle/launch/cmd.go new file mode 100644 index 0000000000..8fbfe242c5 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/cmd.go @@ -0,0 +1,25 @@ +package launch + +import ( + "github.com/pkg/errors" +) + +type CmdShell struct { + Exec ExecFunc +} + +// Launch launches the given ShellProcess with cmd +func (c *CmdShell) Launch(proc ShellProcess) error { + var commandTokens []string + for _, profile := range proc.Profiles { + commandTokens = append(commandTokens, "call", profile, "&&") + } + commandTokens =
append(commandTokens, proc.Command) + commandTokens = append(commandTokens, proc.Args...) + if err := c.Exec("cmd", + append([]string{"cmd", "/q", "/v:on", "/s", "/c"}, commandTokens...), proc.Env, + ); err != nil { + return errors.Wrap(err, "cmd execute") + } + return nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/exec_d.go b/vendor/github.com/buildpacks/lifecycle/launch/exec_d.go new file mode 100644 index 0000000000..3bceb66e64 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/exec_d.go @@ -0,0 +1,64 @@ +package launch + +import ( + "io" + "io/ioutil" + "os" + "os/exec" + + "github.com/BurntSushi/toml" + "github.com/pkg/errors" +) + +// ExecDRunner is responsible for running ExecD binaries. +type ExecDRunner struct { + Out, Err io.Writer // Out and Err can be used to configure Stdout and Stderr of processes run by ExecDRunner. +} + +// NewExecDRunner creates an ExecDRunner with Out and Err set to stdout and stderr +func NewExecDRunner() *ExecDRunner { + return &ExecDRunner{ + Out: os.Stdout, + Err: os.Stderr, + } +} + +// ExecD executes the executable file at path and sets the returned variables in env. The executable at path +// should implement the ExecD interface in the buildpack specification https://github.com/buildpacks/spec/blob/main/buildpack.md#execd +func (e *ExecDRunner) ExecD(path string, env Env) error { + pr, pw, err := os.Pipe() + if err != nil { + return errors.Wrap(err, "failed to create pipe") + } + errChan := make(chan error, 1) + go func() { + defer pw.Close() + cmd := exec.Command(path) + cmd.Stdout = e.Out + cmd.Stderr = e.Err + cmd.Env = env.List() + if err := setHandle(cmd, pw); err != nil { + errChan <- err + } else { + errChan <- cmd.Run() + } + }() + + out, err := ioutil.ReadAll(pr) + if cmdErr := <-errChan; cmdErr != nil { + // prefer the error from the command + return errors.Wrapf(cmdErr, "failed to execute exec.d file at path '%s'", path) + } else if err != nil { + // return the read error only if the command succeeded + return errors.Wrapf(err, "failed to read output from exec.d file at path '%s'", path) + } + + envVars := map[string]string{} + if _, err := toml.Decode(string(out), &envVars); err != nil { + return errors.Wrapf(err, "failed to decode output from exec.d file at path '%s'", path) + } + for k, v := range envVars { + env.Set(k, v) + } + return nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go b/vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go new file mode 100644 index 0000000000..e758857b6e --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/exec_d_unix.go @@ -0,0 +1,13 @@ +//+build linux darwin + +package launch + +import ( + "os" + "os/exec" +) + +func setHandle(cmd *exec.Cmd, f *os.File) error { + cmd.ExtraFiles = []*os.File{f} + return nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/exec_d_windows.go b/vendor/github.com/buildpacks/lifecycle/launch/exec_d_windows.go new file mode 100644 index 0000000000..9789c6c5da --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/exec_d_windows.go @@ -0,0 +1,21 @@ +package launch + +import ( + "fmt" + "os" + "os/exec" + + "golang.org/x/sys/windows" +) + +const EnvExecDHandle = "CNB_EXEC_D_HANDLE" + +func setHandle(cmd *exec.Cmd, f *os.File) error { + handle := f.Fd() + if err := windows.SetHandleInformation(windows.Handle(handle), windows.HANDLE_FLAG_INHERIT, windows.HANDLE_FLAG_INHERIT); err != nil { + return err + } + + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%#x", EnvExecDHandle,
handle)) + return nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/launch.go b/vendor/github.com/buildpacks/lifecycle/launch/launch.go new file mode 100644 index 0000000000..3d0c3e4086 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/launch.go @@ -0,0 +1,53 @@ +package launch + +import ( + "path" + "path/filepath" + "strings" +) + +type Process struct { + Type string `toml:"type" json:"type"` + Command string `toml:"command" json:"command"` + Args []string `toml:"args" json:"args"` + Direct bool `toml:"direct" json:"direct"` + Default bool `toml:"default,omitempty" json:"default,omitempty"` + BuildpackID string `toml:"buildpack-id" json:"buildpackID"` +} + +func (p Process) NoDefault() Process { + p.Default = false + return p +} + +// ProcessPath returns the absolute path to the symlink for a given process type +func ProcessPath(pType string) string { + return filepath.Join(ProcessDir, pType+exe) +} + +type Metadata struct { + Processes []Process `toml:"processes" json:"processes"` + Buildpacks []Buildpack `toml:"buildpacks" json:"buildpacks"` +} + +func (m Metadata) FindProcessType(pType string) (Process, bool) { + for _, p := range m.Processes { + if p.Type == pType { + return p, true + } + } + return Process{}, false +} + +type Buildpack struct { + API string `toml:"api"` + ID string `toml:"id"` +} + +func EscapeID(id string) string { + return strings.Replace(id, "/", "_", -1) +} + +func GetMetadataFilePath(layersDir string) string { + return path.Join(layersDir, "config", "metadata.toml") +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/launcher.go b/vendor/github.com/buildpacks/lifecycle/launch/launcher.go new file mode 100644 index 0000000000..c2ac4f182c --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/launcher.go @@ -0,0 +1,209 @@ +package launch + +import ( + "io/ioutil" + "os" + "os/exec" + "path/filepath" + + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/env" +) + +var ( + LifecycleDir = filepath.Join(CNBDir, "lifecycle") + ProcessDir = filepath.Join(CNBDir, "process") + LauncherPath = filepath.Join(LifecycleDir, "launcher"+exe) +) + +type Launcher struct { + AppDir string + Buildpacks []Buildpack + DefaultProcessType string + Env Env + Exec ExecFunc + ExecD ExecD + Shell Shell + LayersDir string + PlatformAPI *api.Version + Processes []Process + Setenv func(string, string) error +} + +type ExecFunc func(argv0 string, argv []string, envv []string) error + +type ExecD interface { + ExecD(path string, env Env) error +} + +type Env interface { + AddEnvDir(envDir string, defaultAction env.ActionType) error + AddRootDir(baseDir string) error + Get(string) string + List() []string + Set(name, k string) +} + +// Launch uses cmd to select a process and launches that process. +// For direct=false processes, self is used to set argv0 during profile script execution +func (l *Launcher) Launch(self string, cmd []string) error { + proc, err := l.ProcessFor(cmd) + if err != nil { + return errors.Wrap(err, "determine start command") + } + return l.LaunchProcess(self, proc) +} + +// LaunchProcess launches the provided process. 
+// For direct=false processes, self is used to set argv0 during profile script execution +func (l *Launcher) LaunchProcess(self string, proc Process) error { + if err := os.Chdir(l.AppDir); err != nil { + return errors.Wrap(err, "change to app directory") + } + if err := l.doEnv(proc.Type); err != nil { + return errors.Wrap(err, "modify env") + } + if err := l.doExecD(proc.Type); err != nil { + return errors.Wrap(err, "exec.d") + } + + if proc.Direct { + return l.launchDirect(proc) + } + return l.launchWithShell(self, proc) +} + +func (l *Launcher) launchDirect(proc Process) error { + if err := l.Setenv("PATH", l.Env.Get("PATH")); err != nil { + return errors.Wrap(err, "set path") + } + binary, err := exec.LookPath(proc.Command) + if err != nil { + return errors.Wrap(err, "path lookup") + } + + if err := l.Exec(binary, + append([]string{proc.Command}, proc.Args...), + l.Env.List(), + ); err != nil { + return errors.Wrap(err, "direct exec") + } + return nil +} + +func (l *Launcher) doEnv(procType string) error { + return l.eachBuildpack(func(bpAPI *api.Version, bpDir string) error { + if err := eachLayer(bpDir, l.doLayerRoot()); err != nil { + return errors.Wrap(err, "add layer root") + } + if err := eachLayer(bpDir, l.doLayerEnvFiles(procType, env.DefaultActionType(bpAPI))); err != nil { + return errors.Wrap(err, "add layer env") + } + return nil + }) +} + +func (l *Launcher) doExecD(procType string) error { + return l.eachBuildpack(func(bpAPI *api.Version, bpDir string) error { + if !supportsExecD(bpAPI) { + return nil + } + return eachLayer(bpDir, l.doLayerExecD(procType)) + }) +} + +func supportsExecD(bpAPI *api.Version) bool { + return bpAPI.AtLeast("0.5") +} + +type bpAction func(bpAPI *api.Version, bpDir string) error +type dirAction func(layerDir string) error + +func (l *Launcher) eachBuildpack(fn bpAction) error { + for _, bp := range l.Buildpacks { + dir := filepath.Join(l.LayersDir, EscapeID(bp.ID)) + if _, err := os.Stat(dir); os.IsNotExist(err) { + continue + } else if err != nil { + return errors.Wrap(err, "find buildpack directory") + } + bpAPI, err := api.NewVersion(bp.API) + if err != nil { + return err + } + if err := fn(bpAPI, dir); err != nil { + return err + } + } + return nil +} + +func (l *Launcher) doLayerRoot() dirAction { + return func(path string) error { + return l.Env.AddRootDir(path) + } +} + +func (l *Launcher) doLayerEnvFiles(procType string, defaultAction env.ActionType) dirAction { + return func(path string) error { + if err := l.Env.AddEnvDir(filepath.Join(path, "env"), defaultAction); err != nil { + return err + } + if err := l.Env.AddEnvDir(filepath.Join(path, "env.launch"), defaultAction); err != nil { + return err + } + if procType == "" { + return nil + } + return l.Env.AddEnvDir(filepath.Join(path, "env.launch", procType), defaultAction) + } +} + +func (l *Launcher) doLayerExecD(procType string) dirAction { + return func(path string) error { + if err := eachFile(filepath.Join(path, "exec.d"), func(path string) error { + return l.ExecD.ExecD(path, l.Env) + }); err != nil { + return err + } + if procType == "" { + return nil + } + return eachFile(filepath.Join(path, "exec.d", procType), func(path string) error { + return l.ExecD.ExecD(path, l.Env) + }) + } +} + +func eachLayer(bpDir string, action dirAction) error { + return eachInDir(bpDir, action, func(fi os.FileInfo) bool { + return fi.IsDir() + }) +} + +func eachFile(dir string, action dirAction) error { + return eachInDir(dir, action, func(fi os.FileInfo) bool { + return !fi.IsDir() + }) +} + 
+func eachInDir(dir string, action dirAction, predicate func(fi os.FileInfo) bool) error { + fis, err := ioutil.ReadDir(dir) + if os.IsNotExist(err) { + return nil + } + if err != nil { + return errors.Wrapf(err, "failed to list files in dir '%s'", dir) + } + for _, fi := range fis { + if !predicate(fi) { + continue + } + if err := action(filepath.Join(dir, fi.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go b/vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go new file mode 100644 index 0000000000..53177e9689 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/launcher_unix.go @@ -0,0 +1,16 @@ +//+build linux darwin + +package launch + +import "syscall" + +const ( + CNBDir = `/cnb` + exe = "" + appProfile = ".profile" +) + +var ( + OSExecFunc = syscall.Exec + DefaultShell = &BashShell{Exec: OSExecFunc} +) diff --git a/vendor/github.com/buildpacks/lifecycle/launch/launcher_windows.go b/vendor/github.com/buildpacks/lifecycle/launch/launcher_windows.go new file mode 100644 index 0000000000..57b4a358e5 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/launcher_windows.go @@ -0,0 +1,25 @@ +package launch + +import ( + "os" + "os/exec" +) + +const ( + CNBDir = `c:\cnb` + exe = ".exe" + appProfile = ".profile.bat" +) + +var ( + DefaultShell = &CmdShell{Exec: OSExecFunc} +) + +func OSExecFunc(argv0 string, argv []string, envv []string) error { + c := exec.Command(argv[0], argv[1:]...) // #nosec G204 + c.Env = envv + c.Stdin = os.Stdin + c.Stdout = os.Stdout + c.Stderr = os.Stderr + return c.Run() +} diff --git a/vendor/github.com/buildpacks/lifecycle/launch/process.go b/vendor/github.com/buildpacks/lifecycle/launch/process.go new file mode 100644 index 0000000000..0ed53aae32 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/launch/process.go @@ -0,0 +1,77 @@ +package launch + +import ( + "fmt" + + "github.com/pkg/errors" +) + +// ProcessFor creates a process from container cmd +// If the Platform API is 0.4 or greater and DefaultProcessType is set: +// * The default process is returned with `cmd` appended to the process args +// If the Platform API is less than 0.4 +// * If there is exactly one argument and it matches a process type, it returns that process. +// * If cmd is empty, it returns the default process +// Else +// * it constructs a new process from cmd +// * If the first element in cmd is `--` the process shall be direct +func (l *Launcher) ProcessFor(cmd []string) (Process, error) { + if l.PlatformAPI.LessThan("0.4") { + return l.processForLegacy(cmd) + } + + if l.DefaultProcessType == "" { + process, err := l.userProvidedProcess(cmd) + if err != nil { + return Process{}, err + } + return process, nil + } + + process, ok := l.findProcessType(l.DefaultProcessType) + if !ok { + return Process{}, fmt.Errorf("process type %s was not found", l.DefaultProcessType) + } + process.Args = append(process.Args, cmd...)
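+ // On Platform API 0.4 and greater the container cmd does not replace the
+ // default process: its tokens are appended to the default process's args
+ // (e.g. `docker run <image> --verbose` appends "--verbose").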
diff --git a/vendor/github.com/buildpacks/lifecycle/launch/shell.go b/vendor/github.com/buildpacks/lifecycle/launch/shell.go
new file mode 100644
index 0000000000..62bf350e03
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/launch/shell.go
@@ -0,0 +1,126 @@
+package launch
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"runtime"
+
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/lifecycle/api"
+)
+
+type Shell interface {
+	Launch(ShellProcess) error
+}
+
+type ShellProcess struct {
+	Script   bool // Script indicates whether Command is a script or should be a token in a generated script
+	Args     []string
+	Command  string
+	Caller   string // Caller used to set argv0 for Bash profile scripts and is ignored in Cmd
+	Profiles []string
+	Env      []string
+}
+
+func (l *Launcher) launchWithShell(self string, proc Process) error {
+	profs, err := l.getProfiles(proc.Type)
+	if err != nil {
+		return errors.Wrap(err, "find profiles")
+	}
+	script, err := l.isScript(proc)
+	if err != nil {
+		return err
+	}
+	return l.Shell.Launch(ShellProcess{
+		Script:   script,
+		Caller:   self,
+		Command:  proc.Command,
+		Args:     proc.Args,
+		Profiles: profs,
+		Env:      l.Env.List(),
+	})
+}
+
+func (l *Launcher) getProfiles(procType string) ([]string, error) {
+	var profiles []string
+	if err := l.eachBuildpack(func(_ *api.Version, bpDir string) error {
+		return eachLayer(bpDir, l.populateLayerProfiles(procType, &profiles))
+	}); err != nil {
+		return nil, err
+	}
+
+	fi, err := os.Stat(filepath.Join(l.AppDir, appProfile))
+	if os.IsNotExist(err) {
+		return profiles, nil
+	} else if err != nil {
+		return nil, errors.Wrapf(err, "failed to determine if app profile script exists at path '%s'", filepath.Join(l.AppDir, appProfile))
+	}
+	if !fi.IsDir() {
+		profiles = append(profiles, appProfile)
+	}
+
+	return profiles, nil
+}
+
+func (l *Launcher) populateLayerProfiles(procType string, profiles *[]string) dirAction {
+	return func(layerDir string) error {
+		if err := eachFile(filepath.Join(layerDir, "profile.d"), func(path string) error {
+			*profiles = append(*profiles, path)
+			return nil
+		}); err != nil {
+			return err
+		}
+		if procType == "" {
+			return nil
+		}
+		return eachFile(filepath.Join(layerDir, "profile.d", procType), func(path string) error {
+			*profiles = append(*profiles, path)
+			return nil
+		})
+	}
+}
+
+func (l *Launcher) isScript(proc Process) (bool, error) {
+	if runtime.GOOS == "windows" {
+		// Windows does not support script commands
+		return false, nil
+	}
+	if len(proc.Args) == 0 {
+		return true, nil
+	}
+	bpAPI, err := l.buildpackAPI(proc)
+	if err != nil {
+		return false, err
+	}
+	if bpAPI == nil {
+		return false, nil
+	}
+	if isLegacyProcess(bpAPI) {
+		return true, nil
+	}
+	return false, nil
+}
+
+// buildpackAPI returns the API of the buildpack that contributed the process.
+// If the process was not provided by a buildpack it returns nil.
+func (l *Launcher) buildpackAPI(proc Process) (*api.Version, error) {
+	if proc.BuildpackID == "" {
+		return nil, nil
+	}
+	for _, bp := range l.Buildpacks {
+		if bp.ID == proc.BuildpackID {
+			bpAPI, err := api.NewVersion(bp.API)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to parse api '%s' of buildpack '%s'", bp.API, bp.ID)
+			}
+			return bpAPI, nil
+		}
+	}
+	return nil, fmt.Errorf("process type '%s' provided by unknown buildpack '%s'", proc.Type, proc.BuildpackID)
+}
+
+func isLegacyProcess(bpAPI *api.Version) bool {
+	return bpAPI.LessThan("0.4")
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/layer_metadata_restorer.go b/vendor/github.com/buildpacks/lifecycle/layer_metadata_restorer.go
new file mode 100644
index 0000000000..8346804877
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/layer_metadata_restorer.go
@@ -0,0 +1,209 @@
+package lifecycle
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/lifecycle/buildpack"
+	"github.com/buildpacks/lifecycle/launch"
+	"github.com/buildpacks/lifecycle/platform"
+)
+
+//go:generate mockgen -package testmock -destination testmock/layer_metadata_restorer.go github.com/buildpacks/lifecycle LayerMetadataRestorer
+type LayerMetadataRestorer interface {
+	Restore(buildpacks []buildpack.GroupBuildpack, appMeta platform.LayersMetadata, cacheMeta platform.CacheMetadata, layerSHAStore LayerSHAStore) error
+}
+
+type DefaultLayerMetadataRestorer struct {
+	logger     Logger
+	layersDir  string
+	skipLayers bool
+}
+
+type LayerSHAStore interface {
+	add(buildpackID, sha string, layer *bpLayer) error
+	get(buildpackID string, layer bpLayer) (string, error)
+}
+
+func NewLayerSHAStore(useShaFiles bool) LayerSHAStore {
+	if useShaFiles {
+		return &filesLayerSHAStore{}
+	}
+	return &mapLayerSHAStore{make(map[string]layerToSha)}
+}
+
+type filesLayerSHAStore struct {
+}
+
+func (flss *filesLayerSHAStore) add(buildpackID, sha string, layer *bpLayer) error {
+	return layer.writeSha(sha)
+}
+
+func (flss *filesLayerSHAStore) get(buildpackID string, layer bpLayer) (string, error) {
+	data, err := layer.read()
+	if err != nil {
+		return "", errors.Wrapf(err, "reading layer")
+	}
+	return data.SHA, nil
+}
+
+type mapLayerSHAStore struct {
+	buildpacksToLayersShaMap map[string]layerToSha
+}
+
+type layerToSha struct {
+	layerToShaMap map[string]string
+}
+
+func (mlss *mapLayerSHAStore) add(buildpackID, sha string, layer *bpLayer) error {
+	mlss.addLayerToMap(buildpackID, layer.name(), sha)
+	return nil
+}
+
+func (mlss *mapLayerSHAStore) get(buildpackID string, layer bpLayer) (string, error) {
+	return mlss.getShaByBuildpackLayer(buildpackID, layer.name()), nil
+}
+
+func (mlss *mapLayerSHAStore) addLayerToMap(buildpackID, layerName, sha string) {
+	_, exists := mlss.buildpacksToLayersShaMap[buildpackID]
+	if !exists {
+		mlss.buildpacksToLayersShaMap[buildpackID] = layerToSha{make(map[string]string)}
+	}
+	mlss.buildpacksToLayersShaMap[buildpackID].layerToShaMap[layerName] = sha
+}
+
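NewLayerSHAStore above selects between the two implementations: .sha files on disk for older platforms, an in-memory map otherwise. A small sketch of the selection, mirroring how restorer.go later in this patch derives useShaFiles from the platform API; the "0.6" value is illustrative:

package main

import (
	"github.com/buildpacks/lifecycle"
	"github.com/buildpacks/lifecycle/api"
)

func main() {
	// Platform API < 0.7 persists layer SHAs as sibling .sha files;
	// 0.7+ keeps them in memory for the duration of the restore.
	useShaFiles := api.MustParse("0.6").LessThan("0.7")
	store := lifecycle.NewLayerSHAStore(useShaFiles)
	_ = store
}

+// if the layer exists for the buildpack ID, its SHA will be returned. Otherwise, an empty string will be returned.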
+func (mlss *mapLayerSHAStore) getShaByBuildpackLayer(buildpackID, layerName string) string { + if layerToSha, buildpackExists := mlss.buildpacksToLayersShaMap[buildpackID]; buildpackExists { + if sha, layerExists := layerToSha.layerToShaMap[layerName]; layerExists { + return sha + } + } + return "" +} + +func NewLayerMetadataRestorer(logger Logger, layersDir string, skipLayers bool) LayerMetadataRestorer { + return &DefaultLayerMetadataRestorer{ + logger: logger, + layersDir: layersDir, + skipLayers: skipLayers, + } +} + +func (la *DefaultLayerMetadataRestorer) Restore(buildpacks []buildpack.GroupBuildpack, appMeta platform.LayersMetadata, cacheMeta platform.CacheMetadata, layerSHAStore LayerSHAStore) error { + if err := la.restoreStoreTOML(appMeta, buildpacks); err != nil { + return err + } + + if err := la.restoreLayerMetadata(layerSHAStore, appMeta, cacheMeta, buildpacks); err != nil { + return err + } + + return nil +} + +func (la *DefaultLayerMetadataRestorer) restoreStoreTOML(appMeta platform.LayersMetadata, buildpacks []buildpack.GroupBuildpack) error { + for _, bp := range buildpacks { + if store := appMeta.MetadataForBuildpack(bp.ID).Store; store != nil { + if err := WriteTOML(filepath.Join(la.layersDir, launch.EscapeID(bp.ID), "store.toml"), store); err != nil { + return err + } + } + } + return nil +} + +func (la *DefaultLayerMetadataRestorer) restoreLayerMetadata(layerSHAStore LayerSHAStore, appMeta platform.LayersMetadata, cacheMeta platform.CacheMetadata, buildpacks []buildpack.GroupBuildpack) error { + if la.skipLayers { + la.logger.Infof("Skipping buildpack layer analysis") + return nil + } + + for _, buildpack := range buildpacks { + buildpackDir, err := readBuildpackLayersDir(la.layersDir, buildpack, la.logger) + if err != nil { + return errors.Wrap(err, "reading buildpack layer directory") + } + + // Restore metadata for launch=true layers. + // The restorer step will restore the layer data for cache=true layers if possible or delete the layer. + appLayers := appMeta.MetadataForBuildpack(buildpack.ID).Layers + cachedLayers := cacheMeta.MetadataForBuildpack(buildpack.ID).Layers + for layerName, layer := range appLayers { + identifier := fmt.Sprintf("%s:%s", buildpack.ID, layerName) + if !layer.Launch { + la.logger.Debugf("Not restoring metadata for %q, marked as launch=false", identifier) + continue + } + if layer.Build && !layer.Cache { + // layer is launch=true, build=true. Because build=true, the layer contents must be present in the build container. + // There is no reason to restore the metadata file, because the buildpack will always recreate the layer. + la.logger.Debugf("Not restoring metadata for %q, marked as build=true, cache=false", identifier) + continue + } + if layer.Cache { + if cacheLayer, ok := cachedLayers[layerName]; !ok || !cacheLayer.Cache { + // The layer is not cache=true in the cache metadata and will not be restored. + // Do not write the metadata file so that it is clear to the buildpack that it needs to recreate the layer. + // Although a launch=true (only) layer still needs a metadata file, the restorer will remove the file anyway when it does its cleanup (for buildpack apis < 0.6). 
+ la.logger.Debugf("Not restoring metadata for %q, marked as cache=true, but not found in cache", identifier) + continue + } + } + la.logger.Infof("Restoring metadata for %q from app image", identifier) + if err := la.writeLayerMetadata(layerSHAStore, buildpackDir, layerName, layer, buildpack.ID); err != nil { + return err + } + } + + // Restore metadata for cache=true layers. + // The restorer step will restore the layer data if possible or delete the layer. + for layerName, layer := range cachedLayers { + identifier := fmt.Sprintf("%s:%s", buildpack.ID, layerName) + if !layer.Cache { + la.logger.Debugf("Not restoring %q from cache, marked as cache=false", identifier) + continue + } + // If launch=true, the metadata was restored from the app image or the layer is stale. + if layer.Launch { + la.logger.Debugf("Not restoring %q from cache, marked as launch=true", identifier) + continue + } + la.logger.Infof("Restoring metadata for %q from cache", identifier) + if err := la.writeLayerMetadata(layerSHAStore, buildpackDir, layerName, layer, buildpack.ID); err != nil { + return err + } + } + } + return nil +} + +func (la *DefaultLayerMetadataRestorer) writeLayerMetadata(layerSHAStore LayerSHAStore, buildpackDir bpLayersDir, layerName string, metadata platform.BuildpackLayerMetadata, buildpackID string) error { + layer := buildpackDir.newBPLayer(layerName, buildpackDir.buildpack.API, la.logger) + la.logger.Debugf("Writing layer metadata for %q", layer.Identifier()) + if err := layer.writeMetadata(metadata.LayerMetadataFile); err != nil { + return err + } + return layerSHAStore.add(buildpackID, metadata.SHA, layer) +} + +func retrieveCacheMetadata(cache Cache, logger Logger) (platform.CacheMetadata, error) { + // Create empty cache metadata in case a usable cache is not provided. 
+ var cacheMeta platform.CacheMetadata + if cache != nil { + var err error + if !cache.Exists() { + logger.Info("Layer cache not found") + } + cacheMeta, err = cache.RetrieveMetadata() + if err != nil { + return cacheMeta, errors.Wrap(err, "retrieving cache metadata") + } + } else { + logger.Debug("Usable cache not provided, using empty cache metadata") + } + + return cacheMeta, nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/layers.go b/vendor/github.com/buildpacks/lifecycle/layers.go new file mode 100644 index 0000000000..cd0d5f4cdc --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/layers.go @@ -0,0 +1,205 @@ +package lifecycle + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + + "github.com/BurntSushi/toml" + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/buildpack" + "github.com/buildpacks/lifecycle/buildpack/layertypes" + "github.com/buildpacks/lifecycle/launch" + "github.com/buildpacks/lifecycle/platform" +) + +type bpLayersDir struct { + path string + layers []bpLayer + name string + buildpack buildpack.GroupBuildpack + store *buildpack.StoreTOML +} + +func readBuildpackLayersDir(layersDir string, bp buildpack.GroupBuildpack, logger Logger) (bpLayersDir, error) { + path := filepath.Join(layersDir, launch.EscapeID(bp.ID)) + bpDir := bpLayersDir{ + name: bp.ID, + path: path, + layers: []bpLayer{}, + buildpack: bp, + } + + fis, err := ioutil.ReadDir(path) + if err != nil && !os.IsNotExist(err) { + return bpLayersDir{}, err + } + + names := map[string]struct{}{} + var tomls []string + for _, fi := range fis { + if fi.IsDir() { + bpDir.layers = append(bpDir.layers, *bpDir.newBPLayer(fi.Name(), bp.API, logger)) + names[fi.Name()] = struct{}{} + continue + } + if strings.HasSuffix(fi.Name(), ".toml") { + tomls = append(tomls, filepath.Join(path, fi.Name())) + } + } + + for _, tf := range tomls { + name := strings.TrimSuffix(filepath.Base(tf), ".toml") + if name == "store" { + var bpStore buildpack.StoreTOML + _, err := toml.DecodeFile(tf, &bpStore) + if err != nil { + return bpLayersDir{}, errors.Wrapf(err, "failed decoding store.toml for buildpack %q", bp.ID) + } + bpDir.store = &bpStore + continue + } + if name == "launch" { + // don't treat launch.toml as a layer + continue + } + if name == "build" && api.MustParse(bp.API).AtLeast("0.5") { + // if the buildpack API supports build.toml don't treat it as a layer + continue + } + if _, ok := names[name]; !ok { + bpDir.layers = append(bpDir.layers, *bpDir.newBPLayer(name, bp.API, logger)) + } + } + sort.Slice(bpDir.layers, func(i, j int) bool { + return bpDir.layers[i].identifier < bpDir.layers[j].identifier + }) + return bpDir, nil +} + +func forLaunch(l bpLayer) bool { + md, err := l.read() + return err == nil && md.Launch +} + +func forMalformed(l bpLayer) bool { + _, err := l.read() + return err != nil +} + +func forCached(l bpLayer) bool { + md, err := l.read() + return err == nil && md.Cache +} + +func (bd *bpLayersDir) findLayers(f func(layer bpLayer) bool) []bpLayer { + var selectedLayers []bpLayer + for _, l := range bd.layers { + if f(l) { + selectedLayers = append(selectedLayers, l) + } + } + return selectedLayers +} + +func (bd *bpLayersDir) newBPLayer(name, buildpackAPI string, logger Logger) *bpLayer { + return &bpLayer{ + layer: layer{ + path: filepath.Join(bd.path, name), + identifier: fmt.Sprintf("%s:%s", bd.buildpack.ID, name), + }, + api: buildpackAPI, + logger: logger, + } +} + +type bpLayer struct { // TODO: need to 
refactor so api and logger won't be part of this struct + layer + api string + logger Logger +} + +func (bp *bpLayer) read() (platform.BuildpackLayerMetadata, error) { + tomlPath := bp.path + ".toml" + layerMetadataFile, msg, err := buildpack.DecodeLayerMetadataFile(tomlPath, bp.api) + if err != nil { + return platform.BuildpackLayerMetadata{}, err + } + if msg != "" { + if api.MustParse(bp.api).LessThan("0.6") { + bp.logger.Warn(msg) + } else { + return platform.BuildpackLayerMetadata{}, errors.New(msg) + } + } + var sha string + shaBytes, err := ioutil.ReadFile(bp.path + ".sha") + if err != nil && !os.IsNotExist(err) { // if the sha file doesn't exist, an empty sha will be returned + return platform.BuildpackLayerMetadata{}, err + } + if err == nil { + sha = string(shaBytes) + } + return platform.BuildpackLayerMetadata{LayerMetadata: platform.LayerMetadata{SHA: sha}, LayerMetadataFile: layerMetadataFile}, nil +} + +func (bp *bpLayer) remove() error { + if err := os.RemoveAll(bp.path); err != nil && !os.IsNotExist(err) { + return err + } + if err := os.Remove(bp.path + ".sha"); err != nil && !os.IsNotExist(err) { + return err + } + if err := os.Remove(bp.path + ".toml"); err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +func (bp *bpLayer) writeMetadata(metadata layertypes.LayerMetadataFile) error { + path := filepath.Join(bp.path + ".toml") + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return err + } + return buildpack.EncodeLayerMetadataFile(metadata, path, bp.api) +} + +func (bp *bpLayer) hasLocalContents() bool { + _, err := ioutil.ReadDir(bp.path) + + return !os.IsNotExist(err) +} + +func (bp *bpLayer) writeSha(sha string) error { + if err := ioutil.WriteFile(bp.path+".sha", []byte(sha), 0666); err != nil { + return err + } // #nosec G306 + return nil +} + +func (bp *bpLayer) name() string { + return filepath.Base(bp.path) +} + +type layer struct { + path string + identifier string +} + +func (l *layer) Identifier() string { + return l.identifier +} + +func (l *layer) Path() string { + return l.path +} + +type layerDir interface { + Identifier() string + Path() string +} diff --git a/vendor/github.com/buildpacks/lifecycle/layers/digest.go b/vendor/github.com/buildpacks/lifecycle/layers/digest.go new file mode 100644 index 0000000000..9e086a6039 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/layers/digest.go @@ -0,0 +1,46 @@ +package layers + +import ( + "hash" + "sync" +) + +// concurrentHasher wraps a hash.Hash so that writes to it +// happen on a separate go routine. 
+type concurrentHasher struct {
+	hash    hash.Hash
+	wg      sync.WaitGroup
+	buffers chan []byte
+}
+
+func newConcurrentHasher(h hash.Hash) *concurrentHasher {
+	ch := &concurrentHasher{
+		hash:    h,
+		buffers: make(chan []byte, 10),
+	}
+
+	go func() {
+		for b := range ch.buffers {
+			_, _ = ch.hash.Write(b)
+			ch.wg.Done()
+		}
+	}()
+
+	return ch
+}
+
+func (ch *concurrentHasher) Write(p []byte) (int, error) {
+	cp := make([]byte, len(p))
+	copy(cp, p)
+
+	ch.wg.Add(1)
+	ch.buffers <- cp
+
+	return len(p), nil
+}
+
+func (ch *concurrentHasher) Sum(b []byte) []byte {
+	ch.wg.Wait()
+	close(ch.buffers)
+	return ch.hash.Sum(b)
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/layers/dir.go b/vendor/github.com/buildpacks/lifecycle/layers/dir.go
new file mode 100644
index 0000000000..7b4f47edd6
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/layers/dir.go
@@ -0,0 +1,29 @@
+package layers
+
+import (
+	"path/filepath"
+
+	"github.com/buildpacks/lifecycle/archive"
+)
+
+// DirLayer creates a layer from the given directory
+// DirLayer will set the UID and GID of entries describing dir and its children (but not its parents)
+// to Factory.UID and Factory.GID
+func (f *Factory) DirLayer(id string, dir string) (layer Layer, err error) {
+	dir, err = filepath.Abs(dir)
+	if err != nil {
+		return Layer{}, err
+	}
+	parents, err := parents(dir)
+	if err != nil {
+		return Layer{}, err
+	}
+	return f.writeLayer(id, func(tw *archive.NormalizingTarWriter) error {
+		if err := archive.AddFilesToArchive(tw, parents); err != nil {
+			return err
+		}
+		tw.WithUID(f.UID)
+		tw.WithGID(f.GID)
+		return archive.AddDirToArchive(tw, dir)
+	})
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/layers/extract.go b/vendor/github.com/buildpacks/lifecycle/layers/extract.go
new file mode 100644
index 0000000000..7d0e9f4b88
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/layers/extract.go
@@ -0,0 +1,33 @@
+package layers
+
+import (
+	"archive/tar"
+	"io"
+	"runtime"
+
+	"github.com/buildpacks/lifecycle/archive"
+)
+
+// Extract extracts entries from r to the dest directory
+// Contents of r should be an OCI layer.
+// If dest is an empty string files will be extracted to `/` or `c:\` on unix and windows filesystems respectively.
+func Extract(r io.Reader, dest string) error {
+	tr := tarReader(r, dest)
+	return archive.Extract(tr)
+}
+
+func tarReader(r io.Reader, dest string) archive.TarReader {
+	tr := archive.NewNormalizingTarReader(tar.NewReader(r))
+	if runtime.GOOS == "windows" {
+		tr.ExcludePaths([]string{"Hives"})
+		tr.Strip(`Files/`)
+		if dest == "" {
+			dest = `c:\`
+		}
+	}
+	if dest == "" {
+		dest = `/`
+	}
+	tr.PrependDir(dest)
+	return tr
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/layers/factory.go b/vendor/github.com/buildpacks/lifecycle/layers/factory.go
new file mode 100644
index 0000000000..fb1f4122e0
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/layers/factory.go
@@ -0,0 +1,99 @@
+package layers
+
+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/buildpacks/lifecycle/archive"
+)
+
+type Factory struct {
+	ArtifactsDir string // ArtifactsDir is the directory where layer files are written
+	UID, GID     int    // UID and GID are used to normalize layer entries
+	Logger       Logger
+
+	tarHashes map[string]string // tarHashes stores hashes of layer tarballs for reuse between the export and cache steps.
+}
+
+type Layer struct {
+	ID      string
+	TarPath string
+	Digest  string
+}
+
+type Logger interface {
+	Debug(msg string)
+	Debugf(fmt string, v ...interface{})
+
+	Info(msg string)
+	Infof(fmt string, v ...interface{})
+
+	Warn(msg string)
+	Warnf(fmt string, v ...interface{})
+
+	Error(msg string)
+	Errorf(fmt string, v ...interface{})
+}
+
+func (f *Factory) writeLayer(id string, addEntries func(tw *archive.NormalizingTarWriter) error) (layer Layer, err error) {
+	tarPath := filepath.Join(f.ArtifactsDir, escape(id)+".tar")
+	if f.tarHashes == nil {
+		f.tarHashes = make(map[string]string)
+	}
+	if sha, ok := f.tarHashes[tarPath]; ok {
+		f.Logger.Debugf("Reusing tarball for layer %q with SHA: %s\n", id, sha)
+		return Layer{
+			ID:      id,
+			TarPath: tarPath,
+			Digest:  sha,
+		}, nil
+	}
+	lw, err := newFileLayerWriter(tarPath)
+	if err != nil {
+		return Layer{}, err
+	}
+	defer func() {
+		if closeErr := lw.Close(); err == nil {
+			err = closeErr
+		}
+	}()
+	tw := tarWriter(lw)
+	if err := addEntries(tw); err != nil {
+		return Layer{}, err
+	}
+
+	if err := tw.Close(); err != nil {
+		return Layer{}, err
+	}
+	digest := lw.Digest()
+	f.tarHashes[tarPath] = digest
+	return Layer{
+		ID:      id,
+		Digest:  digest,
+		TarPath: tarPath,
+	}, err
+}
+
+func escape(id string) string {
+	return strings.Replace(id, "/", "_", -1)
+}
+
+func parents(file string) ([]archive.PathInfo, error) {
+	parent := filepath.Dir(file)
+	if parent == filepath.VolumeName(file)+`\` || parent == "/" {
+		return []archive.PathInfo{}, nil
+	}
+	fi, err := os.Stat(parent)
+	if err != nil {
+		return nil, err
+	}
+	parentDirs, err := parents(parent)
+	if err != nil {
+		return nil, err
+	}
+	return append(parentDirs, archive.PathInfo{
+		Path: parent,
+		Info: fi,
+	}), nil
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/layers/launcher.go b/vendor/github.com/buildpacks/lifecycle/layers/launcher.go
new file mode 100644
index 0000000000..67124fb45d
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/layers/launcher.go
@@ -0,0 +1,125 @@
+package layers
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/lifecycle/archive"
+	"github.com/buildpacks/lifecycle/launch"
+)
+
+// LauncherLayer creates a Layer containing the launcher at path
+func (f *Factory) LauncherLayer(path string) (layer Layer, err error) {
+	parents := []*tar.Header{
+		rootOwnedDir(launch.CNBDir),
+		rootOwnedDir("/cnb/lifecycle"),
+	}
+	fi, err := os.Stat(path)
+	if err != nil {
+		return Layer{}, fmt.Errorf("failed to stat launcher at path '%s': %w", path, err)
+	}
+	hdr, err := tar.FileInfoHeader(fi, "")
+	if err != nil {
+		return Layer{}, fmt.Errorf("failed to create TAR header for launcher at path '%s': %w", path, err)
+	}
+	hdr.Name = launch.LauncherPath
+	hdr.Uid = 0
+	hdr.Gid = 0
+	if runtime.GOOS == "windows" {
+		hdr.Mode = 0777
+	} else {
+		hdr.Mode = 0755
+	}
+
+	return f.writeLayer("launcher", func(tw *archive.NormalizingTarWriter) error {
+		for _, dir := range parents {
+			if err := tw.WriteHeader(dir); err != nil {
+				return err
+			}
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return errors.Wrap(err, "failed to write header for launcher")
+		}
+
+		lf, err := os.Open(path)
+		if err != nil {
+			return fmt.Errorf("failed to open launcher at path '%s': %w", path, err)
+		}
+		defer lf.Close()
+		if _, err := io.Copy(tw, lf); err != nil {
+			return errors.Wrap(err, "failed to write launcher to layer")
+		}
+		return nil
+	})
+}
+
+// ProcessTypesLayer creates a Layer containing symlinks pointing to the launcher where:
+// * any parents of the symlink files will also be added to the layer
+// * symlinks and their parent directories shall be root owned and world readable
+func (f *Factory) ProcessTypesLayer(config launch.Metadata) (layer Layer, err error) {
+	hdrs := []*tar.Header{
+		rootOwnedDir(launch.CNBDir),
+		rootOwnedDir(launch.ProcessDir),
+	}
+	for _, proc := range config.Processes {
+		if len(proc.Type) == 0 {
+			return Layer{}, errors.New("type is required for all processes")
+		}
+		if err := validateProcessType(proc.Type); err != nil {
+			return Layer{}, errors.Wrapf(err, "invalid process type '%s'", proc.Type)
+		}
+		hdrs = append(hdrs, typeSymlink(launch.ProcessPath(proc.Type)))
+	}
+
+	return f.writeLayer("process-types", func(tw *archive.NormalizingTarWriter) error {
+		for _, hdr := range hdrs {
+			if err := tw.WriteHeader(hdr); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
+func validateProcessType(pType string) error {
+	forbiddenCharacters := `/><:|&\`
+	if strings.ContainsAny(pType, forbiddenCharacters) {
+		return fmt.Errorf(`type may not contain characters '%s'`, forbiddenCharacters)
+	}
+	return nil
+}
+
+func rootOwnedDir(path string) *tar.Header {
+	var modePerm int64
+	if runtime.GOOS == "windows" {
+		modePerm = 0777
+	} else {
+		modePerm = 0755
+	}
+	return &tar.Header{
+		Typeflag: tar.TypeDir,
+		Name:     path,
+		Mode:     modePerm,
+	}
+}
+
+func typeSymlink(path string) *tar.Header {
+	var modePerm int64
+	if runtime.GOOS == "windows" {
+		modePerm = 0777
+	} else {
+		modePerm = 0755
+	}
+	return &tar.Header{
+		Typeflag: tar.TypeSymlink,
+		Name:     path,
+		Linkname: launch.LauncherPath,
+		Mode:     modePerm,
+	}
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/layers/slices.go b/vendor/github.com/buildpacks/lifecycle/layers/slices.go
new file mode 100644
index 0000000000..46ba170ff8
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/layers/slices.go
@@ -0,0 +1,251 @@
+package layers
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/lifecycle/archive"
+)
+
+type Slice struct {
+	Paths []string `toml:"paths"`
+}
+
+// SliceLayers divides dir into layers based on the given slices, using the following process:
+// * Given n slices SliceLayers will return n+1 layers
+// * Each of the first n layers will contain the files matched by any Path in the corresponding Slice
+// * The final layer will contain any files in dir that were not included in a previous layer
+// Some layers may be empty
+func (f *Factory) SliceLayers(dir string, slices []Slice) ([]Layer, error) {
+	var sliceLayers []Layer
+	dir, err := filepath.Abs(dir)
+	if err != nil {
+		return nil, err
+	}
+	sdir, err := newSliceableDir(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	// add one layer per slice
+	for i, slice := range slices {
+		layerID := fmt.Sprintf("slice-%d", i+1)
+		layer, err := f.createLayerFromSlice(slice, sdir, layerID)
+		if err != nil {
+			return nil, err
+		}
+		sliceLayers = append(sliceLayers, layer)
+	}
+
+	// add remaining files in a single layer
+	layerID := fmt.Sprintf("slice-%d", len(slices)+1)
+	finalLayer, err := f.createLayerFromFiles(layerID, sdir, sdir.remainingFiles())
+	if err != nil {
+		return nil, err
+	}
+	return append(sliceLayers, finalLayer), nil
+}
+
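Given n slices, SliceLayers above produces n+1 layers. A short sketch using this package; the directory, slice paths, and resulting IDs are illustrative:

package main

import (
	"fmt"

	"github.com/buildpacks/lifecycle/layers"
)

func main() {
	f := &layers.Factory{
		ArtifactsDir: "/tmp/artifacts", // hypothetical scratch dir for layer tarballs
		UID:          1000,             // entries are normalized to this UID/GID
		GID:          1000,
		// Logger is only consulted when a tarball is reused; omitted in this sketch.
	}
	ls, err := f.SliceLayers("/workspace", []layers.Slice{
		{Paths: []string{"static/*"}},     // becomes slice-1
		{Paths: []string{"node_modules"}}, // becomes slice-2
	})
	if err != nil {
		panic(err)
	}
	for _, l := range ls {
		fmt.Println(l.ID, l.Digest) // slice-1, slice-2, then slice-3 with the remainder
	}
}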
+func (f *Factory) createLayerFromSlice(slice Slice, sdir *sliceableDir, layerID string) (Layer, error) {
+	var matches []string
+	for _, path := range slice.Paths {
+		globMatches, err := glob(sdir, path)
+		if err != nil {
+			return Layer{}, err
+		}
+		matches = append(matches, globMatches...)
+	}
+	return f.createLayerFromFiles(layerID, sdir, sdir.sliceFiles(matches))
+}
+
+func glob(sdir *sliceableDir, pattern string) ([]string, error) {
+	pattern = filepath.Clean(pattern)
+	var matches []string
+	if err := filepath.Walk(sdir.path, func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if path == sdir.path {
+			return nil
+		}
+		relPath, err := filepath.Rel(sdir.path, path)
+		if err != nil {
+			return err
+		}
+		match, err := filepath.Match(pattern, relPath)
+		if err != nil {
+			return errors.Wrapf(err, "failed to check if '%s' matches '%s'", relPath, pattern)
+		}
+		if match {
+			matches = append(matches, path)
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return matches, nil
+}
+
+func (f *Factory) createLayerFromFiles(layerID string, sdir *sliceableDir, files []archive.PathInfo) (layer Layer, err error) {
+	sort.SliceStable(files, func(i, j int) bool {
+		return files[i].Path < files[j].Path
+	})
+	return f.writeLayer(layerID, func(tw *archive.NormalizingTarWriter) error {
+		if len(files) != 0 {
+			if err := archive.AddFilesToArchive(tw, sdir.parentDirs); err != nil {
+				return err
+			}
+			tw.WithUID(f.UID)
+			tw.WithGID(f.GID)
+			return archive.AddFilesToArchive(tw, files)
+		}
+		return nil
+	})
+}
+
+type sliceableDir struct {
+	path        string                 // path to sliceableDir
+	slicedFiles map[string]bool        // map showing which paths are already sliced
+	pathInfos   map[string]os.FileInfo // map of path to file info
+	subDirs     map[string][]string    // map dirs to children
+	parentDirs  []archive.PathInfo     // parents of the sliceableDir
+}
+
+func newSliceableDir(appDir string) (*sliceableDir, error) {
+	sdir := &sliceableDir{
+		path:        appDir,
+		slicedFiles: map[string]bool{},
+		pathInfos:   map[string]os.FileInfo{},
+		subDirs:     map[string][]string{},
+	}
+	if err := filepath.Walk(appDir, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		sdir.slicedFiles[path] = false
+		sdir.pathInfos[path] = fi
+		if fi.IsDir() {
+			children, err := ioutil.ReadDir(path)
+			if err != nil {
+				return err
+			}
+			for _, child := range children {
+				sdir.subDirs[path] = append(sdir.subDirs[path], filepath.Join(path, child.Name()))
+			}
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	parentDirs, err := parents(appDir)
+	if err != nil {
+		return nil, err
+	}
+	sdir.parentDirs = parentDirs
+	return sdir, nil
+}
+
+func (sd *sliceableDir) sliceFiles(paths []string) []archive.PathInfo {
+	slicedFiles := map[string]os.FileInfo{}
+	for _, match := range paths {
+		sd.addMatchedFiles(slicedFiles, match)
+	}
+	return sd.fillInMissingParents(slicedFiles)
+}
+
+func (sd *sliceableDir) addMatchedFiles(matchedFiles map[string]os.FileInfo, match string) {
+	if added, ok := sd.slicedFiles[match]; !ok || added {
+		// don't add files that live outside the app dir
+		// don't add files that were already added
+		return
+	}
+	if children, ok := sd.subDirs[match]; ok {
+		for _, child := range children {
+			sd.addMatchedFiles(matchedFiles, child)
+		}
+	}
+	matchedFiles[match] = sd.pathInfos[match]
+	sd.slicedFiles[match] = true
+}
+
+func (sd *sliceableDir) fillInMissingParents(matchedFiles map[string]os.FileInfo) []archive.PathInfo {
+	// add parent dirs for matched files if they are missing
+	var parentToCheck []string
+	var files []archive.PathInfo
+	addedParents := map[string]struct{}{}
+	for path, info := range matchedFiles {
+		files = append(files, archive.PathInfo{
+			Path: path,
+			Info: info,
+		})
+		parents := sd.fileParents(path)
+		for _, parent := range parents {
+			if _, ok := matchedFiles[parent.Path]; ok {
+				// don't add if it was sliced as part of a match
+				continue
+			}
+			if _, ok := addedParents[parent.Path]; ok {
+				// don't add if it was already added
+				continue
+			}
+			parentToCheck = append(parentToCheck, parent.Path)
+			addedParents[parent.Path] = struct{}{}
+			files = append(files, parent)
+		}
+	}
+
+	// sort the dirs by their path depth (deepest -> most shallow) so we always process children first
+	sort.SliceStable(parentToCheck, func(i, j int) bool {
+		return len(strings.Split(parentToCheck[i], string(os.PathSeparator))) > len(strings.Split(parentToCheck[j], string(os.PathSeparator)))
+	})
+
+	// if all children are sliced, mark the dir as sliced
+	for _, dir := range parentToCheck {
+		allChildrenAdded := true
+		if children, ok := sd.subDirs[dir]; ok {
+			for _, child := range children {
+				if added, ok := sd.slicedFiles[child]; ok && !added {
+					allChildrenAdded = false
+					break
+				}
+			}
+		}
+		sd.slicedFiles[dir] = allChildrenAdded
+	}
+	return files
+}
+
+func (sd *sliceableDir) remainingFiles() []archive.PathInfo {
+	var files []archive.PathInfo
+	for path, info := range sd.pathInfos {
+		if added, ok := sd.slicedFiles[path]; !ok || added {
+			continue
+		}
+		files = append(files, archive.PathInfo{
+			Path: path,
+			Info: info,
+		})
+		sd.slicedFiles[path] = true
+	}
+	return files
+}
+
+// return parents within the sliceableDir
+func (sd *sliceableDir) fileParents(file string) []archive.PathInfo {
+	parent := filepath.Dir(file)
+	if parent == sd.path {
+		return []archive.PathInfo{
+			{Path: sd.path, Info: sd.pathInfos[sd.path]},
+		}
+	}
+	return append(sd.fileParents(parent), archive.PathInfo{
+		Path: parent,
+		Info: sd.pathInfos[parent],
+	})
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/layers/writer.go b/vendor/github.com/buildpacks/lifecycle/layers/writer.go
new file mode 100644
index 0000000000..cc6df8dab5
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/layers/writer.go
@@ -0,0 +1,46 @@
+package layers
+
+import (
+	"archive/tar"
+	"crypto/sha256"
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+
+	"github.com/buildpacks/imgutil/layer"
+
+	"github.com/buildpacks/lifecycle/archive"
+)
+
+type layerWriter struct {
+	io.Writer
+	io.Closer
+	hasher *concurrentHasher
+	path   string
+}
+
+func newFileLayerWriter(dest string) (*layerWriter, error) {
+	hasher := newConcurrentHasher(sha256.New())
+	file, err := os.Create(dest)
+	if err != nil {
+		return nil, err
+	}
+	w := io.MultiWriter(hasher, file)
+	return &layerWriter{w, file, hasher, dest}, nil
+}
+
+func (lw *layerWriter) Digest() string {
+	return fmt.Sprintf("sha256:%x", lw.hasher.Sum(nil))
+}
+
+func tarWriter(lw *layerWriter) *archive.NormalizingTarWriter {
+	var tw *archive.NormalizingTarWriter
+	if runtime.GOOS == "windows" {
+		tw = archive.NewNormalizingTarWriter(layer.NewWindowsWriter(lw))
+	} else {
+		tw = archive.NewNormalizingTarWriter(tar.NewWriter(lw))
+	}
+	tw.WithModTime(archive.NormalizedModTime)
+	return tw
+}
diff --git a/vendor/github.com/buildpacks/lifecycle/lifecycle.toml b/vendor/github.com/buildpacks/lifecycle/lifecycle.toml
new file mode 100644
index 0000000000..fa16a2910c
--- /dev/null
+++ b/vendor/github.com/buildpacks/lifecycle/lifecycle.toml
@@ -0,0 +1,16 @@
+[apis]
+[apis.buildpack]
+  deprecated = []
+  supported = ["0.2", "0.3", "0.4", "0.5", "0.6"]
+[apis.platform]
+  deprecated = []
+  supported = ["0.3", "0.4", "0.5", "0.6", "0.7"]
+
+# For backwards compatibility the minimum supported APIs are provided in RFC-0011 format
+# 
https://github.com/buildpacks/rfcs/blob/main/text/0011-lifecycle-descriptor.md +[api] + platform = "0.3" + buildpack = "0.2" + +[lifecycle] + version = "{{.lifecycle_version}}" diff --git a/vendor/github.com/buildpacks/lifecycle/logger.go b/vendor/github.com/buildpacks/lifecycle/logger.go new file mode 100644 index 0000000000..6daaf01902 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/logger.go @@ -0,0 +1,15 @@ +package lifecycle + +type Logger interface { + Debug(msg string) + Debugf(fmt string, v ...interface{}) + + Info(msg string) + Infof(fmt string, v ...interface{}) + + Warn(msg string) + Warnf(fmt string, v ...interface{}) + + Error(msg string) + Errorf(fmt string, v ...interface{}) +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/cache.go b/vendor/github.com/buildpacks/lifecycle/platform/cache.go new file mode 100644 index 0000000000..073e03d236 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/cache.go @@ -0,0 +1,14 @@ +package platform + +type CacheMetadata struct { + Buildpacks []BuildpackLayersMetadata `json:"buildpacks"` +} + +func (cm *CacheMetadata) MetadataForBuildpack(id string) BuildpackLayersMetadata { + for _, bpMD := range cm.Buildpacks { + if bpMD.ID == id { + return bpMD + } + } + return BuildpackLayersMetadata{} +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/common/platform.go b/vendor/github.com/buildpacks/lifecycle/platform/common/platform.go new file mode 100644 index 0000000000..e1baea9eb9 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/common/platform.go @@ -0,0 +1,10 @@ +package common + +import ( + "github.com/buildpacks/lifecycle/cmd" +) + +type Platform interface { + API() string + CodeFor(errType cmd.LifecycleExitError) int +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/files.go b/vendor/github.com/buildpacks/lifecycle/platform/files.go new file mode 100644 index 0000000000..615532cffe --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/files.go @@ -0,0 +1,245 @@ +// Data Format Files for the platform api spec (https://github.com/buildpacks/spec/blob/main/platform.md#data-format). + +package platform + +import ( + "github.com/google/go-containerregistry/pkg/name" + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/buildpack" + "github.com/buildpacks/lifecycle/buildpack/layertypes" + "github.com/buildpacks/lifecycle/launch" + "github.com/buildpacks/lifecycle/layers" +) + +// analyzed.toml + +type AnalyzedMetadata struct { + PreviousImage *ImageIdentifier `toml:"image"` + Metadata LayersMetadata `toml:"metadata"` + RunImage *ImageIdentifier `toml:"run-image,omitempty"` +} + +// FIXME: fix key names to be accurate in the daemon case +type ImageIdentifier struct { + Reference string `toml:"reference"` +} + +// NOTE: This struct MUST be kept in sync with `LayersMetadataCompat` +type LayersMetadata struct { + App []LayerMetadata `json:"app" toml:"app"` + Buildpacks []BuildpackLayersMetadata `json:"buildpacks" toml:"buildpacks"` + Config LayerMetadata `json:"config" toml:"config"` + Launcher LayerMetadata `json:"launcher" toml:"launcher"` + ProcessTypes LayerMetadata `json:"process-types" toml:"process-types"` + RunImage RunImageMetadata `json:"runImage" toml:"run-image"` + Stack StackMetadata `json:"stack" toml:"stack"` +} + +// NOTE: This struct MUST be kept in sync with `LayersMetadata`. +// It exists for situations where the `App` field type cannot be +// guaranteed, yet the original struct data must be maintained. 
+type LayersMetadataCompat struct { + App interface{} `json:"app" toml:"app"` + Buildpacks []BuildpackLayersMetadata `json:"buildpacks" toml:"buildpacks"` + Config LayerMetadata `json:"config" toml:"config"` + Launcher LayerMetadata `json:"launcher" toml:"launcher"` + ProcessTypes LayerMetadata `json:"process-types" toml:"process-types"` + RunImage RunImageMetadata `json:"runImage" toml:"run-image"` + Stack StackMetadata `json:"stack" toml:"stack"` +} + +func (m *LayersMetadata) MetadataForBuildpack(id string) BuildpackLayersMetadata { + for _, bpMD := range m.Buildpacks { + if bpMD.ID == id { + return bpMD + } + } + return BuildpackLayersMetadata{} +} + +type LayerMetadata struct { + SHA string `json:"sha" toml:"sha"` +} + +type BuildpackLayersMetadata struct { + ID string `json:"key" toml:"key"` + Version string `json:"version" toml:"version"` + Layers map[string]BuildpackLayerMetadata `json:"layers" toml:"layers"` + Store *buildpack.StoreTOML `json:"store,omitempty" toml:"store"` +} + +type BuildpackLayerMetadata struct { + LayerMetadata + layertypes.LayerMetadataFile +} + +type RunImageMetadata struct { + TopLayer string `json:"topLayer" toml:"top-layer"` + Reference string `json:"reference" toml:"reference"` +} + +// metadata.toml + +type BuildMetadata struct { + BOM []buildpack.BOMEntry `toml:"bom" json:"bom"` + Buildpacks []buildpack.GroupBuildpack `toml:"buildpacks" json:"buildpacks"` + Labels []buildpack.Label `toml:"labels" json:"-"` + Launcher LauncherMetadata `toml:"-" json:"launcher"` + Processes []launch.Process `toml:"processes" json:"processes"` + Slices []layers.Slice `toml:"slices" json:"-"` + BuildpackDefaultProcessType string `toml:"buildpack-default-process-type,omitempty" json:"buildpack-default-process-type,omitempty"` +} + +type LauncherMetadata struct { + Version string `json:"version"` + Source SourceMetadata `json:"source"` +} + +type SourceMetadata struct { + Git GitMetadata `json:"git"` +} + +type GitMetadata struct { + Repository string `json:"repository"` + Commit string `json:"commit"` +} + +func (md BuildMetadata) ToLaunchMD() launch.Metadata { + lmd := launch.Metadata{ + Processes: md.Processes, + } + for _, bp := range md.Buildpacks { + lmd.Buildpacks = append(lmd.Buildpacks, launch.Buildpack{ + API: bp.API, + ID: bp.ID, + }) + } + return lmd +} + +// plan.toml + +type BuildPlan struct { + Entries []BuildPlanEntry `toml:"entries"` +} + +func (p BuildPlan) Find(bpID string) buildpack.Plan { + var out []buildpack.Require + for _, entry := range p.Entries { + for _, provider := range entry.Providers { + if provider.ID == bpID { + out = append(out, entry.Requires...) 
+ break + } + } + } + return buildpack.Plan{Entries: out} +} + +// TODO: ensure at least one claimed entry of each name is provided by the BP +func (p BuildPlan) Filter(metRequires []string) BuildPlan { + var out []BuildPlanEntry + for _, planEntry := range p.Entries { + if !containsEntry(metRequires, planEntry) { + out = append(out, planEntry) + } + } + return BuildPlan{Entries: out} +} + +func containsEntry(metRequires []string, entry BuildPlanEntry) bool { + for _, met := range metRequires { + for _, planReq := range entry.Requires { + if met == planReq.Name { + return true + } + } + } + return false +} + +type BuildPlanEntry struct { + Providers []buildpack.GroupBuildpack `toml:"providers"` + Requires []buildpack.Require `toml:"requires"` +} + +func (be BuildPlanEntry) NoOpt() BuildPlanEntry { + var out []buildpack.GroupBuildpack + for _, p := range be.Providers { + out = append(out, p.NoOpt().NoAPI().NoHomepage()) + } + be.Providers = out + return be +} + +// project-metadata.toml + +type ProjectMetadata struct { + Source *ProjectSource `toml:"source" json:"source,omitempty"` +} + +type ProjectSource struct { + Type string `toml:"type" json:"type,omitempty"` + Version map[string]interface{} `toml:"version" json:"version,omitempty"` + Metadata map[string]interface{} `toml:"metadata" json:"metadata,omitempty"` +} + +// report.toml + +type ExportReport struct { + Build BuildReport `toml:"build,omitempty"` + Image ImageReport `toml:"image"` +} + +type BuildReport struct { + BOM []buildpack.BOMEntry `toml:"bom"` +} + +type ImageReport struct { + Tags []string `toml:"tags"` + ImageID string `toml:"image-id,omitempty"` + Digest string `toml:"digest,omitempty"` + ManifestSize int64 `toml:"manifest-size,omitzero"` +} + +// stack.toml + +type StackMetadata struct { + RunImage StackRunImageMetadata `json:"runImage" toml:"run-image"` +} + +type StackRunImageMetadata struct { + Image string `toml:"image" json:"image"` + Mirrors []string `toml:"mirrors" json:"mirrors,omitempty"` +} + +func (sm *StackMetadata) BestRunImageMirror(registry string) (string, error) { + if sm.RunImage.Image == "" { + return "", errors.New("missing run-image metadata") + } + runImageMirrors := []string{sm.RunImage.Image} + runImageMirrors = append(runImageMirrors, sm.RunImage.Mirrors...) 
+ runImageRef, err := byRegistry(registry, runImageMirrors) + if err != nil { + return "", errors.Wrap(err, "failed to find run-image") + } + return runImageRef, nil +} + +func byRegistry(reg string, imgs []string) (string, error) { + if len(imgs) < 1 { + return "", errors.New("no images provided to search") + } + + for _, img := range imgs { + ref, err := name.ParseReference(img, name.WeakValidation) + if err != nil { + continue + } + if reg == ref.Context().RegistryStr() { + return img, nil + } + } + return imgs[0], nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/labels.go b/vendor/github.com/buildpacks/lifecycle/platform/labels.go new file mode 100644 index 0000000000..63de14bf06 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/labels.go @@ -0,0 +1,9 @@ +package platform + +const ( + BuildMetadataLabel = "io.buildpacks.build.metadata" + LayerMetadataLabel = "io.buildpacks.lifecycle.metadata" + ProjectMetadataLabel = "io.buildpacks.project.metadata" + StackIDLabel = "io.buildpacks.stack.id" + MixinsLabel = "io.buildpacks.stack.mixins" +) diff --git a/vendor/github.com/buildpacks/lifecycle/platform/platform.go b/vendor/github.com/buildpacks/lifecycle/platform/platform.go new file mode 100644 index 0000000000..5b745326a1 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/platform.go @@ -0,0 +1,32 @@ +package platform + +import ( + "fmt" + + "github.com/buildpacks/lifecycle/platform/common" + "github.com/buildpacks/lifecycle/platform/pre06" + v06 "github.com/buildpacks/lifecycle/platform/v06" + v07 "github.com/buildpacks/lifecycle/platform/v07" +) + +var platform03 = pre06.NewPlatform("0.3") +var platform04 = pre06.NewPlatform("0.4") +var platform05 = pre06.NewPlatform("0.5") +var platform06 = v06.NewPlatform(platform05) +var platform07 = v07.NewPlatform(platform06) + +var supportedPlatforms = map[string]common.Platform{ + "0.3": platform03, + "0.4": platform04, + "0.5": platform05, + "0.6": platform06, + "0.7": platform07, +} + +func NewPlatform(apiStr string) (common.Platform, error) { + p, ok := supportedPlatforms[apiStr] + if !ok { + return nil, fmt.Errorf("unable to create platform for api %s: unknown api", apiStr) + } + return p, nil +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/pre06/exit.go b/vendor/github.com/buildpacks/lifecycle/platform/pre06/exit.go new file mode 100644 index 0000000000..dfb39ab90e --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/pre06/exit.go @@ -0,0 +1,38 @@ +package pre06 + +import ( + "github.com/buildpacks/lifecycle/cmd" +) + +var exitCodes = map[cmd.LifecycleExitError]int{ + // detect phase errors: 100-199 + cmd.FailedDetect: 100, // FailedDetect indicates that no buildpacks detected + cmd.FailedDetectWithErrors: 101, // FailedDetectWithErrors indicated that no buildpacks detected and at least one errored + cmd.DetectError: 102, // DetectError indicates generic detect error + + // analyze phase errors: 200-299 + cmd.AnalyzeError: 202, // AnalyzeError indicates generic analyze error + + // restore phase errors: 300-399 + cmd.RestoreError: 302, // RestoreError indicates generic restore error + + // build phase errors: 400-499 + cmd.FailedBuildWithErrors: 401, // FailedBuildWithErrors indicates buildpack error during /bin/build + cmd.BuildError: 402, // BuildError indicates generic build error + + // export phase errors: 500-599 + cmd.ExportError: 502, // ExportError indicates generic export error + + // rebase phase errors: 600-699 + cmd.RebaseError: 602, // 
RebaseError indicates generic rebase error + + // launch phase errors: 700-799 + cmd.LaunchError: 702, // LaunchError indicates generic launch error +} + +func (p *pre06Platform) CodeFor(errType cmd.LifecycleExitError) int { + if code, ok := exitCodes[errType]; ok { + return code + } + return cmd.CodeFailed +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/pre06/platform.go b/vendor/github.com/buildpacks/lifecycle/platform/pre06/platform.go new file mode 100644 index 0000000000..357abc54a2 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/pre06/platform.go @@ -0,0 +1,20 @@ +package pre06 + +import ( + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/platform/common" +) + +type pre06Platform struct { + api *api.Version +} + +func NewPlatform(apiString string) common.Platform { + return &pre06Platform{ + api: api.MustParse(apiString), + } +} + +func (p *pre06Platform) API() string { + return p.api.String() +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/v06/exit.go b/vendor/github.com/buildpacks/lifecycle/platform/v06/exit.go new file mode 100644 index 0000000000..a2687ef3a5 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/v06/exit.go @@ -0,0 +1,38 @@ +package v06 + +import ( + "github.com/buildpacks/lifecycle/cmd" +) + +var exitCodes = map[cmd.LifecycleExitError]int{ + // detect phase errors: 20-29 + cmd.FailedDetect: 20, // FailedDetect indicates that no buildpacks detected + cmd.FailedDetectWithErrors: 21, // FailedDetectWithErrors indicated that no buildpacks detected and at least one errored + cmd.DetectError: 22, // DetectError indicates generic detect error + + // analyze phase errors: 30-39 + cmd.AnalyzeError: 32, // AnalyzeError indicates generic analyze error + + // restore phase errors: 40-49 + cmd.RestoreError: 42, // RestoreError indicates generic restore error + + // build phase errors: 50-59 + cmd.FailedBuildWithErrors: 51, // FailedBuildWithErrors indicates buildpack error during /bin/build + cmd.BuildError: 52, // BuildError indicates generic build error + + // export phase errors: 60-69 + cmd.ExportError: 62, // ExportError indicates generic export error + + // rebase phase errors: 70-79 + cmd.RebaseError: 72, // RebaseError indicates generic rebase error + + // launch phase errors: 80-89 + cmd.LaunchError: 82, // LaunchError indicates generic launch error +} + +func (p *v06Platform) CodeFor(errType cmd.LifecycleExitError) int { + if code, ok := exitCodes[errType]; ok { + return code + } + return cmd.CodeFailed +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/v06/platform.go b/vendor/github.com/buildpacks/lifecycle/platform/v06/platform.go new file mode 100644 index 0000000000..fe78fbc2bf --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/v06/platform.go @@ -0,0 +1,22 @@ +package v06 + +import ( + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/platform/common" +) + +type v06Platform struct { + api *api.Version + previousPlatform common.Platform +} + +func NewPlatform(previousPlatform common.Platform) common.Platform { + return &v06Platform{ + api: api.MustParse("0.6"), + previousPlatform: previousPlatform, + } +} + +func (p *v06Platform) API() string { + return p.api.String() +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/v07/exit.go b/vendor/github.com/buildpacks/lifecycle/platform/v07/exit.go new file mode 100644 index 0000000000..2c6d891791 --- /dev/null +++ 
b/vendor/github.com/buildpacks/lifecycle/platform/v07/exit.go @@ -0,0 +1,9 @@ +package v07 + +import ( + "github.com/buildpacks/lifecycle/cmd" +) + +func (p *v07Platform) CodeFor(errType cmd.LifecycleExitError) int { + return p.previousPlatform.CodeFor(errType) +} diff --git a/vendor/github.com/buildpacks/lifecycle/platform/v07/platform.go b/vendor/github.com/buildpacks/lifecycle/platform/v07/platform.go new file mode 100644 index 0000000000..96aa8cc76a --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/platform/v07/platform.go @@ -0,0 +1,22 @@ +package v07 + +import ( + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/platform/common" +) + +type v07Platform struct { + api *api.Version + previousPlatform common.Platform +} + +func NewPlatform(previousPlatform common.Platform) common.Platform { + return &v07Platform{ + api: api.MustParse("0.7"), + previousPlatform: previousPlatform, + } +} + +func (p *v07Platform) API() string { + return p.api.String() +} diff --git a/vendor/github.com/buildpacks/lifecycle/rebaser.go b/vendor/github.com/buildpacks/lifecycle/rebaser.go new file mode 100644 index 0000000000..e649dea75c --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/rebaser.go @@ -0,0 +1,126 @@ +package lifecycle + +import ( + "encoding/json" + "fmt" + "sort" + "strings" + + "github.com/buildpacks/imgutil" + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/platform" +) + +type Rebaser struct { + Logger Logger + PlatformAPI *api.Version +} + +type RebaseReport struct { + Image platform.ImageReport `toml:"image"` +} + +func (r *Rebaser) Rebase(appImage imgutil.Image, newBaseImage imgutil.Image, additionalNames []string) (RebaseReport, error) { + var origMetadata platform.LayersMetadataCompat + if err := DecodeLabel(appImage, platform.LayerMetadataLabel, &origMetadata); err != nil { + return RebaseReport{}, errors.Wrap(err, "get image metadata") + } + + appStackID, err := appImage.Label(platform.StackIDLabel) + if err != nil { + return RebaseReport{}, errors.Wrap(err, "get app image stack") + } + + newBaseStackID, err := newBaseImage.Label(platform.StackIDLabel) + if err != nil { + return RebaseReport{}, errors.Wrap(err, "get new base image stack") + } + + if appStackID == "" { + return RebaseReport{}, errors.New("stack not defined on app image") + } + + if newBaseStackID == "" { + return RebaseReport{}, errors.New("stack not defined on new base image") + } + + if appStackID != newBaseStackID { + return RebaseReport{}, errors.New(fmt.Sprintf("incompatible stack: '%s' is not compatible with '%s'", newBaseStackID, appStackID)) + } + + if err := validateMixins(appImage, newBaseImage); err != nil { + return RebaseReport{}, err + } + + if err := appImage.Rebase(origMetadata.RunImage.TopLayer, newBaseImage); err != nil { + return RebaseReport{}, errors.Wrap(err, "rebase app image") + } + + origMetadata.RunImage.TopLayer, err = newBaseImage.TopLayer() + if err != nil { + return RebaseReport{}, errors.Wrap(err, "get rebase run image top layer SHA") + } + + identifier, err := newBaseImage.Identifier() + if err != nil { + return RebaseReport{}, errors.Wrap(err, "get run image id or digest") + } + origMetadata.RunImage.Reference = identifier.String() + + data, err := json.Marshal(origMetadata) + if err != nil { + return RebaseReport{}, errors.Wrap(err, "marshall metadata") + } + + if err := appImage.SetLabel(platform.LayerMetadataLabel, string(data)); err != nil { + return RebaseReport{}, errors.Wrap(err, "set 
app image metadata label") + } + + hasPrefix := func(l string) bool { return strings.HasPrefix(l, "io.buildpacks.stack.") } + if err := syncLabels(newBaseImage, appImage, hasPrefix); err != nil { + return RebaseReport{}, errors.Wrap(err, "set stack labels") + } + + report := RebaseReport{} + report.Image, err = saveImage(appImage, additionalNames, r.Logger) + if err != nil { + return RebaseReport{}, err + } + if !r.supportsManifestSize() { + // unset manifest size in report.toml for old platform API versions + report.Image.ManifestSize = 0 + } + + return report, err +} + +func validateMixins(appImg, newBaseImg imgutil.Image) error { + var appImageMixins []string + var newBaseImageMixins []string + + if err := DecodeLabel(appImg, platform.MixinsLabel, &appImageMixins); err != nil { + return errors.Wrap(err, "get app image mixins") + } + + if err := DecodeLabel(newBaseImg, platform.MixinsLabel, &newBaseImageMixins); err != nil { + return errors.Wrap(err, "get run image mixins") + } + + appImageMixins = removeStagePrefixes(appImageMixins) + newBaseImageMixins = removeStagePrefixes(newBaseImageMixins) + + _, missing, _ := compare(newBaseImageMixins, appImageMixins) + + if len(missing) > 0 { + sort.Strings(missing) + return fmt.Errorf("missing required mixin(s): %s", strings.Join(missing, ", ")) + } + + return nil +} + +func (r *Rebaser) supportsManifestSize() bool { + return r.PlatformAPI.AtLeast("0.6") +} diff --git a/vendor/github.com/buildpacks/lifecycle/restorer.go b/vendor/github.com/buildpacks/lifecycle/restorer.go new file mode 100644 index 0000000000..b348ee4aa4 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/restorer.go @@ -0,0 +1,120 @@ +package lifecycle + +import ( + "path/filepath" + + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/buildpack" + "github.com/buildpacks/lifecycle/cmd" + "github.com/buildpacks/lifecycle/layers" + "github.com/buildpacks/lifecycle/platform" +) + +type Restorer struct { + LayersDir string + Logger Logger + + Buildpacks []buildpack.GroupBuildpack + LayerMetadataRestorer LayerMetadataRestorer // Platform API >= 0.7 + LayersMetadata platform.LayersMetadata // Platform API >= 0.7 + Platform cmd.Platform +} + +// Restore restores metadata for launch and cache layers into the layers directory and attempts to restore layer data for cache=true layers, removing the layer when unsuccessful. +// If a usable cache is not provided, Restore will not restore any cache=true layer metadata. +func (r *Restorer) Restore(cache Cache) error { + cacheMeta, err := retrieveCacheMetadata(cache, r.Logger) + if err != nil { + return err + } + + useShaFiles := !r.restoresLayerMetadata() + layerSHAStore := NewLayerSHAStore(useShaFiles) + if r.restoresLayerMetadata() { + if err := r.LayerMetadataRestorer.Restore(r.Buildpacks, r.LayersMetadata, cacheMeta, layerSHAStore); err != nil { + return err + } + } + + var g errgroup.Group + for _, buildpack := range r.Buildpacks { + cachedLayers := cacheMeta.MetadataForBuildpack(buildpack.ID).Layers + + var cachedFn func(bpLayer) bool + if api.MustParse(buildpack.API).AtLeast("0.6") { + // On Buildpack API 0.6+, the .toml file never contains layer types information. + // The cache metadata is the only way to identify cache=true layers. + cachedFn = func(l bpLayer) bool { + layer, ok := cachedLayers[filepath.Base(l.path)] + return ok && layer.Cache + } + } else { + // On Buildpack API < 0.6, the .toml file contains layer types information. 
+ // Prefer .toml file to cache metadata in case the cache was cleared between builds and + // the analyzer that wrote the files is on a previous version of the lifecycle, which doesn't cross-reference the cache metadata when writing the files. + // This allows the restorer to clean up .toml files for layers that are not actually in the cache. + cachedFn = forCached + } + + buildpackDir, err := readBuildpackLayersDir(r.LayersDir, buildpack, r.Logger) + if err != nil { + return errors.Wrapf(err, "reading buildpack layer directory") + } + foundLayers := buildpackDir.findLayers(cachedFn) + + for _, bpLayer := range foundLayers { + cachedLayer, exists := cachedLayers[bpLayer.name()] + if !exists { + r.Logger.Infof("Removing %q, not in cache", bpLayer.Identifier()) + if err := bpLayer.remove(); err != nil { + return errors.Wrapf(err, "removing layer") + } + continue + } + + layerSha, err := layerSHAStore.get(buildpack.ID, bpLayer) + if err != nil { + return err + } + + if layerSha != cachedLayer.SHA { + r.Logger.Infof("Removing %q, wrong sha", bpLayer.Identifier()) + r.Logger.Debugf("Layer sha: %q, cache sha: %q", layerSha, cachedLayer.SHA) + if err := bpLayer.remove(); err != nil { + return errors.Wrapf(err, "removing layer") + } + } else { + r.Logger.Infof("Restoring data for %q from cache", bpLayer.Identifier()) + g.Go(func() error { + return r.restoreLayer(cache, cachedLayer.SHA) + }) + } + } + } + if err := g.Wait(); err != nil { + return errors.Wrap(err, "restoring data") + } + return nil +} + +func (r *Restorer) restoresLayerMetadata() bool { + return api.MustParse(r.Platform.API()).AtLeast("0.7") +} + +func (r *Restorer) restoreLayer(cache Cache, sha string) error { + // Sanity check to prevent panic. + if cache == nil { + return errors.New("restoring layer: cache not provided") + } + r.Logger.Debugf("Retrieving data for %q", sha) + rc, err := cache.RetrieveLayer(sha) + if err != nil { + return err + } + defer rc.Close() + + return layers.Extract(rc, "") +} diff --git a/vendor/github.com/buildpacks/lifecycle/save.go b/vendor/github.com/buildpacks/lifecycle/save.go new file mode 100644 index 0000000000..ad79dca3e3 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/save.go @@ -0,0 +1,95 @@ +package lifecycle + +import ( + "fmt" + + "github.com/pkg/errors" + + "github.com/buildpacks/imgutil" + "github.com/buildpacks/imgutil/local" + "github.com/buildpacks/imgutil/remote" + + "github.com/buildpacks/lifecycle/platform" +) + +func saveImage(image imgutil.Image, additionalNames []string, logger Logger) (platform.ImageReport, error) { + var saveErr error + imageReport := platform.ImageReport{} + logger.Infof("Saving %s...\n", image.Name()) + if err := image.Save(additionalNames...); err != nil { + var ok bool + if saveErr, ok = err.(imgutil.SaveError); !ok { + return platform.ImageReport{}, errors.Wrap(err, "saving image") + } + } + + id, idErr := image.Identifier() + if idErr != nil { + if saveErr != nil { + return platform.ImageReport{}, &MultiError{Errors: []error{idErr, saveErr}} + } + return platform.ImageReport{}, idErr + } + + logger.Infof("*** Images (%s):\n", shortID(id)) + for _, n := range append([]string{image.Name()}, additionalNames...) 
{ + if ok, message := getSaveStatus(saveErr, n); !ok { + logger.Infof(" %s - %s\n", n, message) + } else { + logger.Infof(" %s\n", n) + imageReport.Tags = append(imageReport.Tags, n) + } + } + switch v := id.(type) { + case local.IDIdentifier: + imageReport.ImageID = v.String() + logger.Debugf("\n*** Image ID: %s\n", v.String()) + case remote.DigestIdentifier: + imageReport.Digest = v.Digest.DigestStr() + logger.Debugf("\n*** Digest: %s\n", v.Digest.DigestStr()) + default: + } + + manifestSize, sizeErr := image.ManifestSize() + if sizeErr != nil { + // ignore the manifest size if it's unavailable + logger.Infof("*** Manifest size is unavailable: %s\n", sizeErr.Error()) + } else if manifestSize != 0 { + imageReport.ManifestSize = manifestSize + logger.Debugf("\n*** Manifest Size: %d\n", manifestSize) + } + + return imageReport, saveErr +} + +type MultiError struct { + Errors []error +} + +func (me *MultiError) Error() string { + return fmt.Sprintf("failed with multiple errors %+v", me.Errors) +} + +func shortID(identifier imgutil.Identifier) string { + switch v := identifier.(type) { + case local.IDIdentifier: + return TruncateSha(v.String()) + case remote.DigestIdentifier: + return v.Digest.DigestStr() + default: + return v.String() + } +} + +func getSaveStatus(err error, imageName string) (bool, string) { + if err != nil { + if saveErr, ok := err.(imgutil.SaveError); ok { + for _, d := range saveErr.Errors { + if d.ImageName == imageName { + return false, d.Cause.Error() + } + } + } + } + return true, "" +} diff --git a/vendor/github.com/buildpacks/lifecycle/utils.go b/vendor/github.com/buildpacks/lifecycle/utils.go new file mode 100644 index 0000000000..0f18ffaf25 --- /dev/null +++ b/vendor/github.com/buildpacks/lifecycle/utils.go @@ -0,0 +1,149 @@ +package lifecycle + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + + "github.com/BurntSushi/toml" + "github.com/buildpacks/imgutil" + "github.com/pkg/errors" + + "github.com/buildpacks/lifecycle/buildpack" +) + +func WriteTOML(path string, data interface{}) error { + if err := os.MkdirAll(filepath.Dir(path), 0777); err != nil { + return err + } + f, err := os.Create(path) + if err != nil { + return err + } + defer f.Close() + return toml.NewEncoder(f).Encode(data) +} + +func ReadGroup(path string) (buildpack.Group, error) { + var group buildpack.Group + _, err := toml.DecodeFile(path, &group) + return group, err +} + +func ReadOrder(path string) (buildpack.Order, error) { + var order struct { + Order buildpack.Order `toml:"order"` + } + _, err := toml.DecodeFile(path, &order) + return order.Order, err +} + +// TruncateSha shortens a digest to its first 12 hex characters, dropping any "sha256:" prefix. +func TruncateSha(sha string) string { + rawSha := strings.TrimPrefix(sha, "sha256:") + if len(rawSha) > 12 { + return rawSha[0:12] + } + return rawSha +} + +func DecodeLabel(image imgutil.Image, label string, v interface{}) error { + if !image.Found() { + return nil + } + contents, err := image.Label(label) + if err != nil { + return errors.Wrapf(err, "retrieving label '%s' for image '%s'", label, image.Name()) + } + if contents == "" { + return nil + } + if err := json.Unmarshal([]byte(contents), v); err != nil { + return errors.Wrapf(err, "failed to unmarshal contents of label '%s'", label) + } + return nil +} + +func removeStagePrefixes(mixins []string) []string { + var result []string + for _, m := range mixins { + s := strings.SplitN(m, ":", 2) + if len(s) == 1 { + result = append(result, s[0]) + } else { + result = append(result, s[1]) + } + } + return result +} + +// compare performs a set comparison between two 
slices. `extra` represents elements present in +// `strings1` but not `strings2`. `missing` represents elements present in `strings2` that are +// missing from `strings1`. `common` represents elements present in both slices. Since the input +// slices are treated as sets, duplicates will be removed in any outputs. +// from: https://github.com/buildpacks/pack/blob/main/internal/stringset/stringset.go +func compare(strings1, strings2 []string) (extra []string, missing []string, common []string) { + set1 := map[string]struct{}{} + set2 := map[string]struct{}{} + for _, s := range strings1 { + set1[s] = struct{}{} + } + for _, s := range strings2 { + set2[s] = struct{}{} + } + + for s := range set1 { + if _, ok := set2[s]; !ok { + extra = append(extra, s) + continue + } + common = append(common, s) + } + + for s := range set2 { + if _, ok := set1[s]; !ok { + missing = append(missing, s) + } + } + + return extra, missing, common +} + +func syncLabels(sourceImg imgutil.Image, destImage imgutil.Image, test func(string) bool) error { + if err := removeLabels(destImage, test); err != nil { + return err + } + return copyLabels(sourceImg, destImage, test) +} + +func removeLabels(image imgutil.Image, test func(string) bool) error { + labels, err := image.Labels() + if err != nil { + return err + } + + for label := range labels { + if test(label) { + if err := image.RemoveLabel(label); err != nil { + return errors.Wrapf(err, "failed to remove label '%s'", label) + } + } + } + return nil +} + +func copyLabels(fromImage imgutil.Image, destImage imgutil.Image, test func(string) bool) error { + fromLabels, err := fromImage.Labels() + if err != nil { + return err + } + + for label, labelValue := range fromLabels { + if test(label) { + if err := destImage.SetLabel(label, labelValue); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/buildpacks/pack/.gitignore b/vendor/github.com/buildpacks/pack/.gitignore new file mode 100644 index 0000000000..30eb369432 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/.gitignore @@ -0,0 +1,16 @@ +.pack/ +/pack +out/ +benchmarks.test +*.out + +# Jetbrains Goland +.idea/ + +# Build outputs +artifacts/ +.DS_Store + +# Travis unencrypted file +.travis/key.pem + diff --git a/vendor/github.com/buildpacks/pack/.gitpod.yml b/vendor/github.com/buildpacks/pack/.gitpod.yml new file mode 100644 index 0000000000..15c2e1f9ab --- /dev/null +++ b/vendor/github.com/buildpacks/pack/.gitpod.yml @@ -0,0 +1,14 @@ + +tasks: + - init: make build +github: + prebuilds: + master: true + branches: true + pullRequests: true + pullRequestsFromForks: true + addCheck: true + +vscode: + extensions: + - golang.go \ No newline at end of file diff --git a/vendor/github.com/buildpacks/pack/CODEOWNERS b/vendor/github.com/buildpacks/pack/CODEOWNERS new file mode 100644 index 0000000000..3daccc65fa --- /dev/null +++ b/vendor/github.com/buildpacks/pack/CODEOWNERS @@ -0,0 +1 @@ +* @buildpacks/platform-maintainers diff --git a/vendor/github.com/buildpacks/pack/CONTRIBUTING.md b/vendor/github.com/buildpacks/pack/CONTRIBUTING.md new file mode 100644 index 0000000000..1cdbc6aef1 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/CONTRIBUTING.md @@ -0,0 +1,84 @@ +We're glad you are interested in contributing to this project. We hope that this +document helps you get started. + +## Policies + +This repository adheres to the following project policies: + +- [Code of Conduct][code-of-conduct] - How we should act with each other. 
+- [Contributing][contributing] - General contributing standards. +- [Security][security] - Reporting security concerns. +- [Support][support] - Getting support. + +## Contributing to this repository + +### Development + +Aside from the policies above, [DEVELOPMENT.md](DEVELOPMENT.md) provides specific, helpful detail to assist you while developing in this repository. + +We welcome any and all contributions! The issues we are prioritizing are visible in the repository [milestones](https://github.com/buildpacks/pack/milestones), and we especially welcome contributions to those. One good place to start contributing is in our [documentation](https://github.com/buildpacks/docs/issues), though you are welcome to start in this repository! + +#### Preparing for a Pull Request + +After making all the changes but before creating a [Pull Request][pull-request-process], you should run +`make prepare-for-pr`. This command runs a set of other tasks that resolve or report any simple issues that would +otherwise arise during the pull request review process. + +### User Acceptance on a Pull Request + +Running user acceptance on a pull request is just as critical as reviewing the code changes. It gives you, a contributor and user, direct insight into how a feature works and lets you provide feedback on what could be improved. + +#### Downloading PR binaries + +1. On GitHub's Pull Request view, click on the **Checks** tab. +2. On the top-right, click **Artifacts**. +3. Click on the zip file for the platform you are running. + +#### Setup + +1. Unzip binary: + ```shell + unzip pack-{{PLATFORM}}.zip + ``` +2. Enable execution of binary _(macOS/Linux only)_: + ```shell + chmod +x ./pack + ``` + + > For macOS, you might need to allow your terminal to execute applications from unverified developers. See [Apple Support](https://support.apple.com/en-us/HT202491). + > + > A quick solution is to add an exception for the downloaded pack binary: `sudo spctl --add -v ./pack` +3. You should now be able to execute pack via: + - macOS: `./pack` + - Linux: `./pack` + - Windows: `pack.exe` + + +#### Writing Feedback + +When writing feedback, please provide a succinct title, a summary of the observation, what you expected, and some output or screenshots. 
+ +Here's a simple template you can use: + +```text + +#### + + + +###### Expected + + + +###### Output + + +``` + + +[code-of-conduct]: https://github.com/buildpacks/.github/blob/main/CODE_OF_CONDUCT.md +[contributing]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTING.md +[security]: https://github.com/buildpacks/.github/blob/main/SECURITY.md +[support]: https://github.com/buildpacks/.github/blob/main/SUPPORT.md +[pull-request-process]: https://github.com/buildpacks/.github/blob/main/CONTRIBUTIONS.md#pull-request-process diff --git a/vendor/github.com/buildpacks/pack/DEVELOPMENT.md b/vendor/github.com/buildpacks/pack/DEVELOPMENT.md new file mode 100644 index 0000000000..16e3e3548a --- /dev/null +++ b/vendor/github.com/buildpacks/pack/DEVELOPMENT.md @@ -0,0 +1,114 @@ +# Development + +## Prerequisites + +* [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) + * macOS: _(built-in)_ + * Windows: + * `choco install git -y` + * `git config --global core.autocrlf false` +* [Go](https://golang.org/doc/install) + * macOS: `brew install go` + * Windows: `choco install golang -y` +* [Docker](https://www.docker.com/products/docker-desktop) +* Make (and build tools) + * macOS: `xcode-select --install` + * Windows: + * `choco install cygwin make -y` + * `[Environment]::SetEnvironmentVariable("PATH", "C:\tools\cygwin\bin;$ENV:PATH", "MACHINE")` + +Alternatively, you can use Gitpod to run a pre-configured dev environment in the cloud right from your browser + +[![Open in Gitpod](https://gitpod.io/button/open-in-gitpod.svg)](https://gitpod.io/#https://github.com/buildpacks/pack) + + +### Windows Caveats + +* Symlinks - Some of our tests attempt to create symlinks. On Windows, this requires the [permission to be provided](https://stackoverflow.com/a/24353758). + +## Tasks + +### Building + +To build pack: +``` +make build +``` + +This will output the binary to the directory `out/`. + +Options: + +| ENV_VAR | Description | Default | +|--------------|------------------------------------------------------------------------|---------| +| GOCMD | Change the `go` executable. For example, [richgo][rgo] for testing. | go | +| PACK_BIN | Change the name or location of the binary relative to `out/`. 
| pack | +| PACK_VERSION | Tell `pack` what version to consider itself | `dev` | + +[rgo]: https://github.com/kyoh86/richgo + +_NOTE: This project uses [go modules](https://github.com/golang/go/wiki/Modules) for dependency management._ + +### Testing + +To run unit and integration tests: +```shell +make unit +``` + +To run acceptance tests: +```shell +make acceptance +``` + +Alternatively, to run all tests: +```shell +make test +``` + +To run our full acceptance suite (including cross-compatibility for n-1 `pack` and `lifecycle`): +```shell +make acceptance-all +``` + +### Tidy + +To format the code: +```shell +make format +``` + +To tidy up the codebase and dependencies: +```shell +make tidy +``` + +### Verification + +To verify formatting and code quality: +```shell +make verify +``` + +### Prepare for PR + +Runs various checks to ensure compliance: +```shell +make prepare-for-pr +``` + +### Acceptance Tests +Some options users can provide to our acceptance tests are: + +| ENV_VAR | Description | Default | +|--------------|------------------------------------------------------------------------|---------| +| ACCEPTANCE_SUITE_CONFIG | A set of configurations for how to run the acceptance tests, describing the version of `pack` used for testing, the version of `pack` used to create the builders used in the test, and the version of `lifecycle` binaries used to test with Github | `[{"pack": "current", "pack_create_builder": "current", "lifecycle": "default"}]` | +| COMPILE_PACK_WITH_VERSION | Tell `pack` what version to consider itself | `dev` | +| GITHUB_TOKEN | A Github Token, used when downloading `pack` and `lifecycle` releases from Github during the test setup | "" | +| LIFECYCLE_IMAGE | Image reference to be used in untrusted builder workflows | buildpacksio/lifecycle: | +| LIFECYCLE_PATH | Path to a `.tgz` file filled with a set of `lifecycle` binaries | The Github release for the default version of lifecycle in `pack` | +| PACK_PATH | Path to a `pack` executable. | A compiled version of the current branch | +| PREVIOUS_LIFECYCLE_IMAGE | Image reference to be used in untrusted builder workflows, used to test compatibility of `pack` with the n-1 version of the `lifecycle` | buildpacksio/lifecycle:, buildpacksio/lifecycle: | +| PREVIOUS_LIFECYCLE_PATH | Path to a `.tgz` file filled with a set of `lifecycle` binaries, used to test compatibility of `pack` with the n-1 version of the `lifecycle` | The Github release for n-1 release of `lifecycle` | +| PREVIOUS_PACK_FIXTURES_PATH | Path to a set of fixtures, used to override the most up-to-date fixtures, in case of changed functionality | `acceptance/testdata/pack_previous_fixtures_overrides` | +| PREVIOUS_PACK_PATH | Path to a `pack` executable, used to test compatibility with n-1 version of `pack` | The most recent release from `pack`'s Github release | diff --git a/vendor/github.com/buildpacks/pack/LICENSE b/vendor/github.com/buildpacks/pack/LICENSE new file mode 100644 index 0000000000..44eae60e00 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 The Cloud Native Buildpacks Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
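A quick orientation before the pack Makefile below: its version stamping appends the git sha and/or build number to `PACK_VERSION` when they are non-empty. A minimal Go sketch of that rule, with hypothetical inputs (the authoritative logic is the Makefile's `ifneq` chain that follows):

```go
package main

import "fmt"

// packVersion mirrors the version-suffix rule in the pack Makefile below:
// append the git sha and/or build number to the base version when non-empty.
func packVersion(base, sha, build string) string {
	switch {
	case sha != "" && build != "":
		return fmt.Sprintf("%s+git-%s.build-%s", base, sha, build)
	case build != "":
		return fmt.Sprintf("%s+build-%s", base, build)
	case sha != "":
		return fmt.Sprintf("%s+git-%s", base, sha)
	}
	return base
}

func main() {
	// Hypothetical inputs: CI build 12 at short sha 1a2b3c4.
	fmt.Println(packVersion("0.21.0", "1a2b3c4", "12")) // 0.21.0+git-1a2b3c4.build-12
}
```

The `build` target then embeds the resulting string into the binary via `-X 'github.com/buildpacks/pack.Version=${PACK_VERSION}'`.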
diff --git a/vendor/github.com/buildpacks/pack/Makefile b/vendor/github.com/buildpacks/pack/Makefile new file mode 100644 index 0000000000..707a27e633 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/Makefile @@ -0,0 +1,158 @@ +ifeq ($(OS),Windows_NT) +SHELL:=cmd.exe + +# Need BLANK due to makefile parsing of `\` +# (see: https://stackoverflow.com/questions/54733231/how-to-escape-a-backslash-in-the-end-to-mean-literal-backslash-in-makefile/54733416#54733416) +BLANK:= + +# Define variable named `/` to represent OS path separator (usable as `$/` in this file) +/:=\$(BLANK) +CAT=type +RMRF=rmdir /q /s +SRC=$(shell dir /q /s /b *.go | findstr /v $/out$/) +GOIMPORTS_DIFF_OPTION="-l" # Windows can't do diff-mode because it's missing the "diff" binary +PACK_BIN?=pack.exe +else +/:=/ +CAT=cat +RMRF=rm -rf +SRC=$(shell find . -type f -name '*.go' -not -path "*/out/*") +GOIMPORTS_DIFF_OPTION:="-d" +PACK_BIN?=pack +endif + +ACCEPTANCE_TIMEOUT?=$(TEST_TIMEOUT) +ARCHIVE_NAME=pack-$(PACK_VERSION) +GOCMD?=go +GOFLAGS?= +GOTESTFLAGS?=-v -count=1 -parallel=1 +PACKAGE_BASE=github.com/buildpacks/pack +PACK_GITSHA1=$(shell git rev-parse --short=7 HEAD) +PACK_VERSION?=0.0.0 +TEST_TIMEOUT?=1200s +UNIT_TIMEOUT?=$(TEST_TIMEOUT) +NO_DOCKER?= + +clean_build := $(strip ${PACK_BUILD}) +clean_sha := $(strip ${PACK_GITSHA1}) + +# append build number and git sha to version, if not-empty +ifneq ($(and $(clean_build),$(clean_sha)),) +PACK_VERSION:=${PACK_VERSION}+git-${clean_sha}.build-${clean_build} +else ifneq ($(clean_build),) +PACK_VERSION:=${PACK_VERSION}+build-${clean_build} +else ifneq ($(clean_sha),) +PACK_VERSION:=${PACK_VERSION}+git-${clean_sha} +endif + +export GOFLAGS:=$(GOFLAGS) +export CGO_ENABLED=0 + +BINDIR:=/usr/bin/ + +# this target must be listed first in order for it to be a default target, +# so that ubuntu_ppa's may be constructed using default build tools. +build: out + @echo "> Building..." + $(GOCMD) build -ldflags "-s -w -X 'github.com/buildpacks/pack.Version=${PACK_VERSION}' -extldflags ${LDFLAGS}" -trimpath -o ./out/$(PACK_BIN) -a ./cmd/pack + +all: clean verify test build + +# used by apt-get install when installing ubuntu ppa. +# move pack binary onto a path location. +install: + mkdir -p ${DESTDIR}${BINDIR} + cp ./out/$(PACK_BIN) ${DESTDIR}${BINDIR}/ + +mod-tidy: + $(GOCMD) mod tidy + cd tools && $(GOCMD) mod tidy + +tidy: mod-tidy format + +package: out + tar czf .$/out$/$(ARCHIVE_NAME).tgz -C .$/out$/ $(PACK_BIN) + +install-mockgen: + @echo "> Installing mockgen..." + cd tools && $(GOCMD) install github.com/golang/mock/mockgen + +install-goimports: + @echo "> Installing goimports..." + cd tools && $(GOCMD) install golang.org/x/tools/cmd/goimports + +format: install-goimports + @echo "> Formatting code..." + @goimports -l -w -local ${PACKAGE_BASE} ${SRC} + @go run tools/pedantic_imports/main.go ${PACKAGE_BASE} ${SRC} + +install-golangci-lint: + @echo "> Installing golangci-lint..." + cd tools && $(GOCMD) install github.com/golangci/golangci-lint/cmd/golangci-lint + +lint: install-golangci-lint + @echo "> Linting code..." + @golangci-lint run -c golangci.yaml + +test: unit acceptance + +# append coverage arguments +ifeq ($(TEST_COVERAGE), 1) +unit: GOTESTFLAGS:=$(GOTESTFLAGS) -coverprofile=./out/tests/coverage-unit.txt -covermode=atomic +endif +ifeq ($(NO_DOCKER),) +unit: GOTESTFLAGS:=$(GOTESTFLAGS) --tags=example +endif +unit: out + @echo "> Running unit/integration tests..." + $(GOCMD) test $(GOTESTFLAGS) -timeout=$(UNIT_TIMEOUT) ./... 
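+# (Illustrative note, not from upstream: given the conditionals above, `TEST_COVERAGE=1 make unit` writes a coverage profile to ./out/tests/coverage-unit.txt, while setting NO_DOCKER to any non-empty value omits the `example` build tag from GOTESTFLAGS.)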
+ +acceptance: out + @echo "> Running acceptance tests..." + $(GOCMD) test $(GOTESTFLAGS) -timeout=$(ACCEPTANCE_TIMEOUT) -tags=acceptance ./acceptance + +acceptance-all: export ACCEPTANCE_SUITE_CONFIG:=$(shell $(CAT) .$/acceptance$/testconfig$/all.json) +acceptance-all: + @echo "> Running acceptance tests..." + $(GOCMD) test $(GOTESTFLAGS) -timeout=$(ACCEPTANCE_TIMEOUT) -tags=acceptance ./acceptance + +clean: + @echo "> Cleaning workspace..." + @$(RMRF) .$/out benchmarks.test || (exit 0) + +verify: verify-format lint + +generate: install-mockgen + @echo "> Generating mocks..." + $(GOCMD) generate ./... + +verify-format: install-goimports + @echo "> Verifying format..." + $(if $(shell goimports -l -local ${PACKAGE_BASE} ${SRC}), @echo ERROR: Format verification failed! && goimports ${GOIMPORTS_DIFF_OPTION} -local ${PACKAGE_BASE} ${SRC} && exit 1) + +prepare-for-pr: tidy verify test + @git diff-index --quiet HEAD -- ||\ + (echo "-----------------" &&\ + echo "NOTICE: There are some files that have not been committed." &&\ + echo "-----------------\n" &&\ + git status &&\ + echo "\n-----------------" &&\ + echo "NOTICE: There are some files that have not been committed." &&\ + echo "-----------------\n" &&\ + exit 0) + +benchmark: out + @echo "> Running Benchmarks" + $(GOCMD) test -run=^$ -bench=. -benchtime=1s -benchmem -memprofile=./out/bench_mem.out -cpuprofile=./out/bench_cpu.out -tags=benchmarks ./benchmarks/ -v +# NOTE: You can analyze the results, using go tool pprof. For instance, you can start a server to see a graph of the cpu usage by running +# go tool pprof -http=":8082" out/bench_cpu.out. Alternatively, you can run go tool pprof, and in the ensuing cli, run +# commands like top10 or web to dig down into the cpu and memory usage +# For more, see https://blog.golang.org/pprof + +# NOTE: Windows doesn't support `-p` +out: + @mkdir out || (exit 0) + mkdir out$/tests || (exit 0) + + +.PHONY: clean build format imports lint test unit acceptance prepare-for-pr verify verify-format benchmark diff --git a/vendor/github.com/buildpacks/pack/README.md b/vendor/github.com/buildpacks/pack/README.md new file mode 100644 index 0000000000..495172845d --- /dev/null +++ b/vendor/github.com/buildpacks/pack/README.md @@ -0,0 +1,45 @@ +# pack - Buildpack CLI + +[![Build results](https://github.com/buildpacks/pack/workflows/build/badge.svg)](https://github.com/buildpacks/pack/actions) +[![Go Report Card](https://goreportcard.com/badge/github.com/buildpacks/pack)](https://goreportcard.com/report/github.com/buildpacks/pack) +[![codecov](https://codecov.io/gh/buildpacks/pack/branch/main/graph/badge.svg)](https://codecov.io/gh/buildpacks/pack) +[![GoDoc](https://godoc.org/github.com/buildpacks/pack?status.svg)](https://godoc.org/github.com/buildpacks/pack) +[![GitHub license](https://img.shields.io/github/license/buildpacks/pack)](https://github.com/buildpacks/pack/blob/main/LICENSE) +[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4748/badge)](https://bestpractices.coreinfrastructure.org/projects/4748) +[![Slack](https://img.shields.io/badge/slack-join-ff69b4.svg?logo=slack)](https://slack.buildpacks.io/) +[![Gitpod ready-to-code](https://img.shields.io/badge/Gitpod-ready--to--code-blue?logo=gitpod)](https://gitpod.io/#https://github.com/buildpacks/pack) + +`pack` makes it easy for... +- [**App Developers**][app-dev] to use buildpacks to convert code into runnable images. +- [**Buildpack Authors**][bp-author] to develop and package buildpacks for distribution. 
+- [**Operators**][operator] to package buildpacks for distribution and maintain applications. + +## Usage + + + +## Getting Started +Get started by running through our tutorial: [An App’s Brief Journey from Source to Image][getting-started] + +## Contributing +- [CONTRIBUTING](CONTRIBUTING.md) - Information on how to contribute, including the pull request process. +- [DEVELOPMENT](DEVELOPMENT.md) - Further detail to help you during the development process. +- [RELEASE](RELEASE.md) - Further details about our release process. + +## Documentation +Check out the command line documentation [here][pack-docs]. + +## Specifications +`pack` is a CLI implementation of the [Platform Interface Specification][platform-spec] for [Cloud Native Buildpacks][buildpacks.io]. + +To learn more about the details, check out the [specs repository][specs]. + +[app-dev]: https://buildpacks.io/docs/app-developer-guide/ +[bp-author]: https://buildpacks.io/docs/buildpack-author-guide/ +[operator]: https://buildpacks.io/docs/operator-guide/ +[buildpacks.io]: https://buildpacks.io/ +[install-pack]: https://buildpacks.io/docs/install-pack/ +[getting-started]: https://buildpacks.io/docs/app-journey +[specs]: https://github.com/buildpacks/spec/ +[platform-spec]: https://github.com/buildpacks/spec/blob/main/platform.md +[pack-docs]: https://buildpacks.io/docs/tools/pack/cli/pack/ diff --git a/vendor/github.com/buildpacks/pack/RELEASE.md b/vendor/github.com/buildpacks/pack/RELEASE.md new file mode 100644 index 0000000000..a4d35cddae --- /dev/null +++ b/vendor/github.com/buildpacks/pack/RELEASE.md @@ -0,0 +1,63 @@ +# Release Process + +Pack follows a 6-week release cadence, composed of 3 phases: + - [Development](#development) + - [Feature Complete](#feature-complete) + - [Release Finalization](#release-finalization) + +## Roles + +#### Release Manager + +One of the [maintainers][maintainers] is designated as the release manager. They communicate the release status in the working group meetings, schedule additional meetings with the `pack` [maintainers][maintainers] as needed, and finalize the release. They also take care of whatever release needs may arise. + +## Phases + +### Development + +Our development flow is detailed in [Development](DEVELOPMENT.md). + +### Feature Complete + +5 business days prior to a scheduled release, we enter `feature complete`. + +During this period, a **Release Candidate** (RC) is published and used for further User Acceptance testing (`UAT`). Furthermore, additional RCs may be published based on assessment by the `pack` [maintainers][maintainers] of the **impact**, **effort** and **risk** of including the changes in the upcoming release. Any other changes may be merged into the `main` branch through the normal process, and will make it into the next release. + +To produce the release candidate the [release manager](#release-manager) will: +- Create a new release branch in form `release/<version>-rc` yielding a draft GitHub release to be published. +- Publish the [GitHub release][release]: + - Tag release branch as `v<version>-rc`. + - Release should be marked as "pre-release". + - The GitHub release will contain the following: + - **artifacts** + - **release notes** + - The release notes should be edited and cleaned up +- Merge the release branch into `main`. + +### Release Finalization + +The [release manager](#release-manager) will: +- Create a new release branch in form `release/<version>` yielding a draft GitHub release to be published. +- Publish the [GitHub release][release]:
- Tag release branch as `v<version>`. + - The GitHub release will contain the following: + - **artifacts** + - **release notes** + - **migration guide** (if necessary) +- Merge the release branch into `main`. +- Send out release notifications, if deemed necessary, on + - The [cncf-buildpacks mailing list](https://lists.cncf.io/g/cncf-buildpacks) + - Twitter + +## Manual Releasing + +We release pack to a number of systems, including `homebrew`, `docker`, and `archlinux`. All of our delivery pipelines +have `workflow_dispatch` triggers in case a maintainer needs to trigger them manually. To activate one, go to the +[actions page](https://github.com/buildpacks/pack/actions) and select the desired workflow. Run it by providing the pack +version to release, in the format `v<version>`. + +_For more information, see the [release process RFC][release-process]_ + +[maintainers]: https://github.com/buildpacks/community/blob/main/TEAMS.md#platform-team +[release-process]: https://github.com/buildpacks/rfcs/blob/main/text/0039-release-process.md#change-control-board +[release]: https://github.com/buildpacks/pack/releases diff --git a/vendor/github.com/buildpacks/pack/builder/config_reader.go b/vendor/github.com/buildpacks/pack/builder/config_reader.go new file mode 100644 index 0000000000..66625fbc5b --- /dev/null +++ b/vendor/github.com/buildpacks/pack/builder/config_reader.go @@ -0,0 +1,113 @@ +package builder + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/BurntSushi/toml" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/config" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/dist" +) + +// Config is a builder configuration file +type Config struct { + Description string `toml:"description"` + Buildpacks BuildpackCollection `toml:"buildpacks"` + Order dist.Order `toml:"order"` + Stack StackConfig `toml:"stack"` + Lifecycle LifecycleConfig `toml:"lifecycle"` +} + +// BuildpackCollection is a list of BuildpackConfigs +type BuildpackCollection []BuildpackConfig + +// BuildpackConfig details the configuration of a Buildpack +type BuildpackConfig struct { + dist.BuildpackInfo + dist.ImageOrURI +} + +func (c *BuildpackConfig) DisplayString() string { + if c.BuildpackInfo.FullName() != "" { + return c.BuildpackInfo.FullName() + } + + return c.ImageOrURI.DisplayString() +} + +// StackConfig details the configuration of a Stack +type StackConfig struct { + ID string `toml:"id"` + BuildImage string `toml:"build-image"` + RunImage string `toml:"run-image"` + RunImageMirrors []string `toml:"run-image-mirrors,omitempty"` +} + +// LifecycleConfig details the configuration of the Lifecycle +type LifecycleConfig struct { + URI string `toml:"uri"` + Version string `toml:"version"` +} + +// ReadConfig reads a builder configuration from the file path provided and returns the +// configuration along with any warnings encountered while parsing +func ReadConfig(path string) (config Config, warnings []string, err error) { + file, err := os.Open(filepath.Clean(path)) + if err != nil { + return Config{}, nil, errors.Wrap(err, "opening config file") + } + defer file.Close() + + config, err = parseConfig(file) + if err != nil { + return Config{}, nil, errors.Wrapf(err, "parse contents of '%s'", path) + } + + if len(config.Order) == 0 { + warnings = append(warnings, fmt.Sprintf("empty %s definition", style.Symbol("order"))) + } + + return config, warnings, nil +} + +// ValidateConfig validates the config +func ValidateConfig(c Config) error { + if c.Stack.ID == "" { + return 
errors.New("stack.id is required") + } + + if c.Stack.BuildImage == "" { + return errors.New("stack.build-image is required") + } + + if c.Stack.RunImage == "" { + return errors.New("stack.run-image is required") + } + + return nil +} + +// parseConfig reads a builder configuration from file +func parseConfig(file *os.File) (Config, error) { + builderConfig := Config{} + tomlMetadata, err := toml.DecodeReader(file, &builderConfig) + if err != nil { + return Config{}, errors.Wrap(err, "decoding toml contents") + } + + undecodedKeys := tomlMetadata.Undecoded() + if len(undecodedKeys) > 0 { + unknownElementsMsg := config.FormatUndecodedKeys(undecodedKeys) + + return Config{}, errors.Errorf("%s in %s", + unknownElementsMsg, + style.Symbol(file.Name()), + ) + } + + return builderConfig, nil +} diff --git a/vendor/github.com/buildpacks/pack/builder/detection_order.go b/vendor/github.com/buildpacks/pack/builder/detection_order.go new file mode 100644 index 0000000000..2063f02645 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/builder/detection_order.go @@ -0,0 +1,18 @@ +package builder + +import ( + "github.com/buildpacks/pack/pkg/dist" +) + +type DetectionOrderEntry struct { + dist.BuildpackRef `yaml:",inline"` + Cyclical bool `json:"cyclic,omitempty" yaml:"cyclic,omitempty" toml:"cyclic,omitempty"` + GroupDetectionOrder DetectionOrder `json:"buildpacks,omitempty" yaml:"buildpacks,omitempty" toml:"buildpacks,omitempty"` +} + +type DetectionOrder []DetectionOrderEntry + +const ( + OrderDetectionMaxDepth = -1 + OrderDetectionNone = 0 +) diff --git a/vendor/github.com/buildpacks/pack/buildpack.yml b/vendor/github.com/buildpacks/pack/buildpack.yml new file mode 100644 index 0000000000..4a48bef943 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/buildpack.yml @@ -0,0 +1,9 @@ +go: + targets: + - ./cmd/pack + + build: + # The go.build.flags property allows you to override the default build + # flags when compiling your program. + flags: + - -ldflags="-s -w -X 'github.com/buildpacks/pack.Version=#{PACK_VERSION}#'" diff --git a/vendor/github.com/buildpacks/pack/buildpackage/config_reader.go b/vendor/github.com/buildpacks/pack/buildpackage/config_reader.go new file mode 100644 index 0000000000..8dbbafa276 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/buildpackage/config_reader.go @@ -0,0 +1,116 @@ +package buildpackage + +import ( + "path/filepath" + + "github.com/BurntSushi/toml" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/config" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/buildpack" + "github.com/buildpacks/pack/pkg/dist" +) + +const defaultOS = "linux" + +// Config encapsulates the possible configuration options for buildpackage creation. +type Config struct { + Buildpack dist.BuildpackURI `toml:"buildpack"` + Dependencies []dist.ImageOrURI `toml:"dependencies"` + Platform dist.Platform `toml:"platform"` +} + +func DefaultConfig() Config { + return Config{ + Buildpack: dist.BuildpackURI{ + URI: ".", + }, + Platform: dist.Platform{ + OS: defaultOS, + }, + } +} + +// NewConfigReader returns an instance of ConfigReader. It does not take any parameters. +func NewConfigReader() *ConfigReader { + return &ConfigReader{} +} + +// ConfigReader implements a Read method for buildpackage configuration which parses and validates buildpackage +// configuration from a toml file. 
+type ConfigReader struct{} + +// Read reads and validates a buildpackage configuration from the file path provided and returns the +// configuration and any error that occurred during reading or validation. +func (r *ConfigReader) Read(path string) (Config, error) { + packageConfig := Config{} + + tomlMetadata, err := toml.DecodeFile(path, &packageConfig) + if err != nil { + return packageConfig, errors.Wrap(err, "decoding toml") + } + + undecodedKeys := tomlMetadata.Undecoded() + if len(undecodedKeys) > 0 { + unknownElementsMsg := config.FormatUndecodedKeys(undecodedKeys) + + return packageConfig, errors.Errorf("%s in %s", + unknownElementsMsg, + style.Symbol(path), + ) + } + + if packageConfig.Buildpack.URI == "" { + return packageConfig, errors.Errorf("missing %s configuration", style.Symbol("buildpack.uri")) + } + + if packageConfig.Platform.OS == "" { + packageConfig.Platform.OS = defaultOS + } + + if packageConfig.Platform.OS != "linux" && packageConfig.Platform.OS != "windows" { + return packageConfig, errors.Errorf("invalid %s configuration: only [%s, %s] is permitted, found %s", + style.Symbol("platform.os"), style.Symbol("linux"), style.Symbol("windows"), style.Symbol(packageConfig.Platform.OS)) + } + + configDir, err := filepath.Abs(filepath.Dir(path)) + if err != nil { + return packageConfig, err + } + + if err := validateURI(packageConfig.Buildpack.URI, configDir); err != nil { + return packageConfig, err + } + + for _, dep := range packageConfig.Dependencies { + if dep.URI != "" && dep.ImageName != "" { + return packageConfig, errors.Errorf( + "dependency configured with both %s and %s", + style.Symbol("uri"), + style.Symbol("image"), + ) + } + + if dep.URI != "" { + if err := validateURI(dep.URI, configDir); err != nil { + return packageConfig, err + } + } + } + + return packageConfig, nil +} + +func validateURI(uri, relativeBaseDir string) error { + locatorType, err := buildpack.GetLocatorType(uri, relativeBaseDir, nil) + if err != nil { + return err + } + + if locatorType == buildpack.InvalidLocator { + return errors.Errorf("invalid locator %s", style.Symbol(uri)) + } + + return nil +} diff --git a/vendor/github.com/buildpacks/pack/codecov.yml b/vendor/github.com/buildpacks/pack/codecov.yml new file mode 100644 index 0000000000..c77a18bf3a --- /dev/null +++ b/vendor/github.com/buildpacks/pack/codecov.yml @@ -0,0 +1,18 @@ +codecov: + notify: + after_n_builds: 4 + +coverage: + round: up + status: + project: + default: + threshold: 1% + patch: + default: + threshold: 10% + +comment: + layout: "reach,diff,flags" + require_changes: yes + after_n_builds: 4 \ No newline at end of file diff --git a/vendor/github.com/buildpacks/pack/go.mod b/vendor/github.com/buildpacks/pack/go.mod new file mode 100644 index 0000000000..00c992d7c4 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/go.mod @@ -0,0 +1,42 @@ +module github.com/buildpacks/pack + +require ( + github.com/BurntSushi/toml v0.4.1 + github.com/Masterminds/semver v1.5.0 + github.com/apex/log v1.9.0 + github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771 + github.com/buildpacks/lifecycle v0.12.0 + github.com/docker/cli v20.10.10+incompatible + github.com/docker/docker v20.10.10+incompatible + github.com/docker/go-connections v0.4.0 + github.com/gdamore/tcell/v2 v2.4.0 + github.com/ghodss/yaml v1.0.0 + github.com/golang/mock v1.6.0 + github.com/google/go-cmp v0.5.6 + github.com/google/go-containerregistry v0.6.0 + github.com/google/go-github/v30 v30.1.0 + github.com/hectane/go-acl 
v0.0.0-20190604041725-da78bae5fc95 + github.com/heroku/color v0.0.6 + github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e + github.com/moby/sys/mount v0.2.0 // indirect + github.com/onsi/gomega v1.16.0 + github.com/opencontainers/image-spec v1.0.1 + github.com/pelletier/go-toml v1.9.4 + github.com/pkg/errors v0.9.1 + github.com/rivo/tview v0.0.0-20210624165335-29d673af0ce2 + github.com/sabhiram/go-gitignore v0.0.0-20201211074657-223ce5d391b0 + github.com/sclevine/spec v1.4.0 + github.com/sergi/go-diff v1.1.0 // indirect + github.com/spf13/cobra v1.2.1 + github.com/xanzy/ssh-agent v0.3.0 // indirect + golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 + golang.org/x/mod v0.5.1 + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c + golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c // indirect + golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b + gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + gopkg.in/src-d/go-git.v4 v4.13.1 + gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b +) + +go 1.14 diff --git a/vendor/github.com/buildpacks/pack/go.sum b/vendor/github.com/buildpacks/pack/go.sum new file mode 100644 index 0000000000..45ffd8e8d8 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/go.sum @@ -0,0 +1,1369 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery 
v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
+cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
+cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
+cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
+cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
+cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
+cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
+cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
+cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
+cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
+cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
+cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
+dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795 h1:q4kDoSrHgRoD6okimjwWJOVKyxEUNS2JIuwt+EqcIqQ=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
+github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
+github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
+github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
+github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
+github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
+github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
+github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
+github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
+github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
+github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU=
+github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
+github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
+github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
+github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
+github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
+github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac=
+github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
+github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
+github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
+github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
+github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
+github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
+github.com/apex/log v1.9.0 h1:FHtw/xuaM8AgmvDDTI9fiwoAL25Sq2cxojnZICUU8l0=
+github.com/apex/log v1.9.0/go.mod h1:m82fZlWIuiWzWP04XCTXmnX0xRkYYbCdYn8jbJeLBEA=
+github.com/apex/logs v1.0.0/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo=
+github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE=
+github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
+github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
+github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
+github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
+github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
+github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
+github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
+github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
+github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
+github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
+github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
+github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
+github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771 h1:QhUkvEeYhnyj80genY7ktXVLttVc52zstwPTUDrImvE=
+github.com/buildpacks/imgutil v0.0.0-20211001201950-cf7ae41c3771/go.mod h1:ZfCbsFruoxG0vbEVMsX/afizEo4vn2e88Km7rJ1M2D8=
+github.com/buildpacks/lifecycle v0.12.0 h1:H2K4uYfe/y1mfGekOPcR5flmFgmEmaVK9+I5V9Iz+M8=
+github.com/buildpacks/lifecycle v0.12.0/go.mod h1:ub/iI72mgJT4d7JgcL06KmtvU4X4HTnFgUyqJKVYH2o=
+github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
+github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
+github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
+github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
+github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
+github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
+github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
+github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
+github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
+github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
+github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
+github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
+github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
+github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
+github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
+github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
+github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
+github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
+github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
+github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
+github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.2 h1:MG/Bg1pbmMb61j3wHCFWPxESXHieiKr2xG64px/k8zQ=
+github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
+github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
+github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
+github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0 h1:UFRRY5JemiAhPZrr/uE0n8fMTLcZsUvySPr1+D7pgr8=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
+github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
+github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
+github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
+github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
+github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
+github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
+github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.6.4/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
+github.com/containerd/stargz-snapshotter/estargz v0.7.0 h1:1d/rydzTywc76lnjJb6qbPCiTiCwts49AzKps/Ecblw=
+github.com/containerd/stargz-snapshotter/estargz v0.7.0/go.mod h1:83VWDqHnurTKliEB0YvWMiCfLDwv4Cjj1X9Vk98GJZw=
+github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
+github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
+github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
+github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
+github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
+github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
+github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
+github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
+github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
+github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
+github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
+github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
+github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
+github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
+github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v20.10.7+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.10+incompatible h1:kcbwdgWbrBOH8QwQzaJmyriHwF7XIl4HT1qh0HTRys4=
+github.com/docker/cli v20.10.10+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
+github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
+github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.8+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.10+incompatible h1:GKkP0T7U4ks6X3lmmHKC2QDprnpRJor2Z5a8m62R9ZM=
+github.com/docker/docker v20.10.10+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
+github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
+github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
+github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
+github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg=
+github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o=
+github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
+github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
+github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
+github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
+github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
+github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
+github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
+github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
+github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
+github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
+github.com/gdamore/tcell/v2 v2.3.3/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU=
+github.com/gdamore/tcell/v2 v2.4.0 h1:W6dxJEmaxYvhICFoTY3WrLLEXsQ11SaFnKGVEXW57KM=
+github.com/gdamore/tcell/v2 v2.4.0/go.mod h1:cTTuF84Dlj/RqmaCIV5p4w8uG1zWdk0SF6oBpwHp4fU=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
+github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
+github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
+github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
+github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
+github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
+github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
+github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
+github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
+github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.2-0.20210604130445-3bfab55f3bd9/go.mod h1:R5WRYyTdQqTchlBhX4q+WICGh8HQIL5wDFoFZv7Jq6Q=
+github.com/google/go-containerregistry v0.6.0 h1:niQ+8XD//kKgArIFwDVBXsWVWbde16LPdHMyNwSC8h4=
+github.com/google/go-containerregistry v0.6.0/go.mod h1:euCCtNbZ6tKqi1E72vwDj2xZcN5ttKpZLfa/wSo5iLw=
+github.com/google/go-github/v30 v30.1.0 h1:VLDx+UolQICEOKu2m4uAoMti1SxuEBAl7RSEG16L+Oo=
+github.com/google/go-github/v30 v30.1.0/go.mod h1:n8jBpHl45a/rlBUtRJMOG4GhNADUQFEufcolZ95JfU8=
+github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
+github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
+github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
+github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
+github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
+github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
+github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
+github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
+github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
+github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
+github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ=
+github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E=
+github.com/heroku/color v0.0.6 h1:UTFFMrmMLFcL3OweqP1lAdp8i1y/9oHqkeHjQ/b/Ny0=
+github.com/heroku/color v0.0.6/go.mod h1:ZBvOcx7cTF2QKOv4LbmoBtNl5uB17qWxGuzZrsi1wLU=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
+github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
+github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
+github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
+github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY=
+github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
+github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.0 h1:2T7tUoQrQT+fQWdaY5rjWztFGAFwbGD04iPJg90ZiOs=
+github.com/klauspost/compress v1.13.0/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
+github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
+github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
+github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
+github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ=
+github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
+github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
+github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
+github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
+github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.13 h1:qdl+GuBjcsKKDco5BsxPJlId98mSWNKqYA+Co0SC1yA=
+github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
+github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
+github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
+github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
+github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
+github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
+github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
+github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e h1:Qa6dnn8DlasdXRnacluu8HzPts0S1I9zvvUPDbBnXFI=
+github.com/mitchellh/ioprogress v0.0.0-20180201004757-6a23b12fa88e/go.mod h1:waEya8ee1Ro/lgxpVhkJI4BVASzkm3UZqkx/cFJiYHM=
+github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
+github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
+github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.4.1 h1:1O+1cHA1aujwEwwVMa2Xm2l+gIpUHyd3+D+d7LZh1kM=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
+github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
+github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c=
+github.com/onsi/gomega v1.16.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
+github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.0-rc93 h1:x2UMpOOVf3kQ8arv/EsDGwim8PTNqzL1/EYDr/+scOM=
+github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
+github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
+github.com/opencontainers/selinux v1.8.0 h1:+77ba4ar4jsCbL1GLbFL8fFM57w6suPfSS9PDLDY7KM=
+github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
+github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
+github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
+github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
+github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
+github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
+github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
+github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
+github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/rivo/tview v0.0.0-20210624165335-29d673af0ce2 h1:I5N0WNMgPSq5NKUFspB4jMJ6n2P0ipz5FlOlB4BXviQ=
+github.com/rivo/tview v0.0.0-20210624165335-29d673af0ce2/go.mod h1:IxQujbYMAh4trWr0Dwa8jfciForjVmxyHpskZX6aydQ=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
+github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
+github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sabhiram/go-gitignore v0.0.0-20201211074657-223ce5d391b0 h1:4Q/TASkyjpqyR5DL5+6c2FGSDpHM5bTMSspcXr7J6R8=
+github.com/sabhiram/go-gitignore v0.0.0-20201211074657-223ce5d391b0/go.mod h1:b18R55ulyQ/h3RaWyloPyER7fWQVZvimKKhnI5OfrJQ=
+github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
+github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
+github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
+github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
+github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
+github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
+github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
+github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
+github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
+github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
+github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
+github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
+github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
+github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1/go.mod 
h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= +github.com/tj/assert v0.0.3 h1:Df/BlaZ20mq6kuai7f5z2TvPFiwC3xaWJSDQNiIS3Rk= +github.com/tj/assert v0.0.3/go.mod h1:Ne6X72Q+TB1AteidzQncjw9PabbMp4PBMZ1k+vd1Pvk= +github.com/tj/go-buffer v1.1.0/go.mod h1:iyiJpfFcR2B9sXu7KvjbT9fpM4mOelRSDTbntVj52Uc= +github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= +github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= +github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= 
+github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
+github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
+github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE=
+github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
+github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4=
+github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI=
+github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0=
+github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
+github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
+github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
+github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
+github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
+github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
+go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
+go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
+go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 h1:It14KIkyBFYkHkwZ7k45minvA9aorojkyjGk9KJ5B/w=
+golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
+golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
+golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
+golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
+golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
+golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
+golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
+golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
+golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b h1:k+E048sYJHyVnsr1GDrRZWQ32D2C7lWs9JRc0bel53A=
+golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190529164535-6a60838ec259/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210608053332-aa57babbf139/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c h1:Lyn7+CqXIiC+LOR9aHD6jDK+hPcmAuCfuXztd1v4w1Q=
+golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
+golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
+golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
+golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
+golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
+google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
+google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
+google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
+google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
+google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
+google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
+google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
+google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
+google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
+google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
+google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
+google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
+google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
+google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
+google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
+google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
+google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
+google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
+google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
+google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
+google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
+google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
+google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
+google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210603172842-58e84a565dcf/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/genproto v0.0.0-20210610141715-e7a9b787a5a4 h1:Kgs5nmbQVuUAug2PXQ27hu9MSCv8uJTnrmxZj9Lj5lc=
+google.golang.org/genproto v0.0.0-20210610141715-e7a9b787a5a4/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
+google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
+google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
+google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
+google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
+google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
+google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
+google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
+google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
+google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
+google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
+google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod 
h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/vendor/github.com/buildpacks/pack/golangci.yaml b/vendor/github.com/buildpacks/pack/golangci.yaml new file mode 100644 index 0000000000..b44e01cd9c --- /dev/null +++ b/vendor/github.com/buildpacks/pack/golangci.yaml @@ -0,0 +1,32 @@ +run: + timeout: 6m + +linters: + disable-all: true + enable: + - bodyclose + - deadcode + - dogsled + - gocritic + - goimports + - golint + - gosimple + - govet + - ineffassign + - maligned + - misspell + - nakedret + - rowserrcheck + - scopelint + - staticcheck + - structcheck + - stylecheck + - typecheck + - unconvert + - unused + - varcheck + - whitespace + +linters-settings: + goimports: + local-prefixes: github.com/buildpacks/pack diff --git a/vendor/github.com/buildpacks/pack/internal/build/container_ops.go b/vendor/github.com/buildpacks/pack/internal/build/container_ops.go new file mode 100644 index 0000000000..8ea6a093ac --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/build/container_ops.go @@ -0,0 +1,283 @@ +package build + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + "github.com/BurntSushi/toml" + "github.com/buildpacks/lifecycle/platform" + "github.com/docker/docker/api/types" + dcontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/builder" + "github.com/buildpacks/pack/internal/container" + 
"github.com/buildpacks/pack/internal/paths" + "github.com/buildpacks/pack/pkg/archive" +) + +type ContainerOperation func(ctrClient client.CommonAPIClient, ctx context.Context, containerID string, stdout, stderr io.Writer) error + +// CopyDir copies a local directory (src) to the destination on the container while filtering files and changing it's UID/GID. +// if includeRoot is set the UID/GID will be set on the dst directory. +func CopyDir(src, dst string, uid, gid int, os string, includeRoot bool, fileFilter func(string) bool) ContainerOperation { + return func(ctrClient client.CommonAPIClient, ctx context.Context, containerID string, stdout, stderr io.Writer) error { + tarPath := dst + if os == "windows" { + tarPath = paths.WindowsToSlash(dst) + } + + reader, err := createReader(src, tarPath, uid, gid, includeRoot, fileFilter) + if err != nil { + return errors.Wrapf(err, "create tar archive from '%s'", src) + } + defer reader.Close() + + if os == "windows" { + return copyDirWindows(ctx, ctrClient, containerID, reader, dst, stdout, stderr) + } + return copyDir(ctx, ctrClient, containerID, reader) + } +} + +func copyDir(ctx context.Context, ctrClient client.CommonAPIClient, containerID string, appReader io.Reader) error { + var clientErr, err error + + doneChan := make(chan interface{}) + pr, pw := io.Pipe() + go func() { + clientErr = ctrClient.CopyToContainer(ctx, containerID, "/", pr, types.CopyToContainerOptions{}) + close(doneChan) + }() + func() { + defer pw.Close() + _, err = io.Copy(pw, appReader) + }() + + <-doneChan + if err == nil { + err = clientErr + } + + return err +} + +// copyDirWindows provides an alternate, Windows container-specific implementation of copyDir. +// This implementation is needed because copying directly to a mounted volume is currently buggy +// for Windows containers and does not work. Instead, we perform the copy from inside a container +// using xcopy. 
+// See: https://github.com/moby/moby/issues/40771
+func copyDirWindows(ctx context.Context, ctrClient client.CommonAPIClient, containerID string, reader io.Reader, dst string, stdout, stderr io.Writer) error {
+	info, err := ctrClient.ContainerInspect(ctx, containerID)
+	if err != nil {
+		return err
+	}
+
+	baseName := paths.WindowsBasename(dst)
+
+	mnt, err := findMount(info, dst)
+	if err != nil {
+		return err
+	}
+
+	ctr, err := ctrClient.ContainerCreate(ctx,
+		&dcontainer.Config{
+			Image: info.Image,
+			Cmd: []string{
+				"cmd",
+				"/c",
+
+				// xcopy args
+				// e - recursively create subdirectories
+				// h - copy hidden and system files
+				// b - copy symlinks, do not dereference
+				// x - copy attributes
+				// y - suppress prompting
+				fmt.Sprintf(`xcopy c:\windows\%s %s /e /h /b /x /y`, baseName, dst),
+			},
+			WorkingDir: "/",
+			User:       windowsContainerAdmin,
+		},
+		&dcontainer.HostConfig{
+			Binds:     []string{fmt.Sprintf("%s:%s", mnt.Name, mnt.Destination)},
+			Isolation: dcontainer.IsolationProcess,
+		},
+		nil, nil, "",
+	)
+	if err != nil {
+		return errors.Wrapf(err, "creating prep container")
+	}
+	defer ctrClient.ContainerRemove(context.Background(), ctr.ID, types.ContainerRemoveOptions{Force: true})
+
+	err = ctrClient.CopyToContainer(ctx, ctr.ID, "/windows", reader, types.CopyToContainerOptions{})
+	if err != nil {
+		return errors.Wrap(err, "copy app to container")
+	}
+
+	return container.RunWithHandler(
+		ctx,
+		ctrClient,
+		ctr.ID,
+		container.DefaultHandler(
+			ioutil.Discard, // Suppress xcopy output
+			stderr,
+		),
+	)
+}
+
+func findMount(info types.ContainerJSON, dst string) (types.MountPoint, error) {
+	for _, m := range info.Mounts {
+		if m.Destination == dst {
+			return m, nil
+		}
+	}
+	return types.MountPoint{}, fmt.Errorf("no matching mount found for %s", dst)
+}
+
+// WriteProjectMetadata writes the given project metadata, encoded as TOML, to the destination path (p) on the container.
+func WriteProjectMetadata(p string, metadata platform.ProjectMetadata, os string) ContainerOperation {
+	return func(ctrClient client.CommonAPIClient, ctx context.Context, containerID string, stdout, stderr io.Writer) error {
+		buf := &bytes.Buffer{}
+		err := toml.NewEncoder(buf).Encode(metadata)
+		if err != nil {
+			return errors.Wrap(err, "marshaling project metadata")
+		}
+
+		tarBuilder := archive.TarBuilder{}
+
+		tarPath := p
+		if os == "windows" {
+			tarPath = paths.WindowsToSlash(p)
+		}
+
+		tarBuilder.AddFile(tarPath, 0755, archive.NormalizedDateTime, buf.Bytes())
+		reader := tarBuilder.Reader(archive.DefaultTarWriterFactory())
+		defer reader.Close()
+
+		if os == "windows" {
+			dirName := paths.WindowsDir(p)
+			return copyDirWindows(ctx, ctrClient, containerID, reader, dirName, stdout, stderr)
+		}
+
+		return ctrClient.CopyToContainer(ctx, containerID, "/", reader, types.CopyToContainerOptions{})
+	}
+}
+
+// WriteStackToml writes a `stack.toml` based on the StackMetadata provided to the destination path.
+func WriteStackToml(dstPath string, stack builder.StackMetadata, os string) ContainerOperation {
+	return func(ctrClient client.CommonAPIClient, ctx context.Context, containerID string, stdout, stderr io.Writer) error {
+		buf := &bytes.Buffer{}
+		err := toml.NewEncoder(buf).Encode(stack)
+		if err != nil {
+			return errors.Wrap(err, "marshaling stack metadata")
+		}
+
+		tarBuilder := archive.TarBuilder{}
+
+		tarPath := dstPath
+		if os == "windows" {
+			tarPath = paths.WindowsToSlash(dstPath)
+		}
+
+		tarBuilder.AddFile(tarPath, 0755, archive.NormalizedDateTime, buf.Bytes())
+		reader := tarBuilder.Reader(archive.DefaultTarWriterFactory())
+		defer reader.Close()
+
+		if os == "windows" {
+			dirName := paths.WindowsDir(dstPath)
+			return copyDirWindows(ctx, ctrClient, containerID, reader, dirName, stdout, stderr)
+		}
+
+		return ctrClient.CopyToContainer(ctx, containerID, "/", reader, types.CopyToContainerOptions{})
+	}
+}
+
+func createReader(src, dst string, uid, gid int, includeRoot bool, fileFilter func(string) bool) (io.ReadCloser, error) {
+	fi, err := os.Stat(src)
+	if err != nil {
+		return nil, err
+	}
+
+	if fi.IsDir() {
+		var mode int64 = -1
+		if runtime.GOOS == "windows" {
+			mode = 0777
+		}
+
+		return archive.ReadDirAsTar(src, dst, uid, gid, mode, false, includeRoot, fileFilter), nil
+	}
+
+	return archive.ReadZipAsTar(src, dst, uid, gid, -1, false, fileFilter), nil
+}
+
+// EnsureVolumeAccess grants full access permissions on the given volumes to the UID/GID-based user.
+// When UID/GID are 0, it grants explicit full access to BUILTIN\Administrators; any other UID/GID grants full access to BUILTIN\Users.
+// Changing permissions on volumes through stopped containers does not work on Docker for Windows, so we start the container and make the change using icacls.
+// See: https://github.com/moby/moby/issues/40771
+func EnsureVolumeAccess(uid, gid int, os string, volumeNames ...string) ContainerOperation {
+	return func(ctrClient client.CommonAPIClient, ctx context.Context, containerID string, stdout, stderr io.Writer) error {
+		if os != "windows" {
+			return nil
+		}
+
+		containerInfo, err := ctrClient.ContainerInspect(ctx, containerID)
+		if err != nil {
+			return err
+		}
+
+		cmd := ""
+		binds := []string{}
+		for i, volumeName := range volumeNames {
+			containerPath := fmt.Sprintf("c:/volume-mnt-%d", i)
+			binds = append(binds, fmt.Sprintf("%s:%s", volumeName, containerPath))
+
+			if cmd != "" {
+				cmd += "&&"
+			}
+
+			// icacls args
+			// /grant - add new permissions instead of replacing
+			// (OI) - object inherit
+			// (CI) - container inherit
+			// F - full access
+			// /t - recursively apply
+			// /l - perform on a symbolic link itself versus its target
+			// /q - suppress success messages
+			cmd += fmt.Sprintf(`icacls %s /grant *%s:(OI)(CI)F /t /l /q`, containerPath, paths.WindowsPathSID(uid, gid))
+		}
+
+		ctr, err := ctrClient.ContainerCreate(ctx,
+			&dcontainer.Config{
+				Image:      containerInfo.Image,
+				Cmd:        []string{"cmd", "/c", cmd},
+				WorkingDir: "/",
+				User:       windowsContainerAdmin,
+			},
+			&dcontainer.HostConfig{
+				Binds:     binds,
+				Isolation: dcontainer.IsolationProcess,
+			},
+			nil, nil, "",
+		)
+		if err != nil {
+			return err
+		}
+		defer ctrClient.ContainerRemove(context.Background(), ctr.ID, types.ContainerRemoveOptions{Force: true})
+
+		return container.RunWithHandler(
+			ctx,
+			ctrClient,
+			ctr.ID,
+			container.DefaultHandler(
+				ioutil.Discard, // Suppress icacls output
+				stderr,
+			),
+		)
+	}
+}
diff --git a/vendor/github.com/buildpacks/pack/internal/build/lifecycle_execution.go
b/vendor/github.com/buildpacks/pack/internal/build/lifecycle_execution.go new file mode 100644 index 0000000000..b50cd5ec10 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/build/lifecycle_execution.go @@ -0,0 +1,555 @@ +package build + +import ( + "context" + "fmt" + "math/rand" + "strconv" + + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/auth" + "github.com/docker/docker/client" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/builder" + "github.com/buildpacks/pack/internal/cache" + "github.com/buildpacks/pack/internal/paths" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/logging" +) + +const ( + defaultProcessType = "web" + overrideGID = 0 +) + +type LifecycleExecution struct { + logger logging.Logger + docker client.CommonAPIClient + platformAPI *api.Version + layersVolume string + appVolume string + os string + mountPaths mountPaths + opts LifecycleOptions +} + +func NewLifecycleExecution(logger logging.Logger, docker client.CommonAPIClient, opts LifecycleOptions) (*LifecycleExecution, error) { + latestSupportedPlatformAPI, err := findLatestSupported(append( + opts.Builder.LifecycleDescriptor().APIs.Platform.Deprecated, + opts.Builder.LifecycleDescriptor().APIs.Platform.Supported..., + )) + if err != nil { + return nil, err + } + + osType, err := opts.Builder.Image().OS() + if err != nil { + return nil, err + } + + exec := &LifecycleExecution{ + logger: logger, + docker: docker, + layersVolume: paths.FilterReservedNames("pack-layers-" + randString(10)), + appVolume: paths.FilterReservedNames("pack-app-" + randString(10)), + platformAPI: latestSupportedPlatformAPI, + opts: opts, + os: osType, + mountPaths: mountPathsForOS(osType, opts.Workspace), + } + + if opts.Interactive { + exec.logger = opts.Termui + } + + return exec, nil +} + +func findLatestSupported(apis []*api.Version) (*api.Version, error) { + for i := len(SupportedPlatformAPIVersions) - 1; i >= 0; i-- { + for _, version := range apis { + if SupportedPlatformAPIVersions[i].Equal(version) { + return version, nil + } + } + } + + return nil, errors.New("unable to find a supported Platform API version") +} + +func randString(n int) string { + b := make([]byte, n) + for i := range b { + b[i] = 'a' + byte(rand.Intn(26)) + } + return string(b) +} + +func (l *LifecycleExecution) Builder() Builder { + return l.opts.Builder +} + +func (l *LifecycleExecution) AppPath() string { + return l.opts.AppPath +} + +func (l LifecycleExecution) AppDir() string { + return l.mountPaths.appDir() +} + +func (l *LifecycleExecution) AppVolume() string { + return l.appVolume +} + +func (l *LifecycleExecution) LayersVolume() string { + return l.layersVolume +} + +func (l *LifecycleExecution) PlatformAPI() *api.Version { + return l.platformAPI +} + +func (l *LifecycleExecution) ImageName() name.Reference { + return l.opts.Image +} + +func (l *LifecycleExecution) PrevImageName() string { + return l.opts.PreviousImage +} + +func (l *LifecycleExecution) Run(ctx context.Context, phaseFactoryCreator PhaseFactoryCreator) error { + phaseFactory := phaseFactoryCreator(l) + var buildCache Cache + if l.opts.CacheImage != "" { + cacheImage, err := name.ParseReference(l.opts.CacheImage, name.WeakValidation) + if err != nil { + return fmt.Errorf("invalid cache image name: %s", err) + } + buildCache = cache.NewImageCache(cacheImage, l.docker) + } else { + buildCache = 
cache.NewVolumeCache(l.opts.Image, "build", l.docker)
+	}
+
+	l.logger.Debugf("Using build cache volume %s", style.Symbol(buildCache.Name()))
+	if l.opts.ClearCache {
+		if err := buildCache.Clear(ctx); err != nil {
+			return errors.Wrap(err, "clearing build cache")
+		}
+		l.logger.Debugf("Build cache %s cleared", style.Symbol(buildCache.Name()))
+	}
+
+	launchCache := cache.NewVolumeCache(l.opts.Image, "launch", l.docker)
+
+	if !l.opts.UseCreator {
+		l.logger.Info(style.Step("DETECTING"))
+		if err := l.Detect(ctx, l.opts.Network, l.opts.Volumes, phaseFactory); err != nil {
+			return err
+		}
+
+		l.logger.Info(style.Step("ANALYZING"))
+		if err := l.Analyze(ctx, l.opts.Image.String(), l.opts.Network, l.opts.Publish, l.opts.DockerHost, l.opts.ClearCache, buildCache, phaseFactory); err != nil {
+			return err
+		}
+
+		l.logger.Info(style.Step("RESTORING"))
+		if l.opts.ClearCache {
+			l.logger.Info("Skipping 'restore' due to clearing cache")
+		} else if err := l.Restore(ctx, l.opts.Network, buildCache, phaseFactory); err != nil {
+			return err
+		}
+
+		l.logger.Info(style.Step("BUILDING"))
+
+		if err := l.Build(ctx, l.opts.Network, l.opts.Volumes, phaseFactory); err != nil {
+			return err
+		}
+
+		l.logger.Info(style.Step("EXPORTING"))
+		return l.Export(ctx, l.opts.Image.String(), l.opts.RunImage, l.opts.Publish, l.opts.DockerHost, l.opts.Network, buildCache, launchCache, l.opts.AdditionalTags, phaseFactory)
+	}
+
+	return l.Create(ctx, l.opts.Publish, l.opts.DockerHost, l.opts.ClearCache, l.opts.RunImage, l.opts.Image.String(), l.opts.Network, buildCache, launchCache, l.opts.AdditionalTags, l.opts.Volumes, phaseFactory)
+}
+
+func (l *LifecycleExecution) Cleanup() error {
+	var reterr error
+	if err := l.docker.VolumeRemove(context.Background(), l.layersVolume, true); err != nil {
+		reterr = errors.Wrapf(err, "failed to clean up layers volume %s", l.layersVolume)
+	}
+	if err := l.docker.VolumeRemove(context.Background(), l.appVolume, true); err != nil {
+		reterr = errors.Wrapf(err, "failed to clean up app volume %s", l.appVolume)
+	}
+	return reterr
+}
+
+func (l *LifecycleExecution) Create(ctx context.Context, publish bool, dockerHost string, clearCache bool, runImage, repoName, networkMode string, buildCache, launchCache Cache, additionalTags, volumes []string, phaseFactory PhaseFactory) error {
+	flags := addTags([]string{
+		"-app", l.mountPaths.appDir(),
+		"-cache-dir", l.mountPaths.cacheDir(),
+		"-run-image", runImage,
+	}, additionalTags)
+
+	if clearCache {
+		flags = append(flags, "-skip-restore")
+	}
+
+	if l.opts.GID >= overrideGID {
+		flags = append(flags, "-gid", strconv.Itoa(l.opts.GID))
+	}
+
+	if l.opts.PreviousImage != "" {
+		if l.opts.Image == nil {
+			return errors.New("image can't be nil")
+		}
+
+		image, err := name.ParseReference(l.opts.Image.Name(), name.WeakValidation)
+		if err != nil {
+			return fmt.Errorf("invalid image name: %s", err)
+		}
+
+		prevImage, err := name.ParseReference(l.opts.PreviousImage, name.WeakValidation)
+		if err != nil {
+			return fmt.Errorf("invalid previous image name: %s", err)
+		}
+		if publish {
+			if image.Context().RegistryStr() != prevImage.Context().RegistryStr() {
+				return fmt.Errorf(`when --publish is used, <previous-image> must be in the same image registry as <image>
+	image registry = %s
+	previous-image registry = %s`, image.Context().RegistryStr(), prevImage.Context().RegistryStr())
+			}
+		}
+
+		flags = append(flags, "-previous-image", l.opts.PreviousImage)
+	}
+
+	processType := determineDefaultProcessType(l.platformAPI, l.opts.DefaultProcessType)
+	if processType != "" { +
flags = append(flags, "-process-type", processType) + } + + var cacheOpts PhaseConfigProviderOperation + switch buildCache.Type() { + case cache.Image: + flags = append(flags, "-cache-image", buildCache.Name()) + cacheOpts = WithBinds(volumes...) + case cache.Volume: + cacheOpts = WithBinds(append(volumes, fmt.Sprintf("%s:%s", buildCache.Name(), l.mountPaths.cacheDir()))...) + } + + opts := []PhaseConfigProviderOperation{ + WithFlags(l.withLogLevel(flags...)...), + WithArgs(repoName), + WithNetwork(networkMode), + cacheOpts, + WithContainerOperations(WriteProjectMetadata(l.mountPaths.projectPath(), l.opts.ProjectMetadata, l.os)), + WithContainerOperations(CopyDir(l.opts.AppPath, l.mountPaths.appDir(), l.opts.Builder.UID(), l.opts.Builder.GID(), l.os, true, l.opts.FileFilter)), + } + + if publish { + authConfig, err := auth.BuildEnvVar(authn.DefaultKeychain, repoName) + if err != nil { + return err + } + + opts = append(opts, WithRoot(), WithRegistryAccess(authConfig)) + } else { + opts = append(opts, + WithDaemonAccess(dockerHost), + WithFlags("-daemon", "-launch-cache", l.mountPaths.launchCacheDir()), + WithBinds(fmt.Sprintf("%s:%s", launchCache.Name(), l.mountPaths.launchCacheDir())), + ) + } + + create := phaseFactory.New(NewPhaseConfigProvider("creator", l, opts...)) + defer create.Cleanup() + return create.Run(ctx) +} + +func (l *LifecycleExecution) Detect(ctx context.Context, networkMode string, volumes []string, phaseFactory PhaseFactory) error { + flags := []string{"-app", l.mountPaths.appDir()} + configProvider := NewPhaseConfigProvider( + "detector", + l, + WithLogPrefix("detector"), + WithArgs( + l.withLogLevel()..., + ), + WithNetwork(networkMode), + WithBinds(volumes...), + WithContainerOperations( + EnsureVolumeAccess(l.opts.Builder.UID(), l.opts.Builder.GID(), l.os, l.layersVolume, l.appVolume), + CopyDir(l.opts.AppPath, l.mountPaths.appDir(), l.opts.Builder.UID(), l.opts.Builder.GID(), l.os, true, l.opts.FileFilter), + ), + WithFlags(flags...), + ) + + detect := phaseFactory.New(configProvider) + defer detect.Cleanup() + return detect.Run(ctx) +} + +func (l *LifecycleExecution) Restore(ctx context.Context, networkMode string, buildCache Cache, phaseFactory PhaseFactory) error { + flagsOpt := NullOp() + cacheOpt := NullOp() + switch buildCache.Type() { + case cache.Image: + flagsOpt = WithFlags("-cache-image", buildCache.Name()) + case cache.Volume: + cacheOpt = WithBinds(fmt.Sprintf("%s:%s", buildCache.Name(), l.mountPaths.cacheDir())) + } + if l.opts.GID >= overrideGID { + flagsOpt = WithFlags("-gid", strconv.Itoa(l.opts.GID)) + } + + configProvider := NewPhaseConfigProvider( + "restorer", + l, + WithLogPrefix("restorer"), + WithImage(l.opts.LifecycleImage), + WithEnv(fmt.Sprintf("%s=%d", builder.EnvUID, l.opts.Builder.UID()), fmt.Sprintf("%s=%d", builder.EnvGID, l.opts.Builder.GID())), + WithRoot(), // remove after platform API 0.2 is no longer supported + WithArgs( + l.withLogLevel( + "-cache-dir", l.mountPaths.cacheDir(), + )..., + ), + WithNetwork(networkMode), + flagsOpt, + cacheOpt, + ) + + restore := phaseFactory.New(configProvider) + defer restore.Cleanup() + return restore.Run(ctx) +} + +func (l *LifecycleExecution) Analyze(ctx context.Context, repoName, networkMode string, publish bool, dockerHost string, clearCache bool, cache Cache, phaseFactory PhaseFactory) error { + analyze, err := l.newAnalyze(repoName, networkMode, publish, dockerHost, clearCache, cache, phaseFactory) + if err != nil { + return err + } + defer analyze.Cleanup() + return analyze.Run(ctx) +} 
+func (l *LifecycleExecution) newAnalyze(repoName, networkMode string, publish bool, dockerHost string, clearCache bool, buildCache Cache, phaseFactory PhaseFactory) (RunnerCleaner, error) {
+	args := []string{
+		repoName,
+	}
+	if clearCache {
+		args = prependArg("-skip-layers", args)
+	} else {
+		args = append([]string{"-cache-dir", l.mountPaths.cacheDir()}, args...)
+	}
+
+	cacheOpt := NullOp()
+	flagsOpt := NullOp()
+	switch buildCache.Type() {
+	case cache.Image:
+		if !clearCache {
+			flagsOpt = WithFlags("-cache-image", buildCache.Name())
+		}
+	case cache.Volume:
+		cacheOpt = WithBinds(fmt.Sprintf("%s:%s", buildCache.Name(), l.mountPaths.cacheDir()))
+	}
+
+	if l.opts.GID >= overrideGID {
+		flagsOpt = WithFlags("-gid", strconv.Itoa(l.opts.GID))
+	}
+
+	if l.opts.PreviousImage != "" {
+		if l.opts.Image == nil {
+			return nil, errors.New("image can't be nil")
+		}
+
+		image, err := name.ParseReference(l.opts.Image.Name(), name.WeakValidation)
+		if err != nil {
+			return nil, fmt.Errorf("invalid image name: %s", err)
+		}
+
+		prevImage, err := name.ParseReference(l.opts.PreviousImage, name.WeakValidation)
+		if err != nil {
+			return nil, fmt.Errorf("invalid previous image name: %s", err)
+		}
+		if publish {
+			if image.Context().RegistryStr() != prevImage.Context().RegistryStr() {
+				return nil, fmt.Errorf(`when --publish is used, <previous-image> must be in the same image registry as <image>
+	image registry = %s
+	previous-image registry = %s`, image.Context().RegistryStr(), prevImage.Context().RegistryStr())
+			}
+		}
+
+		l.opts.Image = prevImage
+	}
+
+	if publish {
+		authConfig, err := auth.BuildEnvVar(authn.DefaultKeychain, repoName)
+		if err != nil {
+			return nil, err
+		}
+
+		configProvider := NewPhaseConfigProvider(
+			"analyzer",
+			l,
+			WithLogPrefix("analyzer"),
+			WithImage(l.opts.LifecycleImage),
+			WithEnv(fmt.Sprintf("%s=%d", builder.EnvUID, l.opts.Builder.UID()), fmt.Sprintf("%s=%d", builder.EnvGID, l.opts.Builder.GID())),
+			WithRegistryAccess(authConfig),
+			WithRoot(),
+			WithArgs(l.withLogLevel(args...)...),
+			WithNetwork(networkMode),
+			flagsOpt,
+			cacheOpt,
+		)
+
+		return phaseFactory.New(configProvider), nil
+	}
+
+	// TODO: when platform API 0.2 is no longer supported we can delete this code: https://github.com/buildpacks/pack/issues/629.
+ configProvider := NewPhaseConfigProvider( + "analyzer", + l, + WithLogPrefix("analyzer"), + WithImage(l.opts.LifecycleImage), + WithEnv( + fmt.Sprintf("%s=%d", builder.EnvUID, l.opts.Builder.UID()), + fmt.Sprintf("%s=%d", builder.EnvGID, l.opts.Builder.GID()), + ), + WithDaemonAccess(dockerHost), + WithArgs( + l.withLogLevel( + prependArg( + "-daemon", + args, + )..., + )..., + ), + flagsOpt, + WithNetwork(networkMode), + cacheOpt, + ) + + return phaseFactory.New(configProvider), nil +} + +func (l *LifecycleExecution) Build(ctx context.Context, networkMode string, volumes []string, phaseFactory PhaseFactory) error { + flags := []string{"-app", l.mountPaths.appDir()} + configProvider := NewPhaseConfigProvider( + "builder", + l, + WithLogPrefix("builder"), + WithArgs(l.withLogLevel()...), + WithNetwork(networkMode), + WithBinds(volumes...), + WithFlags(flags...), + ) + + build := phaseFactory.New(configProvider) + defer build.Cleanup() + return build.Run(ctx) +} + +func determineDefaultProcessType(platformAPI *api.Version, providedValue string) string { + shouldSetForceDefault := platformAPI.Compare(api.MustParse("0.4")) >= 0 && + platformAPI.Compare(api.MustParse("0.6")) < 0 + if providedValue == "" && shouldSetForceDefault { + return defaultProcessType + } + + return providedValue +} + +func (l *LifecycleExecution) newExport(repoName, runImage string, publish bool, dockerHost, networkMode string, buildCache, launchCache Cache, additionalTags []string, phaseFactory PhaseFactory) (RunnerCleaner, error) { + flags := []string{ + "-app", l.mountPaths.appDir(), + "-cache-dir", l.mountPaths.cacheDir(), + "-stack", l.mountPaths.stackPath(), + "-run-image", runImage, + } + + processType := determineDefaultProcessType(l.platformAPI, l.opts.DefaultProcessType) + if processType != "" { + flags = append(flags, "-process-type", processType) + } + if l.opts.GID >= overrideGID { + flags = append(flags, "-gid", strconv.Itoa(l.opts.GID)) + } + + cacheOpt := NullOp() + switch buildCache.Type() { + case cache.Image: + flags = append(flags, "-cache-image", buildCache.Name()) + case cache.Volume: + cacheOpt = WithBinds(fmt.Sprintf("%s:%s", buildCache.Name(), l.mountPaths.cacheDir())) + } + + opts := []PhaseConfigProviderOperation{ + WithLogPrefix("exporter"), + WithImage(l.opts.LifecycleImage), + WithEnv( + fmt.Sprintf("%s=%d", builder.EnvUID, l.opts.Builder.UID()), + fmt.Sprintf("%s=%d", builder.EnvGID, l.opts.Builder.GID()), + ), + WithFlags( + l.withLogLevel(flags...)..., + ), + WithArgs(append([]string{repoName}, additionalTags...)...), + WithRoot(), + WithNetwork(networkMode), + cacheOpt, + WithContainerOperations(WriteStackToml(l.mountPaths.stackPath(), l.opts.Builder.Stack(), l.os)), + WithContainerOperations(WriteProjectMetadata(l.mountPaths.projectPath(), l.opts.ProjectMetadata, l.os)), + } + + if publish { + authConfig, err := auth.BuildEnvVar(authn.DefaultKeychain, repoName, runImage) + if err != nil { + return nil, err + } + + opts = append( + opts, + WithRegistryAccess(authConfig), + WithRoot(), + ) + } else { + opts = append( + opts, + WithDaemonAccess(dockerHost), + WithFlags("-daemon", "-launch-cache", l.mountPaths.launchCacheDir()), + WithBinds(fmt.Sprintf("%s:%s", launchCache.Name(), l.mountPaths.launchCacheDir())), + ) + } + + return phaseFactory.New(NewPhaseConfigProvider("exporter", l, opts...)), nil +} + +func (l *LifecycleExecution) Export(ctx context.Context, repoName, runImage string, publish bool, dockerHost, networkMode string, buildCache, launchCache Cache, additionalTags []string, 
phaseFactory PhaseFactory) error { + export, err := l.newExport(repoName, runImage, publish, dockerHost, networkMode, buildCache, launchCache, additionalTags, phaseFactory) + if err != nil { + return err + } + defer export.Cleanup() + return export.Run(ctx) +} + +func (l *LifecycleExecution) withLogLevel(args ...string) []string { + if l.logger.IsVerbose() { + return append([]string{"-log-level", "debug"}, args...) + } + return args +} + +func prependArg(arg string, args []string) []string { + return append([]string{arg}, args...) +} + +func addTags(flags, additionalTags []string) []string { + for _, tag := range additionalTags { + flags = append(flags, "-tag", tag) + } + return flags +} diff --git a/vendor/github.com/buildpacks/pack/internal/build/lifecycle_executor.go b/vendor/github.com/buildpacks/pack/internal/build/lifecycle_executor.go new file mode 100644 index 0000000000..95ccbb214f --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/build/lifecycle_executor.go @@ -0,0 +1,108 @@ +package build + +import ( + "context" + "math/rand" + "time" + + "github.com/buildpacks/imgutil" + "github.com/buildpacks/lifecycle/api" + "github.com/buildpacks/lifecycle/platform" + "github.com/docker/docker/client" + "github.com/google/go-containerregistry/pkg/name" + + "github.com/buildpacks/pack/internal/builder" + "github.com/buildpacks/pack/internal/cache" + "github.com/buildpacks/pack/internal/container" + "github.com/buildpacks/pack/pkg/logging" +) + +var ( + // SupportedPlatformAPIVersions lists the Platform API versions pack supports listed from earliest to latest + SupportedPlatformAPIVersions = builder.APISet{ + api.MustParse("0.3"), + api.MustParse("0.4"), + api.MustParse("0.5"), + api.MustParse("0.6"), + } +) + +type Builder interface { + Name() string + UID() int + GID() int + LifecycleDescriptor() builder.LifecycleDescriptor + Stack() builder.StackMetadata + Image() imgutil.Image +} + +type LifecycleExecutor struct { + logger logging.Logger + docker client.CommonAPIClient +} + +type Cache interface { + Name() string + Clear(context.Context) error + Type() cache.Type +} + +type Termui interface { + logging.Logger + + Run(funk func()) error + Handler() container.Handler +} + +func init() { + rand.Seed(time.Now().UTC().UnixNano()) +} + +type LifecycleOptions struct { + AppPath string + Image name.Reference + Builder Builder + LifecycleImage string + RunImage string + ProjectMetadata platform.ProjectMetadata + ClearCache bool + Publish bool + TrustBuilder bool + UseCreator bool + Interactive bool + Termui Termui + DockerHost string + CacheImage string + HTTPProxy string + HTTPSProxy string + NoProxy string + Network string + AdditionalTags []string + Volumes []string + DefaultProcessType string + FileFilter func(string) bool + Workspace string + GID int + PreviousImage string +} + +func NewLifecycleExecutor(logger logging.Logger, docker client.CommonAPIClient) *LifecycleExecutor { + return &LifecycleExecutor{logger: logger, docker: docker} +} + +func (l *LifecycleExecutor) Execute(ctx context.Context, opts LifecycleOptions) error { + lifecycleExec, err := NewLifecycleExecution(l.logger, l.docker, opts) + if err != nil { + return err + } + + if !opts.Interactive { + defer lifecycleExec.Cleanup() + return lifecycleExec.Run(ctx, NewDefaultPhaseFactory) + } + + return opts.Termui.Run(func() { + defer lifecycleExec.Cleanup() + lifecycleExec.Run(ctx, NewDefaultPhaseFactory) + }) +} diff --git a/vendor/github.com/buildpacks/pack/internal/build/mount_paths.go 
b/vendor/github.com/buildpacks/pack/internal/build/mount_paths.go new file mode 100644 index 0000000000..015052de75 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/build/mount_paths.go @@ -0,0 +1,59 @@ +package build + +import "strings" + +type mountPaths struct { + volume string + separator string + workspace string +} + +func mountPathsForOS(os, workspace string) mountPaths { + if workspace == "" { + workspace = "workspace" + } + if os == "windows" { + return mountPaths{ + volume: `c:`, + separator: `\`, + workspace: workspace, + } + } + return mountPaths{ + volume: "", + separator: "/", + workspace: workspace, + } +} + +func (m mountPaths) join(parts ...string) string { + return strings.Join(parts, m.separator) +} + +func (m mountPaths) layersDir() string { + return m.join(m.volume, "layers") +} + +func (m mountPaths) stackPath() string { + return m.join(m.layersDir(), "stack.toml") +} + +func (m mountPaths) projectPath() string { + return m.join(m.layersDir(), "project-metadata.toml") +} + +func (m mountPaths) appDirName() string { + return m.workspace +} + +func (m mountPaths) appDir() string { + return m.join(m.volume, m.appDirName()) +} + +func (m mountPaths) cacheDir() string { + return m.join(m.volume, "cache") +} + +func (m mountPaths) launchCacheDir() string { + return m.join(m.volume, "launch-cache") +} diff --git a/vendor/github.com/buildpacks/pack/internal/build/phase.go b/vendor/github.com/buildpacks/pack/internal/build/phase.go new file mode 100644 index 0000000000..3793977d06 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/build/phase.go @@ -0,0 +1,58 @@ +package build + +import ( + "context" + "io" + + "github.com/docker/docker/api/types" + dcontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/container" +) + +type Phase struct { + name string + infoWriter io.Writer + errorWriter io.Writer + docker client.CommonAPIClient + handler container.Handler + ctrConf *dcontainer.Config + hostConf *dcontainer.HostConfig + ctr dcontainer.ContainerCreateCreatedBody + uid, gid int + appPath string + containerOps []ContainerOperation + fileFilter func(string) bool +} + +func (p *Phase) Run(ctx context.Context) error { + var err error + p.ctr, err = p.docker.ContainerCreate(ctx, p.ctrConf, p.hostConf, nil, nil, "") + if err != nil { + return errors.Wrapf(err, "failed to create '%s' container", p.name) + } + + for _, containerOp := range p.containerOps { + if err := containerOp(p.docker, ctx, p.ctr.ID, p.infoWriter, p.errorWriter); err != nil { + return err + } + } + + handler := container.DefaultHandler(p.infoWriter, p.errorWriter) + if p.handler != nil { + handler = p.handler + } + + return container.RunWithHandler( + ctx, + p.docker, + p.ctr.ID, + handler, + ) +} + +func (p *Phase) Cleanup() error { + return p.docker.ContainerRemove(context.Background(), p.ctr.ID, types.ContainerRemoveOptions{Force: true}) +} diff --git a/vendor/github.com/buildpacks/pack/internal/build/phase_config_provider.go b/vendor/github.com/buildpacks/pack/internal/build/phase_config_provider.go new file mode 100644 index 0000000000..c9edc29adf --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/build/phase_config_provider.go @@ -0,0 +1,233 @@ +package build + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/docker/docker/api/types/container" + + pcontainer "github.com/buildpacks/pack/internal/container" + "github.com/buildpacks/pack/internal/style" + 
"github.com/buildpacks/pack/pkg/logging" +) + +const ( + linuxContainerAdmin = "root" + windowsContainerAdmin = "ContainerAdministrator" + platformAPIEnvVar = "CNB_PLATFORM_API" +) + +type PhaseConfigProviderOperation func(*PhaseConfigProvider) + +type PhaseConfigProvider struct { + ctrConf *container.Config + hostConf *container.HostConfig + name string + os string + containerOps []ContainerOperation + infoWriter io.Writer + errorWriter io.Writer + handler pcontainer.Handler +} + +func NewPhaseConfigProvider(name string, lifecycleExec *LifecycleExecution, ops ...PhaseConfigProviderOperation) *PhaseConfigProvider { + provider := &PhaseConfigProvider{ + ctrConf: new(container.Config), + hostConf: new(container.HostConfig), + name: name, + os: lifecycleExec.os, + infoWriter: logging.GetWriterForLevel(lifecycleExec.logger, logging.InfoLevel), + errorWriter: logging.GetWriterForLevel(lifecycleExec.logger, logging.ErrorLevel), + } + + provider.ctrConf.Image = lifecycleExec.opts.Builder.Name() + provider.ctrConf.Labels = map[string]string{"author": "pack"} + + if lifecycleExec.os == "windows" { + provider.hostConf.Isolation = container.IsolationProcess + } + + ops = append(ops, + WithEnv(fmt.Sprintf("%s=%s", platformAPIEnvVar, lifecycleExec.platformAPI.String())), + WithLifecycleProxy(lifecycleExec), + WithBinds([]string{ + fmt.Sprintf("%s:%s", lifecycleExec.layersVolume, lifecycleExec.mountPaths.layersDir()), + fmt.Sprintf("%s:%s", lifecycleExec.appVolume, lifecycleExec.mountPaths.appDir()), + }...), + ) + + for _, op := range ops { + op(provider) + } + + provider.ctrConf.Cmd = append([]string{"/cnb/lifecycle/" + name}, provider.ctrConf.Cmd...) + + lifecycleExec.logger.Debugf("Running the %s on OS %s with:", style.Symbol(provider.Name()), style.Symbol(provider.os)) + lifecycleExec.logger.Debug("Container Settings:") + lifecycleExec.logger.Debugf(" Args: %s", style.Symbol(strings.Join(provider.ctrConf.Cmd, " "))) + lifecycleExec.logger.Debugf(" System Envs: %s", style.Symbol(strings.Join(provider.ctrConf.Env, " "))) + lifecycleExec.logger.Debugf(" Image: %s", style.Symbol(provider.ctrConf.Image)) + lifecycleExec.logger.Debugf(" User: %s", style.Symbol(provider.ctrConf.User)) + lifecycleExec.logger.Debugf(" Labels: %s", style.Symbol(fmt.Sprintf("%s", provider.ctrConf.Labels))) + + lifecycleExec.logger.Debug("Host Settings:") + lifecycleExec.logger.Debugf(" Binds: %s", style.Symbol(strings.Join(provider.hostConf.Binds, " "))) + lifecycleExec.logger.Debugf(" Network Mode: %s", style.Symbol(string(provider.hostConf.NetworkMode))) + + if lifecycleExec.opts.Interactive { + provider.handler = lifecycleExec.opts.Termui.Handler() + } + + return provider +} + +func (p *PhaseConfigProvider) ContainerConfig() *container.Config { + return p.ctrConf +} + +func (p *PhaseConfigProvider) ContainerOps() []ContainerOperation { + return p.containerOps +} + +func (p *PhaseConfigProvider) HostConfig() *container.HostConfig { + return p.hostConf +} + +func (p *PhaseConfigProvider) Handler() pcontainer.Handler { + return p.handler +} + +func (p *PhaseConfigProvider) Name() string { + return p.name +} + +func (p *PhaseConfigProvider) ErrorWriter() io.Writer { + return p.errorWriter +} + +func (p *PhaseConfigProvider) InfoWriter() io.Writer { + return p.infoWriter +} + +func NullOp() PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) {} +} + +func WithArgs(args ...string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.ctrConf.Cmd = 
append(provider.ctrConf.Cmd, args...) + } +} + +// WithFlags differs from WithArgs as flags are always prepended +func WithFlags(flags ...string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.ctrConf.Cmd = append(flags, provider.ctrConf.Cmd...) + } +} + +func WithBinds(binds ...string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.hostConf.Binds = append(provider.hostConf.Binds, binds...) + } +} + +func WithDaemonAccess(dockerHost string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + WithRoot()(provider) + if dockerHost == "inherit" { + dockerHost = os.Getenv("DOCKER_HOST") + } + var bind string + if dockerHost == "" { + bind = "/var/run/docker.sock:/var/run/docker.sock" + if provider.os == "windows" { + bind = `\\.\pipe\docker_engine:\\.\pipe\docker_engine` + } + } else { + switch { + case strings.HasPrefix(dockerHost, "unix://"): + bind = fmt.Sprintf("%s:/var/run/docker.sock", strings.TrimPrefix(dockerHost, "unix://")) + case strings.HasPrefix(dockerHost, "npipe://") || strings.HasPrefix(dockerHost, `npipe:\\`): + sub := ([]rune(dockerHost))[8:] + bind = fmt.Sprintf(`%s:\\.\pipe\docker_engine`, string(sub)) + default: + provider.ctrConf.Env = append(provider.ctrConf.Env, fmt.Sprintf(`DOCKER_HOST=%s`, dockerHost)) + } + } + if bind != "" { + provider.hostConf.Binds = append(provider.hostConf.Binds, bind) + } + if provider.os != "windows" { + provider.hostConf.SecurityOpt = []string{"label=disable"} + } + } +} + +func WithEnv(envs ...string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.ctrConf.Env = append(provider.ctrConf.Env, envs...) + } +} + +func WithImage(image string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.ctrConf.Image = image + } +} + +// WithLogPrefix sets a prefix for logs produced by this phase +func WithLogPrefix(prefix string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + if prefix != "" { + provider.infoWriter = logging.NewPrefixWriter(provider.infoWriter, prefix) + provider.errorWriter = logging.NewPrefixWriter(provider.errorWriter, prefix) + } + } +} + +func WithLifecycleProxy(lifecycleExec *LifecycleExecution) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + if lifecycleExec.opts.HTTPProxy != "" { + provider.ctrConf.Env = append(provider.ctrConf.Env, "HTTP_PROXY="+lifecycleExec.opts.HTTPProxy, "http_proxy="+lifecycleExec.opts.HTTPProxy) + } + + if lifecycleExec.opts.HTTPSProxy != "" { + provider.ctrConf.Env = append(provider.ctrConf.Env, "HTTPS_PROXY="+lifecycleExec.opts.HTTPSProxy, "https_proxy="+lifecycleExec.opts.HTTPSProxy) + } + + if lifecycleExec.opts.NoProxy != "" { + provider.ctrConf.Env = append(provider.ctrConf.Env, "NO_PROXY="+lifecycleExec.opts.NoProxy, "no_proxy="+lifecycleExec.opts.NoProxy) + } + } +} + +func WithNetwork(networkMode string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.hostConf.NetworkMode = container.NetworkMode(networkMode) + } +} + +func WithRegistryAccess(authConfig string) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.ctrConf.Env = append(provider.ctrConf.Env, fmt.Sprintf(`CNB_REGISTRY_AUTH=%s`, authConfig)) + } +} + +func WithRoot() PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + if provider.os == "windows" { + provider.ctrConf.User = 
windowsContainerAdmin + } else { + provider.ctrConf.User = linuxContainerAdmin + } + } +} + +func WithContainerOperations(operations ...ContainerOperation) PhaseConfigProviderOperation { + return func(provider *PhaseConfigProvider) { + provider.containerOps = append(provider.containerOps, operations...) + } +} diff --git a/vendor/github.com/buildpacks/pack/internal/build/phase_factory.go b/vendor/github.com/buildpacks/pack/internal/build/phase_factory.go new file mode 100644 index 0000000000..ff6248b6c6 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/build/phase_factory.go @@ -0,0 +1,39 @@ +package build + +import "context" + +type RunnerCleaner interface { + Run(ctx context.Context) error + Cleanup() error +} + +type PhaseFactory interface { + New(provider *PhaseConfigProvider) RunnerCleaner +} + +type DefaultPhaseFactory struct { + lifecycleExec *LifecycleExecution +} + +type PhaseFactoryCreator func(*LifecycleExecution) PhaseFactory + +func NewDefaultPhaseFactory(lifecycleExec *LifecycleExecution) PhaseFactory { + return &DefaultPhaseFactory{lifecycleExec: lifecycleExec} +} + +func (m *DefaultPhaseFactory) New(provider *PhaseConfigProvider) RunnerCleaner { + return &Phase{ + ctrConf: provider.ContainerConfig(), + hostConf: provider.HostConfig(), + name: provider.Name(), + docker: m.lifecycleExec.docker, + infoWriter: provider.InfoWriter(), + errorWriter: provider.ErrorWriter(), + handler: provider.handler, + uid: m.lifecycleExec.opts.Builder.UID(), + gid: m.lifecycleExec.opts.Builder.GID(), + appPath: m.lifecycleExec.opts.AppPath, + containerOps: provider.containerOps, + fileFilter: m.lifecycleExec.opts.FileFilter, + } +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/builder.go b/vendor/github.com/buildpacks/pack/internal/builder/builder.go new file mode 100644 index 0000000000..e32d70f7a1 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/builder.go @@ -0,0 +1,785 @@ +package builder + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + "github.com/BurntSushi/toml" + "github.com/buildpacks/imgutil" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/builder" + "github.com/buildpacks/pack/internal/layer" + "github.com/buildpacks/pack/internal/stack" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/archive" + "github.com/buildpacks/pack/pkg/buildpack" + "github.com/buildpacks/pack/pkg/dist" + "github.com/buildpacks/pack/pkg/logging" +) + +const ( + packName = "Pack CLI" + + cnbDir = "/cnb" + + orderPath = "/cnb/order.toml" + stackPath = "/cnb/stack.toml" + platformDir = "/platform" + lifecycleDir = "/cnb/lifecycle" + compatLifecycleDir = "/lifecycle" + workspaceDir = "/workspace" + layersDir = "/layers" + + metadataLabel = "io.buildpacks.builder.metadata" + stackLabel = "io.buildpacks.stack.id" + + EnvUID = "CNB_USER_ID" + EnvGID = "CNB_GROUP_ID" + + BuildpackOnBuilderMessage = `buildpack %s already exists on builder and will be overwritten + - existing diffID: %s + - new diffID: %s` + + BuildpackPreviouslyDefinedMessage = `buildpack %s was previously defined with different contents and will be overwritten + - previous diffID: %s + - using diffID: %s` +) + +// Builder represents a pack builder, used to build images +type Builder struct { + baseImageName string + image imgutil.Image + layerWriterFactory archive.TarWriterFactory + lifecycle Lifecycle + lifecycleDescriptor LifecycleDescriptor + 
additionalBuildpacks []buildpack.Buildpack + metadata Metadata + mixins []string + env map[string]string + uid, gid int + StackID string + replaceOrder bool + order dist.Order +} + +type orderTOML struct { + Order dist.Order `toml:"order"` +} + +// FromImage constructs a builder from a builder image +func FromImage(img imgutil.Image) (*Builder, error) { + var metadata Metadata + if ok, err := dist.GetLabel(img, metadataLabel, &metadata); err != nil { + return nil, errors.Wrapf(err, "getting label %s", metadataLabel) + } else if !ok { + return nil, fmt.Errorf("builder %s missing label %s -- try recreating builder", style.Symbol(img.Name()), style.Symbol(metadataLabel)) + } + return constructBuilder(img, "", metadata) +} + +// New constructs a new builder from a base image +func New(baseImage imgutil.Image, name string) (*Builder, error) { + var metadata Metadata + if _, err := dist.GetLabel(baseImage, metadataLabel, &metadata); err != nil { + return nil, errors.Wrapf(err, "getting label %s", metadataLabel) + } + return constructBuilder(baseImage, name, metadata) +} + +func constructBuilder(img imgutil.Image, newName string, metadata Metadata) (*Builder, error) { + imageOS, err := img.OS() + if err != nil { + return nil, errors.Wrap(err, "getting image OS") + } + layerWriterFactory, err := layer.NewWriterFactory(imageOS) + if err != nil { + return nil, err + } + + bldr := &Builder{ + baseImageName: img.Name(), + image: img, + layerWriterFactory: layerWriterFactory, + metadata: metadata, + lifecycleDescriptor: constructLifecycleDescriptor(metadata), + env: map[string]string{}, + } + + if err := addImgLabelsToBuildr(bldr); err != nil { + return nil, errors.Wrap(err, "adding image labels to builder") + } + + if newName != "" && img.Name() != newName { + img.Rename(newName) + } + + return bldr, nil +} + +func constructLifecycleDescriptor(metadata Metadata) LifecycleDescriptor { + return CompatDescriptor(LifecycleDescriptor{ + Info: LifecycleInfo{ + Version: metadata.Lifecycle.Version, + }, + API: metadata.Lifecycle.API, + APIs: metadata.Lifecycle.APIs, + }) +} + +func addImgLabelsToBuildr(bldr *Builder) error { + var err error + bldr.uid, bldr.gid, err = userAndGroupIDs(bldr.image) + if err != nil { + return err + } + + bldr.StackID, err = bldr.image.Label(stackLabel) + if err != nil { + return errors.Wrapf(err, "get label %s from image %s", style.Symbol(stackLabel), style.Symbol(bldr.image.Name())) + } + if bldr.StackID == "" { + return fmt.Errorf("image %s missing label %s", style.Symbol(bldr.image.Name()), style.Symbol(stackLabel)) + } + + if _, err = dist.GetLabel(bldr.image, stack.MixinsLabel, &bldr.mixins); err != nil { + return errors.Wrapf(err, "getting label %s", stack.MixinsLabel) + } + + if _, err = dist.GetLabel(bldr.image, OrderLabel, &bldr.order); err != nil { + return errors.Wrapf(err, "getting label %s", OrderLabel) + } + + return nil +} + +// Getters + +// Description returns the builder description +func (b *Builder) Description() string { + return b.metadata.Description +} + +// LifecycleDescriptor returns the LifecycleDescriptor +func (b *Builder) LifecycleDescriptor() LifecycleDescriptor { + return b.lifecycleDescriptor +} + +// Buildpacks returns the buildpack list +func (b *Builder) Buildpacks() []dist.BuildpackInfo { + return b.metadata.Buildpacks +} + +// CreatedBy returns metadata around the creation of the builder +func (b *Builder) CreatedBy() CreatorMetadata { + return b.metadata.CreatedBy +} + +// Order returns the order +func (b *Builder) Order() dist.Order { + 
return b.order +} + +// Name returns the name of the builder +func (b *Builder) Name() string { + return b.image.Name() +} + +// Image returns the base image +func (b *Builder) Image() imgutil.Image { + return b.image +} + +// Stack returns the stack metadata +func (b *Builder) Stack() StackMetadata { + return b.metadata.Stack +} + +// Mixins returns the mixins of the builder +func (b *Builder) Mixins() []string { + return b.mixins +} + +// UID returns the UID of the builder +func (b *Builder) UID() int { + return b.uid +} + +// GID returns the GID of the builder +func (b *Builder) GID() int { + return b.gid +} + +// Setters + +// AddBuildpack adds a buildpack to the builder +func (b *Builder) AddBuildpack(bp buildpack.Buildpack) { + b.additionalBuildpacks = append(b.additionalBuildpacks, bp) + b.metadata.Buildpacks = append(b.metadata.Buildpacks, bp.Descriptor().Info) +} + +// SetLifecycle sets the lifecycle of the builder +func (b *Builder) SetLifecycle(lifecycle Lifecycle) { + b.lifecycle = lifecycle + b.lifecycleDescriptor = lifecycle.Descriptor() +} + +// SetEnv sets an environment variable to a value +func (b *Builder) SetEnv(env map[string]string) { + b.env = env +} + +// SetOrder sets the order of the builder +func (b *Builder) SetOrder(order dist.Order) { + b.order = order + b.replaceOrder = true +} + +// SetDescription sets the description of the builder +func (b *Builder) SetDescription(description string) { + b.metadata.Description = description +} + +// SetStack sets the stack of the builder +func (b *Builder) SetStack(stackConfig builder.StackConfig) { + b.metadata.Stack = StackMetadata{ + RunImage: RunImageMetadata{ + Image: stackConfig.RunImage, + Mirrors: stackConfig.RunImageMirrors, + }, + } +} + +// Save saves the builder +func (b *Builder) Save(logger logging.Logger, creatorMetadata CreatorMetadata) error { + logger.Debugf("Creating builder with the following buildpacks:") + for _, bpInfo := range b.metadata.Buildpacks { + logger.Debugf("-> %s", style.Symbol(bpInfo.FullName())) + } + + resolvedOrder, err := processOrder(b.metadata.Buildpacks, b.order) + if err != nil { + return errors.Wrap(err, "processing order") + } + + tmpDir, err := ioutil.TempDir("", "create-builder-scratch") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + dirsTar, err := b.defaultDirsLayer(tmpDir) + if err != nil { + return err + } + if err := b.image.AddLayer(dirsTar); err != nil { + return errors.Wrap(err, "adding default dirs layer") + } + + if b.lifecycle != nil { + lifecycleDescriptor := b.lifecycle.Descriptor() + b.metadata.Lifecycle.LifecycleInfo = lifecycleDescriptor.Info + b.metadata.Lifecycle.API = lifecycleDescriptor.API + b.metadata.Lifecycle.APIs = lifecycleDescriptor.APIs + lifecycleTar, err := b.lifecycleLayer(tmpDir) + if err != nil { + return err + } + if err := b.image.AddLayer(lifecycleTar); err != nil { + return errors.Wrap(err, "adding lifecycle layer") + } + } + + if err := validateBuildpacks(b.StackID, b.Mixins(), b.LifecycleDescriptor(), b.Buildpacks(), b.additionalBuildpacks); err != nil { + return errors.Wrap(err, "validating buildpacks") + } + + bpLayers := dist.BuildpackLayers{} + if _, err := dist.GetLabel(b.image, dist.BuildpackLayersLabel, &bpLayers); err != nil { + return errors.Wrapf(err, "getting label %s", dist.BuildpackLayersLabel) + } + + err = addBuildpacks(logger, tmpDir, b.image, b.additionalBuildpacks, bpLayers) + if err != nil { + return err + } + + if err := dist.SetLabel(b.image, dist.BuildpackLayersLabel, bpLayers); err != nil { + 
return err + } + + if b.replaceOrder { + orderTar, err := b.orderLayer(resolvedOrder, tmpDir) + if err != nil { + return err + } + if err := b.image.AddLayer(orderTar); err != nil { + return errors.Wrap(err, "adding order.tar layer") + } + + if err := dist.SetLabel(b.image, OrderLabel, b.order); err != nil { + return err + } + } + + stackTar, err := b.stackLayer(tmpDir) + if err != nil { + return err + } + if err := b.image.AddLayer(stackTar); err != nil { + return errors.Wrap(err, "adding stack.tar layer") + } + + if len(b.env) > 0 { + logger.Debugf("Provided Environment Variables\n %s", style.Map(b.env, " ", "\n")) + } + + envTar, err := b.envLayer(tmpDir, b.env) + if err != nil { + return err + } + + if err := b.image.AddLayer(envTar); err != nil { + return errors.Wrap(err, "adding env layer") + } + + if creatorMetadata.Name == "" { + creatorMetadata.Name = packName + } + + b.metadata.CreatedBy = creatorMetadata + + if err := dist.SetLabel(b.image, metadataLabel, b.metadata); err != nil { + return err + } + + if err := dist.SetLabel(b.image, stack.MixinsLabel, b.mixins); err != nil { + return err + } + + if err := b.image.SetWorkingDir(layersDir); err != nil { + return errors.Wrap(err, "failed to set working dir") + } + + return b.image.Save() +} + +// Helpers + +func addBuildpacks(logger logging.Logger, tmpDir string, image imgutil.Image, additionalBuildpacks []buildpack.Buildpack, bpLayers dist.BuildpackLayers) error { + type buildpackToAdd struct { + tarPath string + diffID string + buildpack buildpack.Buildpack + } + + buildpacksToAdd := map[string]buildpackToAdd{} + for i, bp := range additionalBuildpacks { + // create buildpack directory + bpTmpDir := filepath.Join(tmpDir, strconv.Itoa(i)) + if err := os.MkdirAll(bpTmpDir, os.ModePerm); err != nil { + return errors.Wrap(err, "creating buildpack temp dir") + } + + // create tar file + bpLayerTar, err := buildpack.ToLayerTar(bpTmpDir, bp) + if err != nil { + return err + } + + // generate diff id + diffID, err := dist.LayerDiffID(bpLayerTar) + if err != nil { + return errors.Wrapf(err, + "getting content hashes for buildpack %s", + style.Symbol(bp.Descriptor().Info.FullName()), + ) + } + + bpInfo := bp.Descriptor().Info + // check against builder layers + if existingBPInfo, ok := bpLayers[bpInfo.ID][bpInfo.Version]; ok { + if existingBPInfo.LayerDiffID == diffID.String() { + logger.Debugf("Buildpack %s already exists on builder with same contents, skipping...", style.Symbol(bpInfo.FullName())) + continue + } + + logger.Debugf(BuildpackOnBuilderMessage, style.Symbol(bpInfo.FullName()), style.Symbol(existingBPInfo.LayerDiffID), style.Symbol(diffID.String())) + } + + // check against other buildpacks to be added + if otherAdditionalBP, ok := buildpacksToAdd[bp.Descriptor().Info.FullName()]; ok { + if otherAdditionalBP.diffID == diffID.String() { + logger.Debugf("Buildpack %s with same contents is already being added, skipping...", style.Symbol(bpInfo.FullName())) + continue + } + + logger.Debugf(BuildpackPreviouslyDefinedMessage, style.Symbol(bpInfo.FullName()), style.Symbol(otherAdditionalBP.diffID), style.Symbol(diffID.String())) + } + + // note: if same id@version is in additionalBuildpacks, last one wins (see warnings above) + buildpacksToAdd[bp.Descriptor().Info.FullName()] = buildpackToAdd{ + tarPath: bpLayerTar, + diffID: diffID.String(), + buildpack: bp, + } + } + + for _, bp := range buildpacksToAdd { + logger.Debugf("Adding buildpack %s (diffID=%s)", style.Symbol(bp.buildpack.Descriptor().Info.FullName()), bp.diffID) + if err 
:= image.AddLayerWithDiffID(bp.tarPath, bp.diffID); err != nil { + return errors.Wrapf(err, + "adding layer tar for buildpack %s", + style.Symbol(bp.buildpack.Descriptor().Info.FullName()), + ) + } + + dist.AddBuildpackToLayersMD(bpLayers, bp.buildpack.Descriptor(), bp.diffID) + } + + return nil +} + +func processOrder(buildpacks []dist.BuildpackInfo, order dist.Order) (dist.Order, error) { + resolvedOrder := dist.Order{} + + for gi, g := range order { + resolvedOrder = append(resolvedOrder, dist.OrderEntry{}) + + for _, bpRef := range g.Group { + var matchingBps []dist.BuildpackInfo + for _, bp := range buildpacks { + if bpRef.ID == bp.ID { + matchingBps = append(matchingBps, bp) + } + } + + if len(matchingBps) == 0 { + return dist.Order{}, fmt.Errorf("no versions of buildpack %s were found on the builder", style.Symbol(bpRef.ID)) + } + + if bpRef.Version == "" { + if len(uniqueVersions(matchingBps)) > 1 { + return dist.Order{}, fmt.Errorf("unable to resolve version: multiple versions of %s - must specify an explicit version", style.Symbol(bpRef.ID)) + } + + bpRef.Version = matchingBps[0].Version + } + + if !hasBuildpackWithVersion(matchingBps, bpRef.Version) { + return dist.Order{}, fmt.Errorf("buildpack %s with version %s was not found on the builder", style.Symbol(bpRef.ID), style.Symbol(bpRef.Version)) + } + + resolvedOrder[gi].Group = append(resolvedOrder[gi].Group, bpRef) + } + } + + return resolvedOrder, nil +} + +func hasBuildpackWithVersion(bps []dist.BuildpackInfo, version string) bool { + for _, bp := range bps { + if bp.Version == version { + return true + } + } + return false +} + +func validateBuildpacks(stackID string, mixins []string, lifecycleDescriptor LifecycleDescriptor, allBuildpacks []dist.BuildpackInfo, bpsToValidate []buildpack.Buildpack) error { + bpLookup := map[string]interface{}{} + + for _, bp := range allBuildpacks { + bpLookup[bp.FullName()] = nil + } + + for _, bp := range bpsToValidate { + bpd := bp.Descriptor() + + // TODO: Warn when Buildpack API is deprecated - https://github.com/buildpacks/pack/issues/788 + compatible := false + for _, version := range append(lifecycleDescriptor.APIs.Buildpack.Supported, lifecycleDescriptor.APIs.Buildpack.Deprecated...) 
{ + compatible = version.Compare(bpd.API) == 0 + if compatible { + break + } + } + + if !compatible { + return fmt.Errorf( + "buildpack %s (Buildpack API %s) is incompatible with lifecycle %s (Buildpack API(s) %s)", + style.Symbol(bpd.Info.FullName()), + bpd.API.String(), + style.Symbol(lifecycleDescriptor.Info.Version.String()), + strings.Join(lifecycleDescriptor.APIs.Buildpack.Supported.AsStrings(), ", "), + ) + } + + if len(bpd.Stacks) >= 1 { // standard buildpack + if err := bpd.EnsureStackSupport(stackID, mixins, false); err != nil { + return err + } + } else { // order buildpack + for _, g := range bpd.Order { + for _, r := range g.Group { + if _, ok := bpLookup[r.FullName()]; !ok { + return fmt.Errorf( + "buildpack %s not found on the builder", + style.Symbol(r.FullName()), + ) + } + } + } + } + } + + return nil +} + +func userAndGroupIDs(img imgutil.Image) (int, int, error) { + sUID, err := img.Env(EnvUID) + if err != nil { + return 0, 0, errors.Wrap(err, "reading builder env variables") + } else if sUID == "" { + return 0, 0, fmt.Errorf("image %s missing required env var %s", style.Symbol(img.Name()), style.Symbol(EnvUID)) + } + + sGID, err := img.Env(EnvGID) + if err != nil { + return 0, 0, errors.Wrap(err, "reading builder env variables") + } else if sGID == "" { + return 0, 0, fmt.Errorf("image %s missing required env var %s", style.Symbol(img.Name()), style.Symbol(EnvGID)) + } + + var uid, gid int + uid, err = strconv.Atoi(sUID) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse %s, value %s should be an integer", style.Symbol(EnvUID), style.Symbol(sUID)) + } + + gid, err = strconv.Atoi(sGID) + if err != nil { + return 0, 0, fmt.Errorf("failed to parse %s, value %s should be an integer", style.Symbol(EnvGID), style.Symbol(sGID)) + } + + return uid, gid, nil +} + +func uniqueVersions(buildpacks []dist.BuildpackInfo) []string { + results := []string{} + set := map[string]interface{}{} + for _, bpInfo := range buildpacks { + _, ok := set[bpInfo.Version] + if !ok { + results = append(results, bpInfo.Version) + set[bpInfo.Version] = true + } + } + return results +} + +func (b *Builder) defaultDirsLayer(dest string) (string, error) { + fh, err := os.Create(filepath.Join(dest, "dirs.tar")) + if err != nil { + return "", err + } + defer fh.Close() + + lw := b.layerWriterFactory.NewWriter(fh) + defer lw.Close() + + ts := archive.NormalizedDateTime + + for _, path := range []string{workspaceDir, layersDir} { + if err := lw.WriteHeader(b.packOwnedDir(path, ts)); err != nil { + return "", errors.Wrapf(err, "creating %s dir in layer", style.Symbol(path)) + } + } + + // can't use filepath.Join(), to ensure Windows doesn't transform it to Windows join + for _, path := range []string{cnbDir, dist.BuildpacksDir, platformDir, platformDir + "/env"} { + if err := lw.WriteHeader(b.rootOwnedDir(path, ts)); err != nil { + return "", errors.Wrapf(err, "creating %s dir in layer", style.Symbol(path)) + } + } + + return fh.Name(), nil +} + +func (b *Builder) packOwnedDir(path string, time time.Time) *tar.Header { + return &tar.Header{ + Typeflag: tar.TypeDir, + Name: path, + Mode: 0755, + ModTime: time, + Uid: b.uid, + Gid: b.gid, + } +} + +func (b *Builder) rootOwnedDir(path string, time time.Time) *tar.Header { + return &tar.Header{ + Typeflag: tar.TypeDir, + Name: path, + Mode: 0755, + ModTime: time, + } +} + +func (b *Builder) lifecycleLayer(dest string) (string, error) { + fh, err := os.Create(filepath.Join(dest, "lifecycle.tar")) + if err != nil { + return "", err + } + defer fh.Close() 
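+ // the layer written below nests the lifecycle binaries under lifecycleDir and symlinks compatLifecycleDir to it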
+ + lw := b.layerWriterFactory.NewWriter(fh) + defer lw.Close() + + if err := lw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: lifecycleDir, + Mode: 0755, + ModTime: archive.NormalizedDateTime, + }); err != nil { + return "", err + } + + err = b.embedLifecycleTar(lw) + if err != nil { + return "", errors.Wrap(err, "embedding lifecycle tar") + } + + if err := lw.WriteHeader(&tar.Header{ + Name: compatLifecycleDir, + Linkname: lifecycleDir, + Typeflag: tar.TypeSymlink, + Mode: 0644, + ModTime: archive.NormalizedDateTime, + }); err != nil { + return "", errors.Wrapf(err, "creating %s symlink", style.Symbol(compatLifecycleDir)) + } + + return fh.Name(), nil +} + +func (b *Builder) embedLifecycleTar(tw archive.TarWriter) error { + var regex = regexp.MustCompile(`^[^/]+/([^/]+)$`) + + lr, err := b.lifecycle.Open() + if err != nil { + return errors.Wrap(err, "failed to open lifecycle") + } + defer lr.Close() + tr := tar.NewReader(lr) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "failed to get next tar entry") + } + + pathMatches := regex.FindStringSubmatch(path.Clean(header.Name)) + if pathMatches != nil { + binaryName := pathMatches[1] + + header.Name = lifecycleDir + "/" + binaryName + err = tw.WriteHeader(header) + if err != nil { + return errors.Wrapf(err, "failed to write header for '%s'", header.Name) + } + + buf, err := ioutil.ReadAll(tr) + if err != nil { + return errors.Wrapf(err, "failed to read contents of '%s'", header.Name) + } + + _, err = tw.Write(buf) + if err != nil { + return errors.Wrapf(err, "failed to write contents to '%s'", header.Name) + } + } + } + + return nil +} + +func (b *Builder) orderLayer(order dist.Order, dest string) (string, error) { + contents, err := orderFileContents(order) + if err != nil { + return "", err + } + + layerTar := filepath.Join(dest, "order.tar") + err = layer.CreateSingleFileTar(layerTar, orderPath, contents, b.layerWriterFactory) + if err != nil { + return "", errors.Wrapf(err, "failed to create order.toml layer tar") + } + + return layerTar, nil +} + +func orderFileContents(order dist.Order) (string, error) { + buf := &bytes.Buffer{} + + tomlData := orderTOML{Order: order} + if err := toml.NewEncoder(buf).Encode(tomlData); err != nil { + return "", errors.Wrapf(err, "failed to marshal order.toml") + } + return buf.String(), nil +} + +func (b *Builder) stackLayer(dest string) (string, error) { + buf := &bytes.Buffer{} + err := toml.NewEncoder(buf).Encode(b.metadata.Stack) + if err != nil { + return "", errors.Wrapf(err, "failed to marshal stack.toml") + } + + layerTar := filepath.Join(dest, "stack.tar") + err = layer.CreateSingleFileTar(layerTar, stackPath, buf.String(), b.layerWriterFactory) + if err != nil { + return "", errors.Wrapf(err, "failed to create stack.toml layer tar") + } + + return layerTar, nil +} + +func (b *Builder) envLayer(dest string, env map[string]string) (string, error) { + fh, err := os.Create(filepath.Join(dest, "env.tar")) + if err != nil { + return "", err + } + defer fh.Close() + + lw := b.layerWriterFactory.NewWriter(fh) + defer lw.Close() + + for k, v := range env { + if err := lw.WriteHeader(&tar.Header{ + Name: path.Join(platformDir, "env", k), + Size: int64(len(v)), + Mode: 0644, + ModTime: archive.NormalizedDateTime, + }); err != nil { + return "", err + } + if _, err := lw.Write([]byte(v)); err != nil { + return "", err + } + } + + return fh.Name(), nil +} diff --git 
a/vendor/github.com/buildpacks/pack/internal/builder/descriptor.go b/vendor/github.com/buildpacks/pack/internal/builder/descriptor.go new file mode 100644 index 0000000000..22017db2e7 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/descriptor.go @@ -0,0 +1,111 @@ +package builder + +import ( + "github.com/BurntSushi/toml" + "github.com/buildpacks/lifecycle/api" + "github.com/pkg/errors" +) + +// LifecycleDescriptor contains information described in the lifecycle.toml +type LifecycleDescriptor struct { + Info LifecycleInfo `toml:"lifecycle"` + // Deprecated: Use `LifecycleAPIs` instead + API LifecycleAPI `toml:"api"` + APIs LifecycleAPIs `toml:"apis"` +} + +// LifecycleInfo contains information about the lifecycle +type LifecycleInfo struct { + Version *Version `toml:"version" json:"version" yaml:"version"` +} + +// LifecycleAPI describes which API versions the lifecycle satisfies +type LifecycleAPI struct { + BuildpackVersion *api.Version `toml:"buildpack" json:"buildpack"` + PlatformVersion *api.Version `toml:"platform" json:"platform"` +} + +// LifecycleAPIs describes the supported API versions per specification +type LifecycleAPIs struct { + Buildpack APIVersions `toml:"buildpack" json:"buildpack"` + Platform APIVersions `toml:"platform" json:"platform"` +} + +type APISet []*api.Version + +func (a APISet) search(comp func(prevMatch, value *api.Version) bool) *api.Version { + var match *api.Version + for _, version := range a { + switch { + case version == nil: + continue + case match == nil: + match = version + case comp(match, version): + match = version + } + } + + return match +} + +func (a APISet) Earliest() *api.Version { + return a.search(func(prevMatch, value *api.Version) bool { return value.Compare(prevMatch) < 0 }) +} + +func (a APISet) Latest() *api.Version { + return a.search(func(prevMatch, value *api.Version) bool { return value.Compare(prevMatch) > 0 }) +} + +func (a APISet) AsStrings() []string { + verStrings := make([]string, len(a)) + for i, version := range a { + verStrings[i] = version.String() + } + + return verStrings +} + +// APIVersions describes the supported API versions +type APIVersions struct { + Deprecated APISet `toml:"deprecated" json:"deprecated" yaml:"deprecated"` + Supported APISet `toml:"supported" json:"supported" yaml:"supported"` +} + +// ParseDescriptor parses LifecycleDescriptor from toml formatted string. 
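+// A minimal sketch of the input it accepts, inferred from the toml tags above (values illustrative): +// +// [lifecycle] +// version = "0.11.3" +// +// [apis.buildpack] +// supported = ["0.2"] +// +// [apis.platform] +// supported = ["0.3"]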
+func ParseDescriptor(contents string) (LifecycleDescriptor, error) { + descriptor := LifecycleDescriptor{} + _, err := toml.Decode(contents, &descriptor) + if err != nil { + return descriptor, errors.Wrap(err, "decoding descriptor") + } + + return descriptor, nil +} + +// CompatDescriptor provides compatibility by mapping new fields to old and vice-versa +func CompatDescriptor(descriptor LifecycleDescriptor) LifecycleDescriptor { + if len(descriptor.APIs.Buildpack.Supported) != 0 || len(descriptor.APIs.Platform.Supported) != 0 { + // select earliest value for deprecated parameters + if len(descriptor.APIs.Buildpack.Supported) != 0 { + descriptor.API.BuildpackVersion = + append(descriptor.APIs.Buildpack.Deprecated, descriptor.APIs.Buildpack.Supported...).Earliest() + } + if len(descriptor.APIs.Platform.Supported) != 0 { + descriptor.API.PlatformVersion = + append(descriptor.APIs.Platform.Deprecated, descriptor.APIs.Platform.Supported...).Earliest() + } + } else if descriptor.API.BuildpackVersion != nil && descriptor.API.PlatformVersion != nil { + // fill supported with deprecated field + descriptor.APIs = LifecycleAPIs{ + Buildpack: APIVersions{ + Supported: APISet{descriptor.API.BuildpackVersion}, + }, + Platform: APIVersions{ + Supported: APISet{descriptor.API.PlatformVersion}, + }, + } + } + + return descriptor +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/detection_order_calculator.go b/vendor/github.com/buildpacks/pack/internal/builder/detection_order_calculator.go new file mode 100644 index 0000000000..d62ad49057 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/detection_order_calculator.go @@ -0,0 +1,106 @@ +package builder + +import ( + pubbldr "github.com/buildpacks/pack/builder" + "github.com/buildpacks/pack/pkg/dist" +) + +type DetectionOrderCalculator struct{} + +func NewDetectionOrderCalculator() *DetectionOrderCalculator { + return &DetectionOrderCalculator{} +} + +type detectionOrderRecurser struct { + layers dist.BuildpackLayers + maxDepth int +} + +func newDetectionOrderRecurser(layers dist.BuildpackLayers, maxDepth int) *detectionOrderRecurser { + return &detectionOrderRecurser{ + layers: layers, + maxDepth: maxDepth, + } +} + +func (c *DetectionOrderCalculator) Order( + order dist.Order, + layers dist.BuildpackLayers, + maxDepth int, +) (pubbldr.DetectionOrder, error) { + recurser := newDetectionOrderRecurser(layers, maxDepth) + + return recurser.detectionOrderFromOrder(order, dist.BuildpackRef{}, 0, map[string]interface{}{}), nil +} + +func (r *detectionOrderRecurser) detectionOrderFromOrder( + order dist.Order, + parentBuildpack dist.BuildpackRef, + currentDepth int, + visited map[string]interface{}, +) pubbldr.DetectionOrder { + var detectionOrder pubbldr.DetectionOrder + for _, orderEntry := range order { + visitedCopy := copyMap(visited) + groupDetectionOrder := r.detectionOrderFromGroup(orderEntry.Group, currentDepth, visitedCopy) + + detectionOrderEntry := pubbldr.DetectionOrderEntry{ + BuildpackRef: parentBuildpack, + GroupDetectionOrder: groupDetectionOrder, + } + + detectionOrder = append(detectionOrder, detectionOrderEntry) + } + + return detectionOrder +} + +func (r *detectionOrderRecurser) detectionOrderFromGroup( + group []dist.BuildpackRef, + currentDepth int, + visited map[string]interface{}, +) pubbldr.DetectionOrder { + var groupDetectionOrder pubbldr.DetectionOrder + + for _, bp := range group { + _, bpSeen := visited[bp.FullName()] + if !bpSeen { + visited[bp.FullName()] = true + } + + layer, ok := 
r.layers.Get(bp.ID, bp.Version) + if ok && len(layer.Order) > 0 && r.shouldGoDeeper(currentDepth) && !bpSeen { + groupOrder := r.detectionOrderFromOrder(layer.Order, bp, currentDepth+1, visited) + groupDetectionOrder = append(groupDetectionOrder, groupOrder...) + } else { + groupDetectionOrderEntry := pubbldr.DetectionOrderEntry{ + BuildpackRef: bp, + Cyclical: bpSeen, + } + groupDetectionOrder = append(groupDetectionOrder, groupDetectionOrderEntry) + } + } + + return groupDetectionOrder +} + +func (r *detectionOrderRecurser) shouldGoDeeper(currentDepth int) bool { + if r.maxDepth == pubbldr.OrderDetectionMaxDepth { + return true + } + + if currentDepth < r.maxDepth { + return true + } + + return false +} + +func copyMap(toCopy map[string]interface{}) map[string]interface{} { + result := make(map[string]interface{}, len(toCopy)) + for key := range toCopy { + result[key] = true + } + + return result +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/image_fetcher_wrapper.go b/vendor/github.com/buildpacks/pack/internal/builder/image_fetcher_wrapper.go new file mode 100644 index 0000000000..dfb0de4a83 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/image_fetcher_wrapper.go @@ -0,0 +1,34 @@ +package builder + +import ( + "context" + + "github.com/buildpacks/imgutil" + + "github.com/buildpacks/pack/pkg/image" +) + +type ImageFetcher interface { + // Fetch fetches an image by resolving it both remotely and locally depending on provided parameters. + // If daemon is true, it will return a `local.Image`. Pull, applicable only when daemon is true, will + // attempt to pull a remote image first. + Fetch(ctx context.Context, name string, options image.FetchOptions) (imgutil.Image, error) +} + +type ImageFetcherWrapper struct { + fetcher ImageFetcher +} + +func NewImageFetcherWrapper(fetcher ImageFetcher) *ImageFetcherWrapper { + return &ImageFetcherWrapper{ + fetcher: fetcher, + } +} + +func (w *ImageFetcherWrapper) Fetch( + ctx context.Context, + name string, + options image.FetchOptions, +) (Inspectable, error) { + return w.fetcher.Fetch(ctx, name, options) +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/inspect.go b/vendor/github.com/buildpacks/pack/internal/builder/inspect.go new file mode 100644 index 0000000000..7a3cc6ca09 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/inspect.go @@ -0,0 +1,158 @@ +package builder + +import ( + "context" + "fmt" + "sort" + "strings" + + pubbldr "github.com/buildpacks/pack/builder" + "github.com/buildpacks/pack/pkg/dist" + "github.com/buildpacks/pack/pkg/image" +) + +type Info struct { + Description string + StackID string + Mixins []string + RunImage string + RunImageMirrors []string + Buildpacks []dist.BuildpackInfo + Order pubbldr.DetectionOrder + BuildpackLayers dist.BuildpackLayers + Lifecycle LifecycleDescriptor + CreatedBy CreatorMetadata +} + +type Inspectable interface { + Label(name string) (string, error) +} + +type InspectableFetcher interface { + Fetch(ctx context.Context, name string, options image.FetchOptions) (Inspectable, error) +} + +type LabelManagerFactory interface { + BuilderLabelManager(inspectable Inspectable) LabelInspector +} + +type LabelInspector interface { + Metadata() (Metadata, error) + StackID() (string, error) + Mixins() ([]string, error) + Order() (dist.Order, error) + BuildpackLayers() (dist.BuildpackLayers, error) +} + +type DetectionCalculator interface { + Order(topOrder dist.Order, layers dist.BuildpackLayers, depth int)
(pubbldr.DetectionOrder, error) +} + +type Inspector struct { + imageFetcher InspectableFetcher + labelManagerFactory LabelManagerFactory + detectionOrderCalculator DetectionCalculator +} + +func NewInspector(fetcher InspectableFetcher, factory LabelManagerFactory, calculator DetectionCalculator) *Inspector { + return &Inspector{ + imageFetcher: fetcher, + labelManagerFactory: factory, + detectionOrderCalculator: calculator, + } +} + +func (i *Inspector) Inspect(name string, daemon bool, orderDetectionDepth int) (Info, error) { + inspectable, err := i.imageFetcher.Fetch(context.Background(), name, image.FetchOptions{Daemon: daemon, PullPolicy: image.PullNever}) + if err != nil { + return Info{}, fmt.Errorf("fetching builder image: %w", err) + } + + labelManager := i.labelManagerFactory.BuilderLabelManager(inspectable) + + metadata, err := labelManager.Metadata() + if err != nil { + return Info{}, fmt.Errorf("reading image metadata: %w", err) + } + + stackID, err := labelManager.StackID() + if err != nil { + return Info{}, fmt.Errorf("reading image stack id: %w", err) + } + + mixins, err := labelManager.Mixins() + if err != nil { + return Info{}, fmt.Errorf("reading image mixins: %w", err) + } + + var commonMixins, buildMixins []string + commonMixins = []string{} + for _, mixin := range mixins { + if strings.HasPrefix(mixin, "build:") { + buildMixins = append(buildMixins, mixin) + } else { + commonMixins = append(commonMixins, mixin) + } + } + + order, err := labelManager.Order() + if err != nil { + return Info{}, fmt.Errorf("reading image order: %w", err) + } + + layers, err := labelManager.BuildpackLayers() + if err != nil { + return Info{}, fmt.Errorf("reading image buildpack layers: %w", err) + } + + detectionOrder, err := i.detectionOrderCalculator.Order(order, layers, orderDetectionDepth) + if err != nil { + return Info{}, fmt.Errorf("calculating detection order: %w", err) + } + + lifecycle := CompatDescriptor(LifecycleDescriptor{ + Info: LifecycleInfo{Version: metadata.Lifecycle.Version}, + API: metadata.Lifecycle.API, + APIs: metadata.Lifecycle.APIs, + }) + + return Info{ + Description: metadata.Description, + StackID: stackID, + Mixins: append(commonMixins, buildMixins...), + RunImage: metadata.Stack.RunImage.Image, + RunImageMirrors: metadata.Stack.RunImage.Mirrors, + Buildpacks: sortBuildPacksByID(uniqueBuildpacks(metadata.Buildpacks)), + Order: detectionOrder, + BuildpackLayers: layers, + Lifecycle: lifecycle, + CreatedBy: metadata.CreatedBy, + }, nil +} + +func uniqueBuildpacks(buildpacks []dist.BuildpackInfo) []dist.BuildpackInfo { + foundBuildpacks := map[string]interface{}{} + var uniqueBuildpacks []dist.BuildpackInfo + + for _, bp := range buildpacks { + _, ok := foundBuildpacks[bp.FullName()] + if !ok { + uniqueBuildpacks = append(uniqueBuildpacks, bp) + foundBuildpacks[bp.FullName()] = true + } + } + + return uniqueBuildpacks +} + +func sortBuildPacksByID(buildpacks []dist.BuildpackInfo) []dist.BuildpackInfo { + sort.Slice(buildpacks, func(i int, j int) bool { + if buildpacks[i].ID == buildpacks[j].ID { + return buildpacks[i].Version < buildpacks[j].Version + } + + return buildpacks[i].ID < buildpacks[j].ID + }) + + return buildpacks +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/label_manager.go b/vendor/github.com/buildpacks/pack/internal/builder/label_manager.go new file mode 100644 index 0000000000..cdbfe760fd --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/label_manager.go @@ -0,0 +1,91 @@ +package builder + +import ( + 
"encoding/json" + "fmt" + + "github.com/buildpacks/pack/pkg/dist" + + "github.com/buildpacks/pack/internal/stack" +) + +type LabelManager struct { + inspectable Inspectable +} + +func NewLabelManager(inspectable Inspectable) *LabelManager { + return &LabelManager{inspectable: inspectable} +} + +func (m *LabelManager) Metadata() (Metadata, error) { + var parsedMetadata Metadata + err := m.labelJSON(metadataLabel, &parsedMetadata) + return parsedMetadata, err +} + +func (m *LabelManager) StackID() (string, error) { + return m.labelContent(stackLabel) +} + +func (m *LabelManager) Mixins() ([]string, error) { + parsedMixins := []string{} + err := m.labelJSONDefaultEmpty(stack.MixinsLabel, &parsedMixins) + return parsedMixins, err +} + +func (m *LabelManager) Order() (dist.Order, error) { + parsedOrder := dist.Order{} + err := m.labelJSONDefaultEmpty(OrderLabel, &parsedOrder) + return parsedOrder, err +} + +func (m *LabelManager) BuildpackLayers() (dist.BuildpackLayers, error) { + parsedLayers := dist.BuildpackLayers{} + err := m.labelJSONDefaultEmpty(dist.BuildpackLayersLabel, &parsedLayers) + return parsedLayers, err +} + +func (m *LabelManager) labelContent(labelName string) (string, error) { + content, err := m.inspectable.Label(labelName) + if err != nil { + return "", fmt.Errorf("getting label %s: %w", labelName, err) + } + + if content == "" { + return "", fmt.Errorf("builder missing label %s -- try recreating builder", labelName) + } + + return content, nil +} + +func (m *LabelManager) labelJSON(labelName string, targetObject interface{}) error { + rawContent, err := m.labelContent(labelName) + if err != nil { + return err + } + + err = json.Unmarshal([]byte(rawContent), targetObject) + if err != nil { + return fmt.Errorf("parsing label content for %s: %w", labelName, err) + } + + return nil +} + +func (m *LabelManager) labelJSONDefaultEmpty(labelName string, targetObject interface{}) error { + rawContent, err := m.inspectable.Label(labelName) + if err != nil { + return fmt.Errorf("getting label %s: %w", labelName, err) + } + + if rawContent == "" { + return nil + } + + err = json.Unmarshal([]byte(rawContent), targetObject) + if err != nil { + return fmt.Errorf("parsing label content for %s: %w", labelName, err) + } + + return nil +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/label_manager_provider.go b/vendor/github.com/buildpacks/pack/internal/builder/label_manager_provider.go new file mode 100644 index 0000000000..4c701eda26 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/label_manager_provider.go @@ -0,0 +1,11 @@ +package builder + +type LabelManagerProvider struct{} + +func NewLabelManagerProvider() *LabelManagerProvider { + return &LabelManagerProvider{} +} + +func (p *LabelManagerProvider) BuilderLabelManager(inspectable Inspectable) LabelInspector { + return NewLabelManager(inspectable) +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/lifecycle.go b/vendor/github.com/buildpacks/pack/internal/builder/lifecycle.go new file mode 100644 index 0000000000..403355824d --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/lifecycle.go @@ -0,0 +1,121 @@ +package builder + +import ( + "archive/tar" + "fmt" + "io" + "path" + "regexp" + + "github.com/pkg/errors" + + "github.com/buildpacks/pack/pkg/archive" +) + +// A snapshot of the latest tested lifecycle version values +const ( + DefaultLifecycleVersion = "0.11.3" + DefaultBuildpackAPIVersion = "0.2" +) + +// Blob is an interface to wrap opening blobs +type 
Blob interface { + Open() (io.ReadCloser, error) +} + +// Lifecycle is an implementation of the CNB Lifecycle spec +//go:generate mockgen -package testmocks -destination testmocks/mock_lifecycle.go github.com/buildpacks/pack/internal/builder Lifecycle +type Lifecycle interface { + Blob + Descriptor() LifecycleDescriptor +} + +type lifecycle struct { + descriptor LifecycleDescriptor + Blob +} + +// NewLifecycle creates a Lifecycle from a Blob +func NewLifecycle(blob Blob) (Lifecycle, error) { + var err error + + br, err := blob.Open() + if err != nil { + return nil, errors.Wrap(err, "open lifecycle blob") + } + defer br.Close() + + _, buf, err := archive.ReadTarEntry(br, "lifecycle.toml") + if err != nil && errors.Cause(err) == archive.ErrEntryNotExist { + return nil, err + } else if err != nil { + return nil, errors.Wrap(err, "reading lifecycle descriptor") + } + + lifecycleDescriptor, err := ParseDescriptor(string(buf)) + if err != nil { + return nil, err + } + + lifecycle := &lifecycle{Blob: blob, descriptor: CompatDescriptor(lifecycleDescriptor)} + + if err = lifecycle.validateBinaries(); err != nil { + return nil, errors.Wrap(err, "validating binaries") + } + + return lifecycle, nil +} + +// Descriptor returns the LifecycleDescriptor +func (l *lifecycle) Descriptor() LifecycleDescriptor { + return l.descriptor +} + +func (l *lifecycle) validateBinaries() error { + rc, err := l.Open() + if err != nil { + return errors.Wrap(err, "create lifecycle blob reader") + } + defer rc.Close() + regex := regexp.MustCompile(`^[^/]+/([^/]+)$`) + headers := map[string]bool{} + tr := tar.NewReader(rc) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "failed to get next tar entry") + } + + pathMatches := regex.FindStringSubmatch(path.Clean(header.Name)) + if pathMatches != nil { + headers[pathMatches[1]] = true + } + } + for _, p := range l.binaries() { + _, found := headers[p] + if !found { + _, found = headers[p+".exe"] + if !found { + return fmt.Errorf("did not find '%s' in tar", p) + } + } + } + return nil +} + +// Binaries returns a list of all binaries contained in the lifecycle. 
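+// validateBinaries above accepts either each name or its .exe variant, so this one list covers both Linux and Windows lifecycle tars.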
+func (l *lifecycle) binaries() []string { + binaries := []string{ + "detector", + "restorer", + "analyzer", + "builder", + "exporter", + "launcher", + "creator", + } + return binaries +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/metadata.go b/vendor/github.com/buildpacks/pack/internal/builder/metadata.go new file mode 100644 index 0000000000..111ec4ef83 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/metadata.go @@ -0,0 +1,36 @@ +package builder + +import "github.com/buildpacks/pack/pkg/dist" + +const ( + OrderLabel = "io.buildpacks.buildpack.order" +) + +type Metadata struct { + Description string `json:"description"` + Buildpacks []dist.BuildpackInfo `json:"buildpacks"` + Stack StackMetadata `json:"stack"` + Lifecycle LifecycleMetadata `json:"lifecycle"` + CreatedBy CreatorMetadata `json:"createdBy"` +} + +type CreatorMetadata struct { + Name string `json:"name" yaml:"name"` + Version string `json:"version" yaml:"version"` +} + +type LifecycleMetadata struct { + LifecycleInfo + // Deprecated: use APIs instead + API LifecycleAPI `json:"api"` + APIs LifecycleAPIs `json:"apis"` +} + +type StackMetadata struct { + RunImage RunImageMetadata `json:"runImage" toml:"run-image"` +} + +type RunImageMetadata struct { + Image string `json:"image" toml:"image"` + Mirrors []string `json:"mirrors" toml:"mirrors"` +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/suggested_builder.go b/vendor/github.com/buildpacks/pack/internal/builder/suggested_builder.go new file mode 100644 index 0000000000..7084f089cd --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/suggested_builder.go @@ -0,0 +1,40 @@ +package builder + +type SuggestedBuilder struct { + Vendor string + Image string + DefaultDescription string +} + +var SuggestedBuilders = []SuggestedBuilder{ + { + Vendor: "Google", + Image: "gcr.io/buildpacks/builder:v1", + DefaultDescription: "GCP Builder for all runtimes", + }, + { + Vendor: "Heroku", + Image: "heroku/buildpacks:18", + DefaultDescription: "heroku-18 base image with buildpacks for Ruby, Java, Node.js, Python, Golang, & PHP", + }, + { + Vendor: "Heroku", + Image: "heroku/buildpacks:20", + DefaultDescription: "heroku-20 base image with buildpacks for Ruby, Java, Node.js, Python, Golang, & PHP", + }, + { + Vendor: "Paketo Buildpacks", + Image: "paketobuildpacks/builder:base", + DefaultDescription: "Small base image with buildpacks for Java, Node.js, Golang, & .NET Core", + }, + { + Vendor: "Paketo Buildpacks", + Image: "paketobuildpacks/builder:full", + DefaultDescription: "Larger base image with buildpacks for Java, Node.js, Golang, .NET Core, & PHP", + }, + { + Vendor: "Paketo Buildpacks", + Image: "paketobuildpacks/builder:tiny", + DefaultDescription: "Tiny base image (bionic build image, distroless run image) with buildpacks for Golang", + }, +} diff --git a/vendor/github.com/buildpacks/pack/internal/builder/version.go b/vendor/github.com/buildpacks/pack/internal/builder/version.go new file mode 100644 index 0000000000..f926419bed --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/builder/version.go @@ -0,0 +1,47 @@ +package builder + +import ( + "github.com/Masterminds/semver" + "github.com/pkg/errors" +) + +// Version is an extension to semver.Version to make it marshalable. 
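+// For example (illustrative): MarshalText on VersionMustParse("0.11.3") yields the original string "0.11.3", and UnmarshalText on a non-semver value such as "abc" returns an error.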
+type Version struct { + semver.Version +} + +// VersionMustParse parses a string into a Version +func VersionMustParse(v string) *Version { + return &Version{Version: *semver.MustParse(v)} +} + +// String returns the string value of the Version +func (v *Version) String() string { + return v.Version.String() +} + +// Equal compares two Versions +func (v *Version) Equal(other *Version) bool { + if other != nil { + return v.Version.Equal(&other.Version) + } + + return false +} + +// MarshalText makes Version satisfy the encoding.TextMarshaler interface. +func (v *Version) MarshalText() ([]byte, error) { + return []byte(v.Version.Original()), nil +} + +// UnmarshalText makes Version satisfy the encoding.TextUnmarshaler interface. +func (v *Version) UnmarshalText(text []byte) error { + s := string(text) + w, err := semver.NewVersion(s) + if err != nil { + return errors.Wrapf(err, "invalid semantic version %s", s) + } + + v.Version = *w + return nil +} diff --git a/vendor/github.com/buildpacks/pack/internal/cache/consts.go b/vendor/github.com/buildpacks/pack/internal/cache/consts.go new file mode 100644 index 0000000000..80ae0ef1ce --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/cache/consts.go @@ -0,0 +1,8 @@ +package cache + +const ( + Image Type = iota + Volume +) + +type Type int diff --git a/vendor/github.com/buildpacks/pack/internal/cache/image_cache.go b/vendor/github.com/buildpacks/pack/internal/cache/image_cache.go new file mode 100644 index 0000000000..158c00a027 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/cache/image_cache.go @@ -0,0 +1,39 @@ +package cache + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/google/go-containerregistry/pkg/name" +) + +type ImageCache struct { + docker client.CommonAPIClient + image string +} + +func NewImageCache(imageRef name.Reference, dockerClient client.CommonAPIClient) *ImageCache { + return &ImageCache{ + image: imageRef.Name(), + docker: dockerClient, + } +} + +func (c *ImageCache) Name() string { + return c.image +} + +func (c *ImageCache) Clear(ctx context.Context) error { + _, err := c.docker.ImageRemove(ctx, c.Name(), types.ImageRemoveOptions{ + Force: true, + }) + if err != nil && !client.IsErrNotFound(err) { + return err + } + return nil +} + +func (c *ImageCache) Type() Type { + return Image +} diff --git a/vendor/github.com/buildpacks/pack/internal/cache/volume_cache.go b/vendor/github.com/buildpacks/pack/internal/cache/volume_cache.go new file mode 100644 index 0000000000..fa0124e50a --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/cache/volume_cache.go @@ -0,0 +1,52 @@ +package cache + +import ( + "context" + "crypto/sha256" + "fmt" + "strings" + + "github.com/docker/docker/client" + "github.com/google/go-containerregistry/pkg/name" + + "github.com/buildpacks/pack/internal/paths" +) + +type VolumeCache struct { + docker client.CommonAPIClient + volume string +} + +func NewVolumeCache(imageRef name.Reference, suffix string, dockerClient client.CommonAPIClient) *VolumeCache { + sum := sha256.Sum256([]byte(imageRef.Name())) + + vol := paths.FilterReservedNames(fmt.Sprintf("%s-%x", sanitizedRef(imageRef), sum[:6])) + return &VolumeCache{ + volume: fmt.Sprintf("pack-cache-%s.%s", vol, suffix), + docker: dockerClient, + } +} + +func (c *VolumeCache) Name() string { + return c.volume +} + +func (c *VolumeCache) Clear(ctx context.Context) error { + err := c.docker.VolumeRemove(ctx, c.Name(), true) + if err != nil && 
!client.IsErrNotFound(err) { + return err + } + return nil +} + +func (c *VolumeCache) Type() Type { + return Volume +} + +// note image names and volume names are validated using the same restrictions: +// see https://github.com/moby/moby/blob/f266f13965d5bfb1825afa181fe6c32f3a597fa3/daemon/names/names.go#L5 +func sanitizedRef(ref name.Reference) string { + result := strings.TrimPrefix(ref.Context().String(), ref.Context().RegistryStr()+"/") + result = strings.ReplaceAll(result, "/", "_") + return fmt.Sprintf("%s_%s", result, ref.Identifier()) +} diff --git a/vendor/github.com/buildpacks/pack/internal/config/config.go b/vendor/github.com/buildpacks/pack/internal/config/config.go new file mode 100644 index 0000000000..5af064f5f9 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/config/config.go @@ -0,0 +1,131 @@ +package config + +import ( + "os" + "path/filepath" + + "github.com/BurntSushi/toml" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" +) + +type Config struct { + // Deprecated: Use DefaultRegistryName instead. See https://github.com/buildpacks/pack/issues/747. + DefaultRegistry string `toml:"default-registry-url,omitempty"` + DefaultRegistryName string `toml:"default-registry,omitempty"` + DefaultBuilder string `toml:"default-builder-image,omitempty"` + PullPolicy string `toml:"pull-policy,omitempty"` + Experimental bool `toml:"experimental,omitempty"` + RunImages []RunImage `toml:"run-images"` + TrustedBuilders []TrustedBuilder `toml:"trusted-builders,omitempty"` + Registries []Registry `toml:"registries,omitempty"` + LifecycleImage string `toml:"lifecycle-image,omitempty"` + RegistryMirrors map[string]string `toml:"registry-mirrors,omitempty"` +} + +type Registry struct { + Name string `toml:"name"` + Type string `toml:"type"` + URL string `toml:"url"` +} + +type RunImage struct { + Image string `toml:"image"` + Mirrors []string `toml:"mirrors"` +} + +type TrustedBuilder struct { + Name string `toml:"name"` +} + +const OfficialRegistryName = "official" + +func DefaultRegistry() Registry { + return Registry{ + OfficialRegistryName, + "github", + "https://github.com/buildpacks/registry-index", + } +} + +func DefaultConfigPath() (string, error) { + home, err := PackHome() + if err != nil { + return "", errors.Wrap(err, "getting pack home") + } + return filepath.Join(home, "config.toml"), nil +} + +func PackHome() (string, error) { + packHome := os.Getenv("PACK_HOME") + if packHome == "" { + home, err := os.UserHomeDir() + if err != nil { + return "", errors.Wrap(err, "getting user home") + } + packHome = filepath.Join(home, ".pack") + } + return packHome, nil +} + +func Read(path string) (Config, error) { + cfg := Config{} + _, err := toml.DecodeFile(path, &cfg) + if err != nil && !os.IsNotExist(err) { + return Config{}, errors.Wrapf(err, "failed to read config file at path %s", path) + } + + return cfg, nil +} + +func Write(cfg Config, path string) error { + if err := MkdirAll(filepath.Dir(path)); err != nil { + return err + } + w, err := os.Create(path) + if err != nil { + return err + } + defer w.Close() + + return toml.NewEncoder(w).Encode(cfg) +} + +func MkdirAll(path string) error { + return os.MkdirAll(path, 0750) +} + +func SetRunImageMirrors(cfg Config, image string, mirrors []string) Config { + for i := range cfg.RunImages { + if cfg.RunImages[i].Image == image { + cfg.RunImages[i].Mirrors = mirrors + return cfg + } + } + cfg.RunImages = append(cfg.RunImages, RunImage{Image: image, Mirrors: mirrors}) + return cfg +} + +func 
GetRegistries(cfg Config) []Registry { + return append(cfg.Registries, DefaultRegistry()) +} + +func GetRegistry(cfg Config, registryName string) (Registry, error) { + if registryName == "" && cfg.DefaultRegistryName != "" { + registryName = cfg.DefaultRegistryName + } + if registryName == "" && cfg.DefaultRegistryName == "" { + registryName = OfficialRegistryName + } + if registryName != "" { + for _, registry := range GetRegistries(cfg) { + if registry.Name == registryName { + return registry, nil + } + } + } + return Registry{}, errors.Errorf("registry %s is not defined in your config file", style.Symbol(registryName)) +} + +const DefaultLifecycleImageRepo = "buildpacksio/lifecycle" diff --git a/vendor/github.com/buildpacks/pack/internal/config/config_helpers.go b/vendor/github.com/buildpacks/pack/internal/config/config_helpers.go new file mode 100644 index 0000000000..9ed17a3a78 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/config/config_helpers.go @@ -0,0 +1,36 @@ +package config + +import ( + "fmt" + "strings" + + "github.com/BurntSushi/toml" + + "github.com/buildpacks/pack/internal/style" +) + +func FormatUndecodedKeys(undecodedKeys []toml.Key) string { + unusedKeys := map[string]interface{}{} + for _, key := range undecodedKeys { + keyName := key.String() + + parent := strings.Split(keyName, ".")[0] + + if _, ok := unusedKeys[parent]; !ok { + unusedKeys[keyName] = nil + } + } + + var errorKeys []string + for errorKey := range unusedKeys { + errorKeys = append(errorKeys, style.Symbol(errorKey)) + } + + pluralizedElement := "element" + if len(errorKeys) > 1 { + pluralizedElement += "s" + } + elements := strings.Join(errorKeys, ", ") + + return fmt.Sprintf("unknown configuration %s %s", pluralizedElement, elements) +} diff --git a/vendor/github.com/buildpacks/pack/internal/container/run.go b/vendor/github.com/buildpacks/pack/internal/container/run.go new file mode 100644 index 0000000000..e39f8fbb23 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/container/run.go @@ -0,0 +1,67 @@ +package container + +import ( + "context" + "fmt" + "io" + + "github.com/docker/docker/api/types" + dcontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/stdcopy" + "github.com/pkg/errors" +) + +type Handler func(bodyChan <-chan dcontainer.ContainerWaitOKBody, errChan <-chan error, reader io.Reader) error + +func RunWithHandler(ctx context.Context, docker client.CommonAPIClient, ctrID string, handler Handler) error { + bodyChan, errChan := docker.ContainerWait(ctx, ctrID, dcontainer.WaitConditionNextExit) + + resp, err := docker.ContainerAttach(ctx, ctrID, types.ContainerAttachOptions{ + Stream: true, + Stdout: true, + Stderr: true, + }) + if err != nil { + return err + } + defer resp.Close() + + if err := docker.ContainerStart(ctx, ctrID, types.ContainerStartOptions{}); err != nil { + return errors.Wrap(err, "container start") + } + + return handler(bodyChan, errChan, resp.Reader) +} + +func DefaultHandler(out, errOut io.Writer) Handler { + return func(bodyChan <-chan dcontainer.ContainerWaitOKBody, errChan <-chan error, reader io.Reader) error { + copyErr := make(chan error) + go func() { + _, err := stdcopy.StdCopy(out, errOut, reader) + defer optionallyCloseWriter(out) + defer optionallyCloseWriter(errOut) + + copyErr <- err + }() + + select { + case body := <-bodyChan: + if body.StatusCode != 0 { + return fmt.Errorf("failed with status code: %d", body.StatusCode) + } + case err := <-errChan: + return 
err + } + + return <-copyErr + } +} + +func optionallyCloseWriter(writer io.Writer) error { + if closer, ok := writer.(io.Closer); ok { + return closer.Close() + } + + return nil +} diff --git a/vendor/github.com/buildpacks/pack/internal/layer/layer.go b/vendor/github.com/buildpacks/pack/internal/layer/layer.go new file mode 100644 index 0000000000..2a66559cf9 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/layer/layer.go @@ -0,0 +1,11 @@ +package layer + +import ( + "github.com/buildpacks/pack/pkg/archive" +) + +func CreateSingleFileTar(tarFile, path, txt string, twf archive.TarWriterFactory) error { + tarBuilder := archive.TarBuilder{} + tarBuilder.AddFile(path, 0644, archive.NormalizedDateTime, []byte(txt)) + return tarBuilder.WriteToPath(tarFile, twf) +} diff --git a/vendor/github.com/buildpacks/pack/internal/layer/writer_factory.go b/vendor/github.com/buildpacks/pack/internal/layer/writer_factory.go new file mode 100644 index 0000000000..24f9502f82 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/layer/writer_factory.go @@ -0,0 +1,32 @@ +package layer + +import ( + "archive/tar" + "fmt" + "io" + + ilayer "github.com/buildpacks/imgutil/layer" + + "github.com/buildpacks/pack/pkg/archive" +) + +type WriterFactory struct { + os string +} + +func NewWriterFactory(imageOS string) (*WriterFactory, error) { + if imageOS != "linux" && imageOS != "windows" { + return nil, fmt.Errorf("provided image OS '%s' must be either 'linux' or 'windows'", imageOS) + } + + return &WriterFactory{os: imageOS}, nil +} + +func (f *WriterFactory) NewWriter(fileWriter io.Writer) archive.TarWriter { + if f.os == "windows" { + return ilayer.NewWindowsWriter(fileWriter) + } + + // Linux images use tar.Writer + return tar.NewWriter(fileWriter) +} diff --git a/vendor/github.com/buildpacks/pack/internal/name/name.go b/vendor/github.com/buildpacks/pack/internal/name/name.go new file mode 100644 index 0000000000..c840da5fe6 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/name/name.go @@ -0,0 +1,49 @@ +package name + +import ( + "fmt" + + gname "github.com/google/go-containerregistry/pkg/name" + + "github.com/buildpacks/pack/internal/style" +) + +type Logger interface { + Infof(fmt string, v ...interface{}) +} + +func TranslateRegistry(name string, registryMirrors map[string]string, logger Logger) (string, error) { + if registryMirrors == nil { + return name, nil + } + + srcRef, err := gname.ParseReference(name, gname.WeakValidation) + if err != nil { + return "", err + } + + srcContext := srcRef.Context() + registryMirror, ok := getMirror(srcContext, registryMirrors) + if !ok { + return name, nil + } + + refName := fmt.Sprintf("%s/%s:%s", registryMirror, srcContext.RepositoryStr(), srcRef.Identifier()) + _, err = gname.ParseReference(refName, gname.WeakValidation) + if err != nil { + return "", err + } + + logger.Infof("Using mirror %s for %s", style.Symbol(refName), name) + return refName, nil +} + +func getMirror(repo gname.Repository, registryMirrors map[string]string) (string, bool) { + mirror, ok := registryMirrors["*"] + if ok { + return mirror, ok + } + + mirror, ok = registryMirrors[repo.RegistryStr()] + return mirror, ok +} diff --git a/vendor/github.com/buildpacks/pack/internal/paths/paths.go b/vendor/github.com/buildpacks/pack/internal/paths/paths.go new file mode 100644 index 0000000000..f78695622a --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/paths/paths.go @@ -0,0 +1,135 @@ +package paths + +import ( + "net/url" + "os" + "path/filepath" + "regexp" 
+ "runtime" + "strings" +) + +var schemeRegexp = regexp.MustCompile(`^.+:/.*`) + +func IsURI(ref string) bool { + return schemeRegexp.MatchString(ref) +} + +func IsDir(p string) (bool, error) { + fileInfo, err := os.Stat(p) + if err != nil { + return false, err + } + + return fileInfo.IsDir(), nil +} + +// FilePathToURI converts a filepath to URI. If relativeTo is provided not empty and path is +// a relative path it will be made absolute based on the provided value. Otherwise, the +// current working directory is used. +func FilePathToURI(path, relativeTo string) (string, error) { + if IsURI(path) { + return path, nil + } + + if !filepath.IsAbs(path) { + var err error + path, err = filepath.Abs(filepath.Join(relativeTo, path)) + if err != nil { + return "", err + } + } + + if runtime.GOOS == "windows" { + if strings.HasPrefix(path, `\\`) { + return "file://" + filepath.ToSlash(strings.TrimPrefix(path, `\\`)), nil + } + return "file:///" + filepath.ToSlash(path), nil + } + return "file://" + path, nil +} + +// examples: +// +// - unix file: file://laptop/some%20dir/file.tgz +// +// - windows drive: file:///C:/Documents%20and%20Settings/file.tgz +// +// - windows share: file://laptop/My%20Documents/file.tgz +// +func URIToFilePath(uri string) (string, error) { + var ( + osPath string + err error + ) + + osPath = filepath.FromSlash(strings.TrimPrefix(uri, "file://")) + + if osPath, err = url.PathUnescape(osPath); err != nil { + return "", nil + } + + if runtime.GOOS == "windows" { + if strings.HasPrefix(osPath, `\`) { + return strings.TrimPrefix(osPath, `\`), nil + } + return `\\` + osPath, nil + } + return osPath, nil +} + +func FilterReservedNames(p string) string { + // The following keys are reserved on Windows + // https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file?redirectedfrom=MSDN#win32-file-namespaces + reservedNameConversions := map[string]string{ + "aux": "a_u_x", + "com": "c_o_m", + "con": "c_o_n", + "lpt": "l_p_t", + "nul": "n_u_l", + "prn": "p_r_n", + } + for k, v := range reservedNameConversions { + p = strings.ReplaceAll(p, k, v) + } + + return p +} + +// WindowsDir is equivalent to path.Dir or filepath.Dir but always for Windows paths +// reproduced because Windows implementation is not exported +func WindowsDir(p string) string { + pathElements := strings.Split(p, `\`) + + dirName := strings.Join(pathElements[:len(pathElements)-1], `\`) + + return dirName +} + +// WindowsBasename is equivalent to path.Basename or filepath.Basename but always for Windows paths +// reproduced because Windows implementation is not exported +func WindowsBasename(p string) string { + pathElements := strings.Split(p, `\`) + + return pathElements[len(pathElements)-1] +} + +// WindowsToSlash is equivalent to path.ToSlash or filepath.ToSlash but always for Windows paths +// reproduced because Windows implementation is not exported +func WindowsToSlash(p string) string { + slashPath := strings.ReplaceAll(p, `\`, "/") // convert slashes + if len(slashPath) < 2 { + return "" + } + + return slashPath[2:] // strip volume +} + +// WindowsPathSID returns the appropriate SID for a given UID and GID +// This the basic logic for path permissions in Pack and Lifecycle +func WindowsPathSID(uid, gid int) string { + if uid == 0 && gid == 0 { + return "S-1-5-32-544" // BUILTIN\Administrators + } + return "S-1-5-32-545" // BUILTIN\Users +} diff --git a/vendor/github.com/buildpacks/pack/internal/registry/buildpack.go b/vendor/github.com/buildpacks/pack/internal/registry/buildpack.go new file mode 
100644 index 0000000000..e5d75431a9 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/registry/buildpack.go @@ -0,0 +1,45 @@ +package registry + +import ( + "fmt" + "strings" + + ggcrname "github.com/google/go-containerregistry/pkg/name" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" +) + +// Buildpack contains information about a buildpack stored in a Registry +type Buildpack struct { + Namespace string `json:"ns"` + Name string `json:"name"` + Version string `json:"version"` + Yanked bool `json:"yanked"` + Address string `json:"addr,omitempty"` +} + +// Validate that a buildpack reference contains required information +func Validate(b Buildpack) error { + if b.Address == "" { + return errors.New("invalid entry: address is a required field") + } + _, err := ggcrname.NewDigest(b.Address) + if err != nil { + return fmt.Errorf("invalid entry: '%s' is not a digest reference", b.Address) + } + + return nil +} + +// ParseNamespaceName parses a buildpack ID into Namespace and Name +func ParseNamespaceName(id string) (ns string, name string, err error) { + parts := strings.Split(id, "/") + if len(parts) < 2 { + return "", "", fmt.Errorf("invalid id %s does not contain a namespace", style.Symbol(id)) + } else if len(parts) > 2 { + return "", "", fmt.Errorf("invalid id %s contains unexpected characters", style.Symbol(id)) + } + + return parts[0], parts[1], nil +} diff --git a/vendor/github.com/buildpacks/pack/internal/registry/git.go b/vendor/github.com/buildpacks/pack/internal/registry/git.go new file mode 100644 index 0000000000..9a90b56377 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/registry/git.go @@ -0,0 +1,31 @@ +package registry + +import ( + "bytes" + "text/template" + + "github.com/pkg/errors" +) + +// GitCommit commits a Buildpack to a registry Cache. 
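+// The commit message is rendered from GitCommitTemplate below, e.g. (illustrative values) "ADD example/my-buildpack@0.0.1", or the YANK form when b.Yanked is set.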
+func GitCommit(b Buildpack, username string, registryCache Cache) error { + if err := registryCache.Initialize(); err != nil { + return err + } + + commitTemplate, err := template.New("buildpack").Parse(GitCommitTemplate) + if err != nil { + return err + } + + var commit bytes.Buffer + if err := commitTemplate.Execute(&commit, b); err != nil { + return errors.Wrap(err, "creating template") + } + + if err := registryCache.Commit(b, username, commit.String()); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/buildpacks/pack/internal/registry/github.go b/vendor/github.com/buildpacks/pack/internal/registry/github.go new file mode 100644 index 0000000000..329de29d52 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/registry/github.go @@ -0,0 +1,73 @@ +package registry + +import ( + "bytes" + "fmt" + "net/url" + "os/exec" + "strings" + "text/template" + + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" +) + +type GithubIssue struct { + Title string + Body string +} + +func CreateGithubIssue(b Buildpack) (GithubIssue, error) { + titleTemplate, err := template.New("buildpack").Parse(GithubIssueTitleTemplate) + if err != nil { + return GithubIssue{}, err + } + + bodyTemplate, err := template.New("buildpack").Parse(GithubIssueBodyTemplate) + if err != nil { + return GithubIssue{}, err + } + + var title bytes.Buffer + err = titleTemplate.Execute(&title, b) + if err != nil { + return GithubIssue{}, err + } + + var body bytes.Buffer + err = bodyTemplate.Execute(&body, b) + if err != nil { + return GithubIssue{}, err + } + + return GithubIssue{ + title.String(), + body.String(), + }, nil +} + +func CreateBrowserCmd(browserURL, os string) (*exec.Cmd, error) { + _, err := url.ParseRequestURI(browserURL) + if err != nil { + return nil, errors.Wrapf(err, "invalid URL %s", style.Symbol(browserURL)) + } + + switch os { + case "linux": + return exec.Command("xdg-open", browserURL), nil + case "windows": + return exec.Command("rundll32", "url.dll,FileProtocolHandler", browserURL), nil + case "darwin": + return exec.Command("open", browserURL), nil + default: + return nil, fmt.Errorf("unsupported platform %s", style.Symbol(os)) + } +} + +func GetIssueURL(githubURL string) (*url.URL, error) { + if githubURL == "" { + return nil, errors.New("missing github URL") + } + return url.Parse(fmt.Sprintf("%s/issues/new", strings.TrimSuffix(githubURL, "/"))) +} diff --git a/vendor/github.com/buildpacks/pack/internal/registry/index.go b/vendor/github.com/buildpacks/pack/internal/registry/index.go new file mode 100644 index 0000000000..928c3b07f8 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/registry/index.go @@ -0,0 +1,57 @@ +package registry + +import ( + "fmt" + "path/filepath" + "regexp" + + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" +) + +var ( + validCharsPattern = "[a-z0-9\\-.]+" + validCharsRegexp = regexp.MustCompile(fmt.Sprintf("^%s$", validCharsPattern)) +) + +// IndexPath resolves the path for a specific namespace and name of buildpack +func IndexPath(rootDir, ns, name string) (string, error) { + if err := validateField("namespace", ns); err != nil { + return "", err + } + + if err := validateField("name", name); err != nil { + return "", err + } + + var indexDir string + switch { + case len(name) == 1: + indexDir = "1" + case len(name) == 2: + indexDir = "2" + case len(name) == 3: + indexDir = filepath.Join("3", name[:2]) + default: + indexDir = filepath.Join(name[:2], name[2:4]) + } + + return 
filepath.Join(rootDir, indexDir, fmt.Sprintf("%s_%s", ns, name)), nil +} + +func validateField(field, value string) error { + length := len(value) + switch { + case length == 0: + return errors.Errorf("%s cannot be empty", style.Symbol(field)) + case length > 253: + return errors.Errorf("%s too long (max 253 chars)", style.Symbol(field)) + } + + if !validCharsRegexp.MatchString(value) { + return errors.Errorf("%s contains illegal characters (must match %s)", style.Symbol(field), style.Symbol(validCharsPattern)) + } + + return nil +} diff --git a/vendor/github.com/buildpacks/pack/internal/registry/registry_cache.go b/vendor/github.com/buildpacks/pack/internal/registry/registry_cache.go new file mode 100644 index 0000000000..95c9862c61 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/registry/registry_cache.go @@ -0,0 +1,357 @@ +package registry + +import ( + "bufio" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/pkg/errors" + "golang.org/x/mod/semver" + "gopkg.in/src-d/go-git.v4" + "gopkg.in/src-d/go-git.v4/plumbing/object" + + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/buildpack" + "github.com/buildpacks/pack/pkg/logging" +) + +const DefaultRegistryURL = "https://github.com/buildpacks/registry-index" +const DefaultRegistryName = "official" +const defaultRegistryDir = "registry" + +// Cache is a RegistryCache +type Cache struct { + logger logging.Logger + url *url.URL + Root string + RegistryDir string +} + +const GithubIssueTitleTemplate = "{{ if .Yanked }}YANK{{ else }}ADD{{ end }} {{.Namespace}}/{{.Name}}@{{.Version}}" +const GithubIssueBodyTemplate = ` +id = "{{.Namespace}}/{{.Name}}" +version = "{{.Version}}" +{{ if .Yanked }}{{ else if .Address }}addr = "{{.Address}}"{{ end }} +` +const GitCommitTemplate = `{{ if .Yanked }}YANK{{else}}ADD{{end}} {{.Namespace}}/{{.Name}}@{{.Version}}` + +// Entry is a list of buildpacks stored in a registry +type Entry struct { + Buildpacks []Buildpack `json:"buildpacks"` +} + +// NewDefaultRegistryCache creates a new registry cache with default options +func NewDefaultRegistryCache(logger logging.Logger, home string) (Cache, error) { + return NewRegistryCache(logger, home, DefaultRegistryURL) +} + +// NewRegistryCache creates a new registry cache +func NewRegistryCache(logger logging.Logger, home, registryURL string) (Cache, error) { + if _, err := os.Stat(home); err != nil { + return Cache{}, errors.Wrapf(err, "finding home %s", home) + } + + normalizedURL, err := url.Parse(registryURL) + if err != nil { + return Cache{}, errors.Wrapf(err, "parsing registry url %s", registryURL) + } + + key := sha256.New() + key.Write([]byte(normalizedURL.String())) + cacheDir := fmt.Sprintf("%s-%s", defaultRegistryDir, hex.EncodeToString(key.Sum(nil))) + + return Cache{ + url: normalizedURL, + logger: logger, + Root: filepath.Join(home, cacheDir), + }, nil +} + +// LocateBuildpack stored in registry +func (r *Cache) LocateBuildpack(bp string) (Buildpack, error) { + err := r.Refresh() + if err != nil { + return Buildpack{}, errors.Wrap(err, "refreshing cache") + } + + ns, name, version, err := buildpack.ParseRegistryID(bp) + if err != nil { + return Buildpack{}, errors.Wrap(err, "parsing buildpacks registry id") + } + + entry, err := r.readEntry(ns, name) + if err != nil { + return Buildpack{}, errors.Wrap(err, "reading entry") + } + + if len(entry.Buildpacks) > 0 { + if version == "" { + highestVersion := 
entry.Buildpacks[0] + if len(entry.Buildpacks) > 1 { + for _, bp := range entry.Buildpacks[1:] { + if semver.Compare(fmt.Sprintf("v%s", bp.Version), fmt.Sprintf("v%s", highestVersion.Version)) > 0 { + highestVersion = bp + } + } + } + return highestVersion, Validate(highestVersion) + } + + for _, bpIndex := range entry.Buildpacks { + if bpIndex.Version == version { + return bpIndex, Validate(bpIndex) + } + } + return Buildpack{}, fmt.Errorf("could not find version for buildpack: %s", bp) + } + + return Buildpack{}, fmt.Errorf("no entries for buildpack: %s", bp) +} + +// Refresh local Registry Cache +func (r *Cache) Refresh() error { + r.logger.Debugf("Refreshing registry cache for %s/%s", r.url.Host, r.url.Path) + + if err := r.Initialize(); err != nil { + return errors.Wrapf(err, "initializing (%s)", r.Root) + } + + repository, err := git.PlainOpen(r.Root) + if err != nil { + return errors.Wrapf(err, "opening (%s)", r.Root) + } + + w, err := repository.Worktree() + if err != nil { + return errors.Wrapf(err, "reading (%s)", r.Root) + } + + err = w.Pull(&git.PullOptions{RemoteName: "origin"}) + if err == git.NoErrAlreadyUpToDate { + return nil + } + return err +} + +// Initialize a local Registry Cache +func (r *Cache) Initialize() error { + _, err := os.Stat(r.Root) + if err != nil { + if os.IsNotExist(err) { + err = r.CreateCache() + if err != nil { + return errors.Wrap(err, "creating registry cache") + } + } + } + + if err := r.validateCache(); err != nil { + err = os.RemoveAll(r.Root) + if err != nil { + return errors.Wrap(err, "resetting registry cache") + } + err = r.CreateCache() + if err != nil { + return errors.Wrap(err, "rebuilding registry cache") + } + } + + return nil +} + +// CreateCache creates the cache on the filesystem +func (r *Cache) CreateCache() error { + r.logger.Debugf("Creating registry cache for %s/%s", r.url.Host, r.url.Path) + + registryDir, err := ioutil.TempDir(filepath.Dir(r.Root), "registry") + if err != nil { + return err + } + + r.RegistryDir = registryDir + + repository, err := git.PlainClone(r.RegistryDir, false, &git.CloneOptions{ + URL: r.url.String(), + }) + if err != nil { + return errors.Wrap(err, "cloning remote registry") + } + + w, err := repository.Worktree() + if err != nil { + return err + } + + err = os.Rename(w.Filesystem.Root(), r.Root) + if err != nil { + if os.IsExist(err) { + // If pack is run concurrently, this action might have already occurred + return nil + } + return err + } + return nil +} + +func (r *Cache) validateCache() error { + r.logger.Debugf("Validating registry cache for %s/%s", r.url.Host, r.url.Path) + + repository, err := git.PlainOpen(r.Root) + if err != nil { + return errors.Wrap(err, "opening registry cache") + } + + remotes, err := repository.Remotes() + if err != nil { + return errors.Wrap(err, "accessing registry cache") + } + + for _, remote := range remotes { + if remote.Config().Name == "origin" && remote.Config().URLs[0] == r.url.String() { + return nil + } + } + return errors.New("invalid registry cache remote") +} + +// Commit a Buildpack change +func (r *Cache) Commit(b Buildpack, username, msg string) error { + r.logger.Debugf("Creating commit in registry cache") + + if msg == "" { + return errors.New("invalid commit message") + } + + repository, err := git.PlainOpen(r.Root) + if err != nil { + return errors.Wrap(err, "opening registry cache") + } + + w, err := repository.Worktree() + if err != nil { + return errors.Wrapf(err, "reading %s", style.Symbol(r.Root)) + } + + index, err := r.writeEntry(b)
+ if err != nil { + return errors.Wrapf(err, "writing %s", style.Symbol(index)) + } + + relativeIndexFile, err := filepath.Rel(r.Root, index) + if err != nil { + return errors.Wrap(err, "resolving relative path") + } + + if _, err := w.Add(relativeIndexFile); err != nil { + return errors.Wrapf(err, "adding %s", style.Symbol(index)) + } + + if _, err := w.Commit(msg, &git.CommitOptions{ + Author: &object.Signature{ + Name: username, + Email: "", + When: time.Now(), + }, + }); err != nil { + return errors.Wrapf(err, "committing") + } + + return nil +} + +func (r *Cache) writeEntry(b Buildpack) (string, error) { + var ns = b.Namespace + var name = b.Name + + index, err := IndexPath(r.Root, ns, name) + if err != nil { + return "", err + } + + if _, err := os.Stat(index); os.IsNotExist(err) { + if err := os.MkdirAll(filepath.Dir(index), 0750); err != nil { + return "", errors.Wrapf(err, "creating directory structure for: %s/%s", ns, name) + } + } else { + if _, err := os.Stat(index); err == nil { + entry, err := r.readEntry(ns, name) + if err != nil { + return "", errors.Wrapf(err, "reading existing buildpack entries") + } + + availableBuildpacks := entry.Buildpacks + + if len(availableBuildpacks) != 0 { + if availableBuildpacks[len(availableBuildpacks)-1].Version == b.Version { + return "", errors.Errorf("same version exists, upgrade the version to add") + } + } + } + } + + f, err := os.OpenFile(filepath.Clean(index), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + return "", errors.Wrapf(err, "creating buildpack file: %s/%s", ns, name) + } + defer f.Close() + + newline := "\n" + if runtime.GOOS == "windows" { + newline = "\r\n" + } + + fileContents, err := json.Marshal(b) + if err != nil { + return "", errors.Wrapf(err, "converting buildpack file to json: %s/%s", ns, name) + } + + fileContentsFormatted := string(fileContents) + newline + if _, err := f.WriteString(fileContentsFormatted); err != nil { + return "", errors.Wrapf(err, "writing buildpack to file: %s/%s", ns, name) + } + + return index, nil +} + +func (r *Cache) readEntry(ns, name string) (Entry, error) { + index, err := IndexPath(r.Root, ns, name) + if err != nil { + return Entry{}, err + } + + if _, err := os.Stat(index); err != nil { + return Entry{}, errors.Wrapf(err, "finding buildpack: %s/%s", ns, name) + } + + file, err := os.Open(filepath.Clean(index)) + if err != nil { + return Entry{}, errors.Wrapf(err, "opening index for buildpack: %s/%s", ns, name) + } + defer file.Close() + + entry := Entry{} + scanner := bufio.NewScanner(file) + for scanner.Scan() { + var bp Buildpack + err = json.Unmarshal([]byte(scanner.Text()), &bp) + if err != nil { + return Entry{}, errors.Wrapf(err, "parsing index for buildpack: %s/%s", ns, name) + } + + entry.Buildpacks = append(entry.Buildpacks, bp) + } + + if err := scanner.Err(); err != nil { + return entry, errors.Wrapf(err, "reading index for buildpack: %s/%s", ns, name) + } + + return entry, nil +} diff --git a/vendor/github.com/buildpacks/pack/internal/stack/merge.go b/vendor/github.com/buildpacks/pack/internal/stack/merge.go new file mode 100644 index 0000000000..854e96362f --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/stack/merge.go @@ -0,0 +1,62 @@ +package stack + +import ( + "sort" + + "github.com/buildpacks/pack/internal/stringset" + "github.com/buildpacks/pack/pkg/dist" +) + +// MergeCompatible determines the allowable set of stacks that a combination of buildpacks may run on, given each +// buildpack's set of stacks. 
Compatibility between the two sets of buildpack stacks is defined by the following rules: +// +// 1. The stack must be supported by both buildpacks. That is, any resulting stack ID must appear in both input sets. +// 2. For each supported stack ID, all required mixins for all buildpacks must be provided by the result. That is, +// mixins for the stack ID in both input sets are unioned. +// +// --- +// +// Examples: +// +// stacksA = [{ID: "stack1", mixins: ["build:mixinA", "mixinB", "run:mixinC"]}] +// stacksB = [{ID: "stack1", mixins: ["build:mixinA", "run:mixinC"]}] +// result = [{ID: "stack1", mixins: ["build:mixinA", "mixinB", "run:mixinC"]}] +// +// stacksA = [{ID: "stack1", mixins: ["build:mixinA"]}, {ID: "stack2", mixins: ["mixinA"]}] +// stacksB = [{ID: "stack1", mixins: ["run:mixinC"]}, {ID: "stack2", mixins: ["mixinA"]}] +// result = [{ID: "stack1", mixins: ["build:mixinA", "run:mixinC"]}, {ID: "stack2", mixins: ["mixinA"]}] +// +// stacksA = [{ID: "stack1", mixins: ["build:mixinA"]}, {ID: "stack2", mixins: ["mixinA"]}] +// stacksB = [{ID: "stack2", mixins: ["mixinA", "run:mixinB"]}] +// result = [{ID: "stack2", mixins: ["mixinA", "run:mixinB"]}] +// +// stacksA = [{ID: "stack1", mixins: ["build:mixinA"]}] +// stacksB = [{ID: "stack2", mixins: ["mixinA", "run:mixinB"]}] +// result = [] +// +func MergeCompatible(stacksA []dist.Stack, stacksB []dist.Stack) []dist.Stack { + set := map[string][]string{} + + for _, s := range stacksA { + set[s.ID] = s.Mixins + } + + var results []dist.Stack + for _, s := range stacksB { + if stackMixins, ok := set[s.ID]; ok { + mixinsSet := stringset.FromSlice(append(stackMixins, s.Mixins...)) + var mixins []string + for s := range mixinsSet { + mixins = append(mixins, s) + } + sort.Strings(mixins) + + results = append(results, dist.Stack{ + ID: s.ID, + Mixins: mixins, + }) + } + } + + return results +} diff --git a/vendor/github.com/buildpacks/pack/internal/stack/mixins.go b/vendor/github.com/buildpacks/pack/internal/stack/mixins.go new file mode 100644 index 0000000000..b3e77c2827 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/stack/mixins.go @@ -0,0 +1,55 @@ +package stack + +import ( + "fmt" + "sort" + "strings" + + "github.com/buildpacks/pack/internal/stringset" + "github.com/buildpacks/pack/internal/style" +) + +const MixinsLabel = "io.buildpacks.stack.mixins" + +func ValidateMixins(buildImageName string, buildImageMixins []string, runImageName string, runImageMixins []string) error { + if invalid := FindStageMixins(buildImageMixins, "run"); len(invalid) > 0 { + sort.Strings(invalid) + return fmt.Errorf("%s contains run-only mixin(s): %s", style.Symbol(buildImageName), strings.Join(invalid, ", ")) + } + + if invalid := FindStageMixins(runImageMixins, "build"); len(invalid) > 0 { + sort.Strings(invalid) + return fmt.Errorf("%s contains build-only mixin(s): %s", style.Symbol(runImageName), strings.Join(invalid, ", ")) + } + + buildImageMixins = removeStageMixins(buildImageMixins, "build") + runImageMixins = removeStageMixins(runImageMixins, "run") + + _, missing, _ := stringset.Compare(runImageMixins, buildImageMixins) + + if len(missing) > 0 { + sort.Strings(missing) + return fmt.Errorf("%s missing required mixin(s): %s", style.Symbol(runImageName), strings.Join(missing, ", ")) + } + return nil +} + +func FindStageMixins(mixins []string, stage string) []string { + var found []string + for _, m := range mixins { + if strings.HasPrefix(m, stage+":") { + found = append(found, m) + } + } + return found +} + +func 
removeStageMixins(mixins []string, stage string) []string { + var filtered []string + for _, m := range mixins { + if !strings.HasPrefix(m, stage+":") { + filtered = append(filtered, m) + } + } + return filtered +} diff --git a/vendor/github.com/buildpacks/pack/internal/stringset/stringset.go b/vendor/github.com/buildpacks/pack/internal/stringset/stringset.go new file mode 100644 index 0000000000..feb7842e7a --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/stringset/stringset.go @@ -0,0 +1,37 @@ +package stringset + +// FromSlice converts the given slice to a set in the form of unique keys in a map. +// The value associated with each key should not be relied upon. A value is present +// in the set if its key is present in the map, regardless of the key's value. +func FromSlice(strings []string) map[string]interface{} { + set := map[string]interface{}{} + for _, s := range strings { + set[s] = nil + } + return set +} + +// Compare performs a set comparison between two slices. `extra` represents elements present in +// `strings1` but not `strings2`. `missing` represents elements present in `strings2` that are +// missing from `strings1`. `common` represents elements present in both slices. Since the input +// slices are treated as sets, duplicates will be removed in any outputs. +func Compare(strings1, strings2 []string) (extra []string, missing []string, common []string) { + set1 := FromSlice(strings1) + set2 := FromSlice(strings2) + + for s := range set1 { + if _, ok := set2[s]; !ok { + extra = append(extra, s) + continue + } + common = append(common, s) + } + + for s := range set2 { + if _, ok := set1[s]; !ok { + missing = append(missing, s) + } + } + + return extra, missing, common +} diff --git a/vendor/github.com/buildpacks/pack/internal/style/style.go b/vendor/github.com/buildpacks/pack/internal/style/style.go new file mode 100644 index 0000000000..0ce59a5bca --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/style/style.go @@ -0,0 +1,62 @@ +package style + +import ( + "fmt" + "sort" + "strings" + + "github.com/heroku/color" +) + +var Symbol = func(value string) string { + if color.Enabled() { + return Key(value) + } + return "'" + value + "'" +} + +var Map = func(value map[string]string, prefix, separator string) string { + result := "" + + var keys []string + + for key := range value { + keys = append(keys, key) + } + + sort.Strings(keys) + + for _, key := range keys { + result += fmt.Sprintf("%s%s=%s%s", prefix, key, value[key], separator) + } + + if color.Enabled() { + return Key(strings.TrimSpace(result)) + } + return "'" + strings.TrimSpace(result) + "'" +} + +var SymbolF = func(format string, a ...interface{}) string { + if color.Enabled() { + return Key(format, a...) + } + return "'" + fmt.Sprintf(format, a...) + "'" +} + +var Key = color.HiBlueString + +var Tip = color.New(color.FgGreen, color.Bold).SprintfFunc() + +var Warn = color.New(color.FgYellow, color.Bold).SprintfFunc() + +var Error = color.New(color.FgRed, color.Bold).SprintfFunc() + +var Step = func(format string, a ...interface{}) string { + return color.CyanString("===> "+format, a...) 
+} + +var Prefix = color.CyanString +var Waiting = color.HiBlackString +var Working = color.HiBlueString +var Complete = color.GreenString +var ProgressBar = color.HiBlueString diff --git a/vendor/github.com/buildpacks/pack/internal/term/term.go b/vendor/github.com/buildpacks/pack/internal/term/term.go new file mode 100644 index 0000000000..04a246e783 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/term/term.go @@ -0,0 +1,25 @@ +package term + +import ( + "io" + + "golang.org/x/term" +) + +// InvalidFileDescriptor based on https://golang.org/src/os/file_unix.go?s=2183:2210#L57 +const InvalidFileDescriptor = ^(uintptr(0)) + +// IsTerminal returns whether a writer is a terminal +func IsTerminal(w io.Writer) (uintptr, bool) { + if f, ok := w.(hasDescriptor); ok { + termFd := f.Fd() + isTerm := term.IsTerminal(int(termFd)) + return termFd, isTerm + } + + return InvalidFileDescriptor, false +} + +type hasDescriptor interface { + Fd() uintptr +} diff --git a/vendor/github.com/buildpacks/pack/internal/termui/detect.go b/vendor/github.com/buildpacks/pack/internal/termui/detect.go new file mode 100644 index 0000000000..ad7836b28b --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/termui/detect.go @@ -0,0 +1,82 @@ +package termui + +import ( + "time" + + "github.com/rivo/tview" +) + +type Detect struct { + app app + textView *tview.TextView + doneChan chan bool +} + +func NewDetect(app app) *Detect { + d := &Detect{ + app: app, + textView: detectStatusTV(app), + doneChan: make(chan bool, 1), + } + + go d.start() + grid := centered(d.textView) + d.app.SetRoot(grid, true) + return d +} + +func (d *Detect) Stop() { + d.doneChan <- true +} + +func (d *Detect) start() { + var ( + i = 0 + ticker = time.NewTicker(250 * time.Millisecond) + doneText = "⌛️ Detected!" + texts = []string{ + "⏳️ Detecting", + "⏳️ Detecting.", + "⏳️ Detecting..", + "⏳️ Detecting...", + } + ) + + for { + select { + case <-ticker.C: + d.textView.SetText(texts[i]) + + i++ + if i == len(texts) { + i = 0 + } + case <-d.doneChan: + ticker.Stop() + d.textView.SetText(doneText) + return + } + } +} + +func detectStatusTV(app app) *tview.TextView { + tv := tview.NewTextView() + tv.SetBackgroundColor(backgroundColor) + tv.SetChangedFunc(func() { app.Draw() }) + return tv +} + +func centered(p tview.Primitive) tview.Primitive { + return tview.NewGrid(). + SetColumns(0, 20, 0). + SetRows(0, 1, 0). + AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 0, 0, 1, 1, 0, 0, true). + AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 0, 1, 1, 1, 0, 0, true). + AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 0, 2, 1, 1, 0, 0, true). + AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 1, 0, 1, 1, 0, 0, true). + AddItem(p, 1, 1, 1, 1, 0, 0, true). + AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 1, 2, 1, 1, 0, 0, true). + AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 2, 0, 1, 1, 0, 0, true). + AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 2, 1, 1, 1, 0, 0, true). 
+ AddItem(tview.NewBox().SetBackgroundColor(backgroundColor), 2, 2, 1, 1, 0, 0, true) +} diff --git a/vendor/github.com/buildpacks/pack/internal/termui/logger.go b/vendor/github.com/buildpacks/pack/internal/termui/logger.go new file mode 100644 index 0000000000..0f07791da3 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/termui/logger.go @@ -0,0 +1,45 @@ +package termui + +import "io" + +func (s *Termui) Debug(msg string) { + // not implemented +} + +func (s *Termui) Debugf(fmt string, v ...interface{}) { + // not implemented +} + +func (s *Termui) Info(msg string) { + s.textChan <- msg +} + +func (s *Termui) Infof(fmt string, v ...interface{}) { + // not implemented +} + +func (s *Termui) Warn(msg string) { + // not implemented +} + +func (s *Termui) Warnf(fmt string, v ...interface{}) { + // not implemented +} + +func (s *Termui) Error(msg string) { + // not implemented +} + +func (s *Termui) Errorf(fmt string, v ...interface{}) { + // not implemented +} + +func (s *Termui) Writer() io.Writer { + // not implemented + return nil +} + +func (s *Termui) IsVerbose() bool { + // not implemented + return false +} diff --git a/vendor/github.com/buildpacks/pack/internal/termui/termui.go b/vendor/github.com/buildpacks/pack/internal/termui/termui.go new file mode 100644 index 0000000000..08609789e0 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/internal/termui/termui.go @@ -0,0 +1,110 @@ +package termui + +import ( + "bufio" + "io" + "io/ioutil" + "strings" + + dcontainer "github.com/docker/docker/api/types/container" + "github.com/docker/docker/pkg/stdcopy" + "github.com/gdamore/tcell/v2" + "github.com/rivo/tview" + + "github.com/buildpacks/pack/internal/container" +) + +var ( + backgroundColor = tcell.NewRGBColor(5, 30, 40) +) + +type app interface { + SetRoot(root tview.Primitive, fullscreen bool) *tview.Application + Draw() *tview.Application + Run() error +} + +type page interface { + Stop() +} + +type Termui struct { + app app + currentPage page + textChan chan string +} + +func NewTermui() *Termui { + return &Termui{ + app: tview.NewApplication(), + textChan: make(chan string, 10), + } +} + +// Run starts the terminal UI process in the foreground +// and the passed in function in the background +func (s *Termui) Run(funk func()) error { + go funk() + go s.handle() + defer s.stop() + + s.currentPage = NewDetect(s.app) + return s.app.Run() +} + +func (s *Termui) stop() { + close(s.textChan) +} + +func (s *Termui) handle() { + for txt := range s.textChan { + switch { + case strings.Contains(txt, "===> ANALYZING"): + s.currentPage.Stop() + default: + // no-op + } + } +} + +func (s *Termui) Handler() container.Handler { + return func(bodyChan <-chan dcontainer.ContainerWaitOKBody, errChan <-chan error, reader io.Reader) error { + var ( + copyErr = make(chan error) + r, w = io.Pipe() + scanner = bufio.NewScanner(r) + ) + + go func() { + defer w.Close() + + _, err := stdcopy.StdCopy(w, ioutil.Discard, reader) + if err != nil { + copyErr <- err + } + }() + + for { + select { + //TODO: errors should show up on screen + // instead of halting loop + //See: https://github.com/buildpacks/pack/issues/1262 + case err := <-copyErr: + return err + case err := <-errChan: + return err + default: + if !scanner.Scan() { + err := scanner.Err() + if err != nil { + return err + } + + return nil + } + + s.textChan <- scanner.Text() + } + } + } +} diff --git a/vendor/github.com/buildpacks/pack/pkg/archive/archive.go b/vendor/github.com/buildpacks/pack/pkg/archive/archive.go new file mode 100644 
index 0000000000..cf0fa09537 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/archive/archive.go @@ -0,0 +1,382 @@ +// Package archive defines a set of functions for reading and writing directories and files in a number of tar formats. +package archive // import "github.com/buildpacks/pack/pkg/archive" + +import ( + "archive/tar" + "archive/zip" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "time" + + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" +) + +var NormalizedDateTime time.Time + +func init() { + NormalizedDateTime = time.Date(1980, time.January, 1, 0, 0, 1, 0, time.UTC) +} + +type TarWriter interface { + WriteHeader(hdr *tar.Header) error + Write(b []byte) (int, error) + Close() error +} + +type TarWriterFactory interface { + NewWriter(io.Writer) TarWriter +} + +type defaultTarWriterFactory struct{} + +func DefaultTarWriterFactory() TarWriterFactory { + return defaultTarWriterFactory{} +} + +func (defaultTarWriterFactory) NewWriter(w io.Writer) TarWriter { + return tar.NewWriter(w) +} + +func ReadDirAsTar(srcDir, basePath string, uid, gid int, mode int64, normalizeModTime, includeRoot bool, fileFilter func(string) bool) io.ReadCloser { + return GenerateTar(func(tw TarWriter) error { + return WriteDirToTar(tw, srcDir, basePath, uid, gid, mode, normalizeModTime, includeRoot, fileFilter) + }) +} + +func ReadZipAsTar(srcPath, basePath string, uid, gid int, mode int64, normalizeModTime bool, fileFilter func(string) bool) io.ReadCloser { + return GenerateTar(func(tw TarWriter) error { + return WriteZipToTar(tw, srcPath, basePath, uid, gid, mode, normalizeModTime, fileFilter) + }) +} + +func GenerateTar(genFn func(TarWriter) error) io.ReadCloser { + return GenerateTarWithWriter(genFn, DefaultTarWriterFactory()) +} + +// GenerateTarWithWriter returns a reader to a tar from a generator function using a writer from the provided factory. +// Note that the generator will not fully execute until the reader is fully read from. Any errors returned by the +// generator will be returned when reading the reader. 
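+// A minimal usage sketch (the single directory entry is hypothetical):
+//
+//	rc := GenerateTarWithWriter(func(tw TarWriter) error {
+//		return tw.WriteHeader(&tar.Header{Name: "app/", Typeflag: tar.TypeDir, Mode: 0755})
+//	}, DefaultTarWriterFactory())
+//	defer rc.Close() // generator errors surface on Read/Close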
+func GenerateTarWithWriter(genFn func(TarWriter) error, twf TarWriterFactory) io.ReadCloser { + errChan := make(chan error) + pr, pw := io.Pipe() + + go func() { + tw := twf.NewWriter(pw) + defer func() { + if r := recover(); r != nil { + tw.Close() + pw.CloseWithError(errors.Errorf("panic: %v", r)) + } + }() + + err := genFn(tw) + + closeErr := tw.Close() + closeErr = aggregateError(closeErr, pw.CloseWithError(err)) + + errChan <- closeErr + }() + + closed := false + return ioutils.NewReadCloserWrapper(pr, func() error { + if closed { + return errors.New("reader already closed") + } + + var completeErr error + + // closing the reader ensures that if anything attempts + // further reading it doesn't block waiting for content + if err := pr.Close(); err != nil { + completeErr = aggregateError(completeErr, err) + } + + // wait until everything closes properly + if err := <-errChan; err != nil { + completeErr = aggregateError(completeErr, err) + } + + closed = true + return completeErr + }) +} + +func aggregateError(base, addition error) error { + if addition == nil { + return base + } + + if base == nil { + return addition + } + + return errors.Wrap(addition, base.Error()) +} + +func CreateSingleFileTarReader(path, txt string) io.ReadCloser { + tarBuilder := TarBuilder{} + tarBuilder.AddFile(path, 0644, NormalizedDateTime, []byte(txt)) + return tarBuilder.Reader(DefaultTarWriterFactory()) +} + +func CreateSingleFileTar(tarFile, path, txt string) error { + tarBuilder := TarBuilder{} + tarBuilder.AddFile(path, 0644, NormalizedDateTime, []byte(txt)) + return tarBuilder.WriteToPath(tarFile, DefaultTarWriterFactory()) +} + +// ErrEntryNotExist is an error returned if an entry path doesn't exist +var ErrEntryNotExist = errors.New("not exist") + +// IsEntryNotExist detects whether a given error is of type ErrEntryNotExist +func IsEntryNotExist(err error) bool { + return err == ErrEntryNotExist || errors.Cause(err) == ErrEntryNotExist +} + +// ReadTarEntry reads and returns a tar file +func ReadTarEntry(rc io.Reader, entryPath string) (*tar.Header, []byte, error) { + tr := tar.NewReader(rc) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, nil, errors.Wrap(err, "failed to get next tar entry") + } + + if path.Clean(header.Name) == entryPath { + buf, err := ioutil.ReadAll(tr) + if err != nil { + return nil, nil, errors.Wrapf(err, "failed to read contents of '%s'", entryPath) + } + + return header, buf, nil + } + } + + return nil, nil, errors.Wrapf(ErrEntryNotExist, "could not find entry path '%s'", entryPath) +} + +// WriteDirToTar writes the contents of a directory to a tar writer. `basePath` is the "location" in the tar the +// contents will be placed. The includeRoot param sets the permissions and metadata on the root file. 
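+// A nil fileFilter includes every entry, and mode -1 preserves each entry's original
+// mode bits. Callers typically go through the ReadDirAsTar wrapper above (paths
+// hypothetical):
+//
+//	rc := ReadDirAsTar("./src", "/workspace", 0, 0, -1, true, false, nil)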
+func WriteDirToTar(tw TarWriter, srcDir, basePath string, uid, gid int, mode int64, normalizeModTime, includeRoot bool, fileFilter func(string) bool) error { + if includeRoot { + rootHeader := &tar.Header{ + Typeflag: tar.TypeDir, + Name: basePath, + Mode: mode, + } + finalizeHeader(rootHeader, uid, gid, mode, normalizeModTime) + if err := tw.WriteHeader(rootHeader); err != nil { + return err + } + } + + return filepath.Walk(srcDir, func(file string, fi os.FileInfo, err error) error { + var relPath string + if fileFilter != nil { + relPath, err = filepath.Rel(srcDir, file) + if err != nil { + return err + } + if !fileFilter(relPath) { + return nil + } + } + if err != nil { + return err + } + + if fi.Mode()&os.ModeSocket != 0 { + return nil + } + + var header *tar.Header + if fi.Mode()&os.ModeSymlink != 0 { + target, err := os.Readlink(file) + if err != nil { + return err + } + + // Ensure that symlinks have Linux link names, independent of source OS + header, err = tar.FileInfoHeader(fi, filepath.ToSlash(target)) + if err != nil { + return err + } + } else { + header, err = tar.FileInfoHeader(fi, fi.Name()) + if err != nil { + return err + } + } + + if relPath == "" { + relPath, err = filepath.Rel(srcDir, file) + if err != nil { + return err + } + } + if relPath == "." { + return nil + } + + header.Name = filepath.ToSlash(filepath.Join(basePath, relPath)) + finalizeHeader(header, uid, gid, mode, normalizeModTime) + + if err := tw.WriteHeader(header); err != nil { + return err + } + + if fi.Mode().IsRegular() { + f, err := os.Open(filepath.Clean(file)) + if err != nil { + return err + } + defer f.Close() + + if _, err := io.Copy(tw, f); err != nil { + return err + } + } + + return nil + }) +} + +// WriteZipToTar writes the contents of a zip file to a tar writer. 
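+// Entries created on FAT filesystems carry no POSIX permissions, so they are written
+// with mode 0777 (see isFatFile below). A sketch via the ReadZipAsTar wrapper above
+// (archive name hypothetical):
+//
+//	rc := ReadZipAsTar("buildpack.zip", "/cnb/buildpacks", 0, 0, -1, true, nil)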
+func WriteZipToTar(tw TarWriter, srcZip, basePath string, uid, gid int, mode int64, normalizeModTime bool, fileFilter func(string) bool) error { + zipReader, err := zip.OpenReader(srcZip) + if err != nil { + return err + } + defer zipReader.Close() + + var fileMode int64 + for _, f := range zipReader.File { + if fileFilter != nil && !fileFilter(f.Name) { + continue + } + + fileMode = mode + if isFatFile(f.FileHeader) { + fileMode = 0777 + } + + var header *tar.Header + if f.Mode()&os.ModeSymlink != 0 { + target, err := func() (string, error) { + r, err := f.Open() + if err != nil { + return "", err + } + defer r.Close() + + // contents is the target of the symlink + target, err := ioutil.ReadAll(r) + if err != nil { + return "", err + } + + return string(target), nil + }() + + if err != nil { + return err + } + + header, err = tar.FileInfoHeader(f.FileInfo(), target) + if err != nil { + return err + } + } else { + header, err = tar.FileInfoHeader(f.FileInfo(), f.Name) + if err != nil { + return err + } + } + + header.Name = filepath.ToSlash(filepath.Join(basePath, f.Name)) + finalizeHeader(header, uid, gid, fileMode, normalizeModTime) + + if err := tw.WriteHeader(header); err != nil { + return err + } + + if f.Mode().IsRegular() { + err := func() error { + fi, err := f.Open() + if err != nil { + return err + } + defer fi.Close() + + _, err = io.Copy(tw, fi) + return err + }() + + if err != nil { + return err + } + } + } + + return nil +} + +func isFatFile(header zip.FileHeader) bool { + var ( + creatorFAT uint16 = 0 + creatorVFAT uint16 = 14 + ) + + // This identifies FAT files, based on the `zip` source: https://golang.org/src/archive/zip/struct.go + firstByte := header.CreatorVersion >> 8 + return firstByte == creatorFAT || firstByte == creatorVFAT +} + +func finalizeHeader(header *tar.Header, uid, gid int, mode int64, normalizeModTime bool) { + NormalizeHeader(header, normalizeModTime) + if mode != -1 { + header.Mode = mode + } + header.Uid = uid + header.Gid = gid +} + +// NormalizeHeader normalizes a tar.Header +// +// Normalizes the following: +// - ModTime +// - GID +// - UID +// - User Name +// - Group Name +func NormalizeHeader(header *tar.Header, normalizeModTime bool) { + if normalizeModTime { + header.ModTime = NormalizedDateTime + } + header.Uid = 0 + header.Gid = 0 + header.Uname = "" + header.Gname = "" +} + +// IsZip detects whether or not a file is a zip archive +func IsZip(path string) (bool, error) { + r, err := zip.OpenReader(path) + + switch { + case err == nil: + r.Close() + return true, nil + case err == zip.ErrFormat: + return false, nil + default: + return false, err + } +} diff --git a/vendor/github.com/buildpacks/pack/pkg/archive/tar_builder.go b/vendor/github.com/buildpacks/pack/pkg/archive/tar_builder.go new file mode 100644 index 0000000000..9659d3c39d --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/archive/tar_builder.go @@ -0,0 +1,94 @@ +package archive + +import ( + "archive/tar" + "io" + "os" + "time" + + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" +) + +type TarBuilder struct { + files []fileEntry +} + +type fileEntry struct { + typeFlag byte + path string + mode int64 + modTime time.Time + contents []byte +} + +func (t *TarBuilder) AddFile(path string, mode int64, modTime time.Time, contents []byte) { + t.files = append(t.files, fileEntry{ + typeFlag: tar.TypeReg, + path: path, + mode: mode, + modTime: modTime, + contents: contents, + }) +} + +func (t *TarBuilder) AddDir(path string, mode int64, modTime time.Time) 
{ + t.files = append(t.files, fileEntry{ + typeFlag: tar.TypeDir, + path: path, + mode: mode, + modTime: modTime, + }) +} + +func (t *TarBuilder) Reader(twf TarWriterFactory) io.ReadCloser { + pr, pw := io.Pipe() + go func() { + var err error + defer func() { + pw.CloseWithError(err) + }() + _, err = t.WriteTo(pw, twf) + }() + + return pr +} + +func (t *TarBuilder) WriteToPath(path string, twf TarWriterFactory) error { + fh, err := os.Create(path) + if err != nil { + return errors.Wrapf(err, "create file for tar: %s", style.Symbol(path)) + } + defer fh.Close() + + _, err = t.WriteTo(fh, twf) + return err +} + +func (t *TarBuilder) WriteTo(w io.Writer, twf TarWriterFactory) (int64, error) { + var written int64 + tw := twf.NewWriter(w) + defer tw.Close() + + for _, f := range t.files { + if err := tw.WriteHeader(&tar.Header{ + Typeflag: f.typeFlag, + Name: f.path, + Size: int64(len(f.contents)), + Mode: f.mode, + ModTime: f.modTime, + }); err != nil { + return written, err + } + + n, err := tw.Write(f.contents) + if err != nil { + return written, err + } + + written += int64(n) + } + + return written, nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/blob/blob.go b/vendor/github.com/buildpacks/pack/pkg/blob/blob.go new file mode 100644 index 0000000000..1846435c4f --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/blob/blob.go @@ -0,0 +1,80 @@ +package blob + +import ( + "bytes" + "compress/gzip" + "io" + "os" + + "github.com/docker/docker/pkg/ioutils" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/pkg/archive" +) + +type Blob interface { + Open() (io.ReadCloser, error) +} + +type blob struct { + path string +} + +func NewBlob(path string) Blob { + return &blob{path: path} +} + +// Open returns an io.ReadCloser whose contents are in tar archive format +func (b blob) Open() (r io.ReadCloser, err error) { + fi, err := os.Stat(b.path) + if err != nil { + return nil, errors.Wrapf(err, "read blob at path '%s'", b.path) + } + if fi.IsDir() { + return archive.ReadDirAsTar(b.path, ".", 0, 0, -1, true, false, nil), nil + } + + fh, err := os.Open(b.path) + if err != nil { + return nil, errors.Wrap(err, "open buildpack archive") + } + defer func() { + if err != nil { + fh.Close() + } + }() + + if ok, err := isGZip(fh); err != nil { + return nil, errors.Wrap(err, "check header") + } else if !ok { + return fh, nil + } + gzr, err := gzip.NewReader(fh) + if err != nil { + return nil, errors.Wrap(err, "create gzip reader") + } + + rc := ioutils.NewReadCloserWrapper(gzr, func() error { + defer fh.Close() + return gzr.Close() + }) + + return rc, nil +} + +func isGZip(file io.ReadSeeker) (bool, error) { + b := make([]byte, 3) + if _, err := file.Seek(0, 0); err != nil { + return false, err + } + _, err := file.Read(b) + if err != nil && err != io.EOF { + return false, err + } else if err == io.EOF { + return false, nil + } + if _, err := file.Seek(0, 0); err != nil { + return false, err + } + return bytes.Equal(b, []byte("\x1f\x8b\x08")), nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/blob/downloader.go b/vendor/github.com/buildpacks/pack/pkg/blob/downloader.go new file mode 100644 index 0000000000..1f5e9e5918 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/blob/downloader.go @@ -0,0 +1,196 @@ +package blob + +import ( + "context" + "crypto/sha256" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "path/filepath" + + "github.com/mitchellh/ioprogress" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/paths" + 
"github.com/buildpacks/pack/internal/style" +) + +const ( + cacheDirPrefix = "c" + cacheVersion = "2" +) + +type Logger interface { + Debugf(fmt string, v ...interface{}) + Infof(fmt string, v ...interface{}) + Writer() io.Writer +} + +type Downloader interface { + Download(ctx context.Context, pathOrURI string) (Blob, error) +} + +type downloader struct { + logger Logger + baseCacheDir string +} + +func NewDownloader(logger Logger, baseCacheDir string) Downloader { + return &downloader{ + logger: logger, + baseCacheDir: baseCacheDir, + } +} + +func (d *downloader) Download(ctx context.Context, pathOrURI string) (Blob, error) { + if paths.IsURI(pathOrURI) { + parsedURL, err := url.Parse(pathOrURI) + if err != nil { + return nil, errors.Wrapf(err, "parsing path/uri %s", style.Symbol(pathOrURI)) + } + + var path string + switch parsedURL.Scheme { + case "file": + path, err = paths.URIToFilePath(pathOrURI) + case "http", "https": + path, err = d.handleHTTP(ctx, pathOrURI) + default: + err = fmt.Errorf("unsupported protocol %s in URI %s", style.Symbol(parsedURL.Scheme), style.Symbol(pathOrURI)) + } + if err != nil { + return nil, err + } + + return &blob{path: path}, nil + } + + path := d.handleFile(pathOrURI) + + return &blob{path: path}, nil +} + +func (d *downloader) handleFile(path string) string { + path, err := filepath.Abs(path) + if err != nil { + return "" + } + + return path +} + +func (d *downloader) handleHTTP(ctx context.Context, uri string) (string, error) { + cacheDir := d.versionedCacheDir() + + if err := os.MkdirAll(cacheDir, 0750); err != nil { + return "", err + } + + cachePath := filepath.Join(cacheDir, fmt.Sprintf("%x", sha256.Sum256([]byte(uri)))) + + etagFile := cachePath + ".etag" + etagExists, err := fileExists(etagFile) + if err != nil { + return "", err + } + + etag := "" + if etagExists { + bytes, err := ioutil.ReadFile(filepath.Clean(etagFile)) + if err != nil { + return "", err + } + etag = string(bytes) + } + + reader, etag, err := d.downloadAsStream(ctx, uri, etag) + if err != nil { + return "", err + } else if reader == nil { + return cachePath, nil + } + defer reader.Close() + + fh, err := os.Create(cachePath) + if err != nil { + return "", errors.Wrapf(err, "create cache path %s", style.Symbol(cachePath)) + } + defer fh.Close() + + _, err = io.Copy(fh, reader) + if err != nil { + return "", errors.Wrap(err, "writing cache") + } + + if err = ioutil.WriteFile(etagFile, []byte(etag), 0744); err != nil { + return "", errors.Wrap(err, "writing etag") + } + + return cachePath, nil +} + +func (d *downloader) downloadAsStream(ctx context.Context, uri string, etag string) (io.ReadCloser, string, error) { + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return nil, "", err + } + req = req.WithContext(ctx) + + if etag != "" { + req.Header.Set("If-None-Match", etag) + } + + resp, err := (&http.Client{}).Do(req) //nolint:bodyclose + if err != nil { + return nil, "", err + } + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + d.logger.Infof("Downloading from %s", style.Symbol(uri)) + return withProgress(d.logger.Writer(), resp.Body, resp.ContentLength), resp.Header.Get("Etag"), nil + } + + if resp.StatusCode == 304 { + d.logger.Debugf("Using cached version of %s", style.Symbol(uri)) + return nil, etag, nil + } + + return nil, "", fmt.Errorf( + "could not download from %s, code http status %s", + style.Symbol(uri), style.SymbolF("%d", resp.StatusCode), + ) +} + +func withProgress(writer io.Writer, rc io.ReadCloser, length int64) io.ReadCloser { + 
return &progressReader{ + Closer: rc, + Reader: &ioprogress.Reader{ + Reader: rc, + Size: length, + DrawFunc: ioprogress.DrawTerminalf(writer, ioprogress.DrawTextFormatBytes), + }, + } +} + +type progressReader struct { + *ioprogress.Reader + io.Closer +} + +func (d *downloader) versionedCacheDir() string { + return filepath.Join(d.baseCacheDir, cacheDirPrefix+cacheVersion) +} + +func fileExists(file string) (bool, error) { + _, err := os.Stat(file) + if err != nil { + if os.IsNotExist(err) { + return false, nil + } + return false, err + } + return true, nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/builder.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/builder.go new file mode 100644 index 0000000000..da0f8226c5 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/builder.go @@ -0,0 +1,295 @@ +package buildpack + +import ( + "archive/tar" + "compress/gzip" + "io/ioutil" + "os" + + "github.com/buildpacks/imgutil/layer" + + "github.com/buildpacks/imgutil" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/empty" + "github.com/google/go-containerregistry/pkg/v1/layout" + "github.com/google/go-containerregistry/pkg/v1/mutate" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/stack" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/archive" + "github.com/buildpacks/pack/pkg/dist" +) + +type ImageFactory interface { + NewImage(repoName string, local bool, imageOS string) (imgutil.Image, error) +} + +type WorkableImage interface { + SetLabel(string, string) error + AddLayerWithDiffID(path, diffID string) error +} + +type layoutImage struct { + v1.Image +} + +func (i *layoutImage) SetLabel(key string, val string) error { + configFile, err := i.ConfigFile() + if err != nil { + return err + } + config := *configFile.Config.DeepCopy() + if config.Labels == nil { + config.Labels = map[string]string{} + } + config.Labels[key] = val + i.Image, err = mutate.Config(i.Image, config) + return err +} + +func (i *layoutImage) AddLayerWithDiffID(path, _ string) error { + tarLayer, err := tarball.LayerFromFile(path, tarball.WithCompressionLevel(gzip.DefaultCompression)) + if err != nil { + return err + } + i.Image, err = mutate.AppendLayers(i.Image, tarLayer) + if err != nil { + return errors.Wrap(err, "add layer") + } + return nil +} + +type PackageBuilder struct { + buildpack Buildpack + dependencies []Buildpack + imageFactory ImageFactory +} + +// TODO: Rename to PackageBuilder +func NewBuilder(imageFactory ImageFactory) *PackageBuilder { + return &PackageBuilder{ + imageFactory: imageFactory, + } +} + +func (b *PackageBuilder) SetBuildpack(buildpack Buildpack) { + b.buildpack = buildpack +} + +func (b *PackageBuilder) AddDependency(buildpack Buildpack) { + b.dependencies = append(b.dependencies, buildpack) +} + +func (b *PackageBuilder) finalizeImage(image WorkableImage, tmpDir string) error { + if err := dist.SetLabel(image, MetadataLabel, &Metadata{ + BuildpackInfo: b.buildpack.Descriptor().Info, + Stacks: b.resolvedStacks(), + }); err != nil { + return err + } + + bpLayers := dist.BuildpackLayers{} + for _, bp := range append(b.dependencies, b.buildpack) { + bpLayerTar, err := ToLayerTar(tmpDir, bp) + if err != nil { + return err + } + + diffID, err := dist.LayerDiffID(bpLayerTar) + if err != nil { + return errors.Wrapf(err, + "getting content hashes for buildpack %s", + 
style.Symbol(bp.Descriptor().Info.FullName()), + ) + } + + if err := image.AddLayerWithDiffID(bpLayerTar, diffID.String()); err != nil { + return errors.Wrapf(err, "adding layer tar for buildpack %s", style.Symbol(bp.Descriptor().Info.FullName())) + } + + dist.AddBuildpackToLayersMD(bpLayers, bp.Descriptor(), diffID.String()) + } + + if err := dist.SetLabel(image, dist.BuildpackLayersLabel, bpLayers); err != nil { + return err + } + + return nil +} + +func (b *PackageBuilder) validate() error { + if b.buildpack == nil { + return errors.New("buildpack must be set") + } + + if err := validateBuildpacks(b.buildpack, b.dependencies); err != nil { + return err + } + + if len(b.resolvedStacks()) == 0 { + return errors.Errorf("no compatible stacks among provided buildpacks") + } + + return nil +} + +func (b *PackageBuilder) resolvedStacks() []dist.Stack { + stacks := b.buildpack.Descriptor().Stacks + for _, bp := range b.dependencies { + bpd := bp.Descriptor() + + if len(stacks) == 0 { + stacks = bpd.Stacks + } else if len(bpd.Stacks) > 0 { // skip over "meta-buildpacks" + stacks = stack.MergeCompatible(stacks, bpd.Stacks) + } + } + + return stacks +} + +func (b *PackageBuilder) SaveAsFile(path, imageOS string) error { + if err := b.validate(); err != nil { + return err + } + + layoutImage, err := newLayoutImage(imageOS) + if err != nil { + return errors.Wrap(err, "creating layout image") + } + + tmpDir, err := ioutil.TempDir("", "package-buildpack") + if err != nil { + return err + } + defer os.RemoveAll(tmpDir) + + if err := b.finalizeImage(layoutImage, tmpDir); err != nil { + return err + } + + layoutDir, err := ioutil.TempDir(tmpDir, "oci-layout") + if err != nil { + return errors.Wrap(err, "creating oci-layout temp dir") + } + + p, err := layout.Write(layoutDir, empty.Index) + if err != nil { + return errors.Wrap(err, "writing index") + } + + if err := p.AppendImage(layoutImage); err != nil { + return errors.Wrap(err, "writing layout") + } + + outputFile, err := os.Create(path) + if err != nil { + return errors.Wrap(err, "creating output file") + } + defer outputFile.Close() + + tw := tar.NewWriter(outputFile) + defer tw.Close() + + return archive.WriteDirToTar(tw, layoutDir, "/", 0, 0, 0755, true, false, nil) +} + +func newLayoutImage(imageOS string) (*layoutImage, error) { + i := empty.Image + + configFile, err := i.ConfigFile() + if err != nil { + return nil, err + } + + configFile.OS = imageOS + i, err = mutate.ConfigFile(i, configFile) + if err != nil { + return nil, err + } + + if imageOS == "windows" { + baseLayerReader, err := layer.WindowsBaseLayer() + if err != nil { + return nil, err + } + + baseLayer, err := tarball.LayerFromReader(baseLayerReader, tarball.WithCompressionLevel(gzip.DefaultCompression)) + if err != nil { + return nil, err + } + + i, err = mutate.AppendLayers(i, baseLayer) + if err != nil { + return nil, err + } + } + + return &layoutImage{Image: i}, nil +} + +func (b *PackageBuilder) SaveAsImage(repoName string, publish bool, imageOS string) (imgutil.Image, error) { + if err := b.validate(); err != nil { + return nil, err + } + + image, err := b.imageFactory.NewImage(repoName, !publish, imageOS) + if err != nil { + return nil, errors.Wrapf(err, "creating image") + } + + tmpDir, err := ioutil.TempDir("", "package-buildpack") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmpDir) + + if err := b.finalizeImage(image, tmpDir); err != nil { + return nil, err + } + + if err := image.Save(); err != nil { + return nil, err + } + + return image, nil +} + 
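+// A minimal end-to-end sketch of the package builder above (the factory and the
+// buildpacks are hypothetical; SaveAsFile writes the package as an OCI layout
+// wrapped in a tar):
+//
+//	pb := NewBuilder(imageFactory)
+//	pb.SetBuildpack(mainBP)
+//	pb.AddDependency(depBP)
+//	err := pb.SaveAsFile("my-package.cnb", "linux")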
+func validateBuildpacks(mainBP Buildpack, depBPs []Buildpack) error { + depsWithRefs := map[string][]dist.BuildpackInfo{} + + for _, bp := range depBPs { + depsWithRefs[bp.Descriptor().Info.FullName()] = nil + } + + for _, bp := range append([]Buildpack{mainBP}, depBPs...) { // List of everything + bpd := bp.Descriptor() + for _, orderEntry := range bpd.Order { + for _, groupEntry := range orderEntry.Group { + if _, ok := depsWithRefs[groupEntry.BuildpackInfo.FullName()]; !ok { + return errors.Errorf( + "buildpack %s references buildpack %s which is not present", + style.Symbol(bpd.Info.FullName()), + style.Symbol(groupEntry.FullName()), + ) + } + + depsWithRefs[groupEntry.BuildpackInfo.FullName()] = append(depsWithRefs[groupEntry.BuildpackInfo.FullName()], bpd.Info) + } + } + } + + for bp, refs := range depsWithRefs { + if len(refs) == 0 { + return errors.Errorf( + "buildpack %s is not used by buildpack %s", + style.Symbol(bp), + style.Symbol(mainBP.Descriptor().Info.FullName()), + ) + } + } + + return nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/buildpack.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/buildpack.go new file mode 100644 index 0000000000..d3713b6cf8 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/buildpack.go @@ -0,0 +1,242 @@ +package buildpack + +import ( + "archive/tar" + "fmt" + "io" + "os" + "path" + "path/filepath" + + "github.com/BurntSushi/toml" + "github.com/buildpacks/lifecycle/api" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/archive" + "github.com/buildpacks/pack/pkg/dist" +) + +type Blob interface { + // Open returns a io.ReadCloser for the contents of the Blob in tar format. + Open() (io.ReadCloser, error) +} + +//go:generate mockgen -package testmocks -destination ../testmocks/mock_buildpack.go github.com/buildpacks/pack/pkg/buildpack Buildpack + +type Buildpack interface { + // Open returns a reader to a tar with contents structured as per the distribution spec + // (currently '/cnbs/buildpacks/{ID}/{version}/*', all entries with a zeroed-out + // timestamp and root UID/GID). + Open() (io.ReadCloser, error) + Descriptor() dist.BuildpackDescriptor +} + +type buildpack struct { + descriptor dist.BuildpackDescriptor + Blob `toml:"-"` +} + +func (b *buildpack) Descriptor() dist.BuildpackDescriptor { + return b.descriptor +} + +// FromBlob constructs a buildpack from a blob. It is assumed that the buildpack +// contents are structured as per the distribution spec (currently '/cnbs/buildpacks/{ID}/{version}/*'). +func FromBlob(bpd dist.BuildpackDescriptor, blob Blob) Buildpack { + return &buildpack{ + Blob: blob, + descriptor: bpd, + } +} + +// FromRootBlob constructs a buildpack from a blob. It is assumed that the buildpack contents reside at the +// root of the blob. The constructed buildpack contents will be structured as per the distribution spec (currently +// a tar with contents under '/cnbs/buildpacks/{ID}/{version}/*'). 
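+// A sketch wiring this to the blob package (path hypothetical):
+//
+//	bp, err := FromRootBlob(blob.NewBlob("./my-buildpack"), archive.DefaultTarWriterFactory())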
+func FromRootBlob(blob Blob, layerWriterFactory archive.TarWriterFactory) (Buildpack, error) { + bpd := dist.BuildpackDescriptor{} + rc, err := blob.Open() + if err != nil { + return nil, errors.Wrap(err, "open buildpack") + } + defer rc.Close() + + _, buf, err := archive.ReadTarEntry(rc, "buildpack.toml") + if err != nil { + return nil, errors.Wrap(err, "reading buildpack.toml") + } + + bpd.API = api.MustParse(dist.AssumedBuildpackAPIVersion) + _, err = toml.Decode(string(buf), &bpd) + if err != nil { + return nil, errors.Wrap(err, "decoding buildpack.toml") + } + + err = validateDescriptor(bpd) + if err != nil { + return nil, errors.Wrap(err, "invalid buildpack.toml") + } + + return &buildpack{ + descriptor: bpd, + Blob: &distBlob{ + openFn: func() io.ReadCloser { + return archive.GenerateTarWithWriter( + func(tw archive.TarWriter) error { + return toDistTar(tw, bpd, blob) + }, + layerWriterFactory, + ) + }, + }, + }, nil +} + +type distBlob struct { + openFn func() io.ReadCloser +} + +func (b *distBlob) Open() (io.ReadCloser, error) { + return b.openFn(), nil +} + +func toDistTar(tw archive.TarWriter, bpd dist.BuildpackDescriptor, blob Blob) error { + ts := archive.NormalizedDateTime + + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: path.Join(dist.BuildpacksDir, bpd.EscapedID()), + Mode: 0755, + ModTime: ts, + }); err != nil { + return errors.Wrapf(err, "writing buildpack id dir header") + } + + baseTarDir := path.Join(dist.BuildpacksDir, bpd.EscapedID(), bpd.Info.Version) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: baseTarDir, + Mode: 0755, + ModTime: ts, + }); err != nil { + return errors.Wrapf(err, "writing buildpack version dir header") + } + + rc, err := blob.Open() + if err != nil { + return errors.Wrap(err, "reading buildpack blob") + } + defer rc.Close() + + tr := tar.NewReader(rc) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return errors.Wrap(err, "failed to get next tar entry") + } + + archive.NormalizeHeader(header, true) + header.Name = path.Clean(header.Name) + if header.Name == "." 
|| header.Name == "/" { + continue + } + + header.Mode = calcFileMode(header) + header.Name = path.Join(baseTarDir, header.Name) + err = tw.WriteHeader(header) + if err != nil { + return errors.Wrapf(err, "failed to write header for '%s'", header.Name) + } + + _, err = io.Copy(tw, tr) + if err != nil { + return errors.Wrapf(err, "failed to write contents to '%s'", header.Name) + } + } + + return nil +} + +func calcFileMode(header *tar.Header) int64 { + switch { + case header.Typeflag == tar.TypeDir: + return 0755 + case nameOneOf(header.Name, + path.Join("bin", "detect"), + path.Join("bin", "build"), + ): + return 0755 + case anyExecBit(header.Mode): + return 0755 + } + + return 0644 +} + +func nameOneOf(name string, paths ...string) bool { + for _, p := range paths { + if name == p { + return true + } + } + return false +} + +func anyExecBit(mode int64) bool { + return mode&0111 != 0 +} + +func validateDescriptor(bpd dist.BuildpackDescriptor) error { + if bpd.Info.ID == "" { + return errors.Errorf("%s is required", style.Symbol("buildpack.id")) + } + + if bpd.Info.Version == "" { + return errors.Errorf("%s is required", style.Symbol("buildpack.version")) + } + + if len(bpd.Order) == 0 && len(bpd.Stacks) == 0 { + return errors.Errorf( + "buildpack %s: must have either %s or an %s defined", + style.Symbol(bpd.Info.FullName()), + style.Symbol("stacks"), + style.Symbol("order"), + ) + } + + if len(bpd.Order) >= 1 && len(bpd.Stacks) >= 1 { + return errors.Errorf( + "buildpack %s: cannot have both %s and an %s defined", + style.Symbol(bpd.Info.FullName()), + style.Symbol("stacks"), + style.Symbol("order"), + ) + } + + return nil +} + +func ToLayerTar(dest string, bp Buildpack) (string, error) { + bpd := bp.Descriptor() + bpReader, err := bp.Open() + if err != nil { + return "", errors.Wrap(err, "opening buildpack blob") + } + defer bpReader.Close() + + layerTar := filepath.Join(dest, fmt.Sprintf("%s.%s.tar", bpd.EscapedID(), bpd.Info.Version)) + fh, err := os.Create(layerTar) + if err != nil { + return "", errors.Wrap(err, "create file for tar") + } + defer fh.Close() + + if _, err := io.Copy(fh, bpReader); err != nil { + return "", errors.Wrap(err, "writing buildpack blob to tar") + } + + return layerTar, nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/buildpackage.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/buildpackage.go new file mode 100644 index 0000000000..df40107355 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/buildpackage.go @@ -0,0 +1,13 @@ +package buildpack + +import ( + "github.com/buildpacks/pack/pkg/dist" +) + +// TODO: Move to dist +const MetadataLabel = "io.buildpacks.buildpackage.metadata" + +type Metadata struct { + dist.BuildpackInfo + Stacks []dist.Stack `toml:"stacks" json:"stacks"` +} diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/downloader.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/downloader.go new file mode 100644 index 0000000000..bb3545f701 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/downloader.go @@ -0,0 +1,175 @@ +package buildpack + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + + "github.com/buildpacks/imgutil" + + "github.com/buildpacks/pack/internal/layer" + "github.com/buildpacks/pack/internal/paths" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/blob" + "github.com/buildpacks/pack/pkg/dist" + "github.com/buildpacks/pack/pkg/image" +) + +type Logger interface { + Debug(msg string) + Debugf(fmt string, v 
...interface{}) + Info(msg string) + Infof(fmt string, v ...interface{}) + Warn(msg string) + Warnf(fmt string, v ...interface{}) + Error(msg string) + Errorf(fmt string, v ...interface{}) +} + +type ImageFetcher interface { + Fetch(ctx context.Context, name string, options image.FetchOptions) (imgutil.Image, error) +} + +type Downloader interface { + Download(ctx context.Context, pathOrURI string) (blob.Blob, error) +} + +//go:generate mockgen -package testmocks -destination ../testmocks/mock_registry_resolver.go github.com/buildpacks/pack/pkg/buildpack RegistryResolver + +type RegistryResolver interface { + Resolve(registryName, bpURI string) (string, error) +} + +type buildpackDownloader struct { + logger Logger + imageFetcher ImageFetcher + downloader Downloader + registryResolver RegistryResolver +} + +func NewDownloader(logger Logger, imageFetcher ImageFetcher, downloader Downloader, registryResolver RegistryResolver) *buildpackDownloader { //nolint:golint,gosimple + return &buildpackDownloader{ + logger: logger, + imageFetcher: imageFetcher, + downloader: downloader, + registryResolver: registryResolver, + } +} + +type DownloadOptions struct { + // Buildpack registry name. Defines where all registry buildpacks will be pulled from. + RegistryName string + + // The base directory to use to resolve relative assets + RelativeBaseDir string + + // The OS of the builder image + ImageOS string + + // Deprecated, the older alternative to buildpack URI + ImageName string + + Daemon bool + + PullPolicy image.PullPolicy +} + +func (c *buildpackDownloader) Download(ctx context.Context, buildpackURI string, opts DownloadOptions) (Buildpack, []Buildpack, error) { + var err error + var locatorType LocatorType + if buildpackURI == "" && opts.ImageName != "" { + c.logger.Warn("The 'image' key is deprecated. 
Use 'uri=\"docker://...\"' instead.") + buildpackURI = opts.ImageName + locatorType = PackageLocator + } else { + locatorType, err = GetLocatorType(buildpackURI, opts.RelativeBaseDir, []dist.BuildpackInfo{}) + if err != nil { + return nil, nil, err + } + } + + var mainBP Buildpack + var depBPs []Buildpack + switch locatorType { + case PackageLocator: + imageName := ParsePackageLocator(buildpackURI) + c.logger.Debugf("Downloading buildpack from image: %s", style.Symbol(imageName)) + mainBP, depBPs, err = extractPackagedBuildpacks(ctx, imageName, c.imageFetcher, image.FetchOptions{Daemon: opts.Daemon, PullPolicy: opts.PullPolicy}) + if err != nil { + return nil, nil, errors.Wrapf(err, "extracting from registry %s", style.Symbol(buildpackURI)) + } + case RegistryLocator: + c.logger.Debugf("Downloading buildpack from registry: %s", style.Symbol(buildpackURI)) + address, err := c.registryResolver.Resolve(opts.RegistryName, buildpackURI) + if err != nil { + return nil, nil, errors.Wrapf(err, "locating in registry: %s", style.Symbol(buildpackURI)) + } + + mainBP, depBPs, err = extractPackagedBuildpacks(ctx, address, c.imageFetcher, image.FetchOptions{Daemon: opts.Daemon, PullPolicy: opts.PullPolicy}) + if err != nil { + return nil, nil, errors.Wrapf(err, "extracting from registry %s", style.Symbol(buildpackURI)) + } + case URILocator: + buildpackURI, err = paths.FilePathToURI(buildpackURI, opts.RelativeBaseDir) + if err != nil { + return nil, nil, errors.Wrapf(err, "making absolute: %s", style.Symbol(buildpackURI)) + } + + c.logger.Debugf("Downloading buildpack from URI: %s", style.Symbol(buildpackURI)) + + blob, err := c.downloader.Download(ctx, buildpackURI) + if err != nil { + return nil, nil, errors.Wrapf(err, "downloading buildpack from %s", style.Symbol(buildpackURI)) + } + + mainBP, depBPs, err = decomposeBuildpack(blob, opts.ImageOS) + if err != nil { + return nil, nil, errors.Wrapf(err, "extracting from %s", style.Symbol(buildpackURI)) + } + default: + return nil, nil, fmt.Errorf("error reading %s: invalid locator: %s", buildpackURI, locatorType) + } + return mainBP, depBPs, nil +} + +// decomposeBuildpack decomposes a buildpack blob into the main builder (order buildpack) and it's dependencies buildpacks. 
+func decomposeBuildpack(blob blob.Blob, imageOS string) (mainBP Buildpack, depBPs []Buildpack, err error) { + isOCILayout, err := IsOCILayoutBlob(blob) + if err != nil { + return mainBP, depBPs, errors.Wrap(err, "inspecting buildpack blob") + } + + if isOCILayout { + mainBP, depBPs, err = BuildpacksFromOCILayoutBlob(blob) + if err != nil { + return mainBP, depBPs, errors.Wrap(err, "extracting buildpacks") + } + } else { + layerWriterFactory, err := layer.NewWriterFactory(imageOS) + if err != nil { + return mainBP, depBPs, errors.Wrapf(err, "get tar writer factory for OS %s", style.Symbol(imageOS)) + } + + mainBP, err = FromRootBlob(blob, layerWriterFactory) + if err != nil { + return mainBP, depBPs, errors.Wrap(err, "reading buildpack") + } + } + + return mainBP, depBPs, nil +} + +func extractPackagedBuildpacks(ctx context.Context, pkgImageRef string, fetcher ImageFetcher, fetchOptions image.FetchOptions) (mainBP Buildpack, depBPs []Buildpack, err error) { + pkgImage, err := fetcher.Fetch(ctx, pkgImageRef, fetchOptions) + if err != nil { + return nil, nil, errors.Wrapf(err, "fetching image") + } + + mainBP, depBPs, err = ExtractBuildpacks(pkgImage) + if err != nil { + return nil, nil, errors.Wrapf(err, "extracting buildpacks from %s", style.Symbol(pkgImageRef)) + } + + return mainBP, depBPs, nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/locator_type.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/locator_type.go new file mode 100644 index 0000000000..2f1a74bb5b --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/locator_type.go @@ -0,0 +1,147 @@ +package buildpack + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "strings" + + "github.com/google/go-containerregistry/pkg/name" + + "github.com/buildpacks/pack/internal/paths" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/dist" +) + +type LocatorType int + +const ( + InvalidLocator LocatorType = iota + FromBuilderLocator + URILocator + IDLocator + PackageLocator + RegistryLocator + // added entries here should also be added to `String()` +) + +const ( + fromBuilderPrefix = "urn:cnb:builder" + deprecatedFromBuilderPrefix = "from=builder" + fromRegistryPrefix = "urn:cnb:registry" + fromDockerPrefix = "docker:/" +) + +var ( + // https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string + semverPattern = `(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?` + registryPattern = regexp.MustCompile(`^[a-z0-9\-\.]+\/[a-z0-9\-\.]+(?:@` + semverPattern + `)?$`) +) + +func (l LocatorType) String() string { + return []string{ + "InvalidLocator", + "FromBuilderLocator", + "URILocator", + "IDLocator", + "PackageLocator", + "RegistryLocator", + }[l] +} + +// GetLocatorType determines which type of locator is designated by the given input. +// If a type cannot be determined, `INVALID_LOCATOR` will be returned. If an error +// is encountered, it will be returned. 
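+//
+// For example (illustrative, assuming the value does not match a local path and the
+// builder buildpack list is empty):
+//
+//	t, _ := GetLocatorType("example/bp@1.2.3", ".", nil)
+//	// t == RegistryLocator: nothing else claimed the value first, and it matches
+//	// the <namespace>/<name>@<semver> registry pattern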
+func GetLocatorType(locator string, relativeBaseDir string, buildpacksFromBuilder []dist.BuildpackInfo) (LocatorType, error) { + if locator == deprecatedFromBuilderPrefix { + return FromBuilderLocator, nil + } + + if strings.HasPrefix(locator, fromBuilderPrefix+":") || strings.HasPrefix(locator, deprecatedFromBuilderPrefix+":") { + if !isFoundInBuilder(locator, buildpacksFromBuilder) { + return InvalidLocator, fmt.Errorf("%s is not a valid identifier", style.Symbol(locator)) + } + return IDLocator, nil + } + + if strings.HasPrefix(locator, fromRegistryPrefix+":") { + return RegistryLocator, nil + } + + if paths.IsURI(locator) { + if HasDockerLocator(locator) { + if _, err := name.ParseReference(locator); err == nil { + return PackageLocator, nil + } + } + return URILocator, nil + } + + return parseNakedLocator(locator, relativeBaseDir, buildpacksFromBuilder), nil +} + +func HasDockerLocator(locator string) bool { + return strings.HasPrefix(locator, fromDockerPrefix) +} + +func parseNakedLocator(locator, relativeBaseDir string, buildpacksFromBuilder []dist.BuildpackInfo) LocatorType { + // from here on, we're dealing with a naked locator, and we try to figure out what it is. To do this we check + // the following characteristics in order: + // 1. Does it match a path on the file system + // 2. Does it match a buildpack ID in the builder + // 3. Does it look like a Buildpack Registry ID + // 4. Does it look like a Docker ref + + if isLocalFile(locator, relativeBaseDir) { + return URILocator + } + + if isFoundInBuilder(locator, buildpacksFromBuilder) { + return IDLocator + } + + if canBeRegistryRef(locator) { + return RegistryLocator + } + + if canBePackageRef(locator) { + return PackageLocator + } + + return InvalidLocator +} + +func canBePackageRef(locator string) bool { + if _, err := name.ParseReference(locator); err == nil { + return true + } + + return false +} + +func canBeRegistryRef(locator string) bool { + return registryPattern.MatchString(locator) +} + +func isFoundInBuilder(locator string, candidates []dist.BuildpackInfo) bool { + id, version := ParseIDLocator(locator) + for _, c := range candidates { + if id == c.ID && (version == "" || version == c.Version) { + return true + } + } + return false +} + +func isLocalFile(locator, relativeBaseDir string) bool { + if !filepath.IsAbs(locator) { + locator = filepath.Join(relativeBaseDir, locator) + } + + if _, err := os.Stat(locator); err == nil { + return true + } + + return false +} diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/oci_layout_package.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/oci_layout_package.go new file mode 100644 index 0000000000..4be4ac6882 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/oci_layout_package.go @@ -0,0 +1,193 @@ +package buildpack + +import ( + "archive/tar" + "compress/gzip" + "encoding/json" + "io" + "path" + "strings" + + "github.com/docker/docker/pkg/ioutils" + v1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/archive" + blob2 "github.com/buildpacks/pack/pkg/blob" + "github.com/buildpacks/pack/pkg/dist" +) + +// IsOCILayoutBlob checks whether a blob is in OCI layout format. 
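+// Detection works by scanning the blob's tar stream for an /oci-layout entry,
+// which the OCI image layout specification requires at the root of a layout.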
+func IsOCILayoutBlob(blob blob2.Blob) (bool, error) { + readCloser, err := blob.Open() + if err != nil { + return false, err + } + defer readCloser.Close() + + _, _, err = archive.ReadTarEntry(readCloser, "/oci-layout") + if err != nil { + if archive.IsEntryNotExist(err) { + return false, nil + } + + return false, err + } + + return true, nil +} + +// BuildpackFromOCILayoutBlob constructs buildpacks from a blob in OCI layout format. +func BuildpacksFromOCILayoutBlob(blob Blob) (mainBP Buildpack, dependencies []Buildpack, err error) { + layoutPackage, err := newOCILayoutPackage(blob) + if err != nil { + return nil, nil, err + } + + return ExtractBuildpacks(layoutPackage) +} + +func ConfigFromOCILayoutBlob(blob Blob) (config v1.ImageConfig, err error) { + layoutPackage, err := newOCILayoutPackage(blob) + if err != nil { + return v1.ImageConfig{}, err + } + return layoutPackage.imageInfo.Config, nil +} + +type ociLayoutPackage struct { + imageInfo v1.Image + manifest v1.Manifest + blob Blob +} + +func newOCILayoutPackage(blob Blob) (*ociLayoutPackage, error) { + index := &v1.Index{} + + if err := unmarshalJSONFromBlob(blob, "/index.json", index); err != nil { + return nil, err + } + + var manifestDescriptor *v1.Descriptor + for _, m := range index.Manifests { + if m.MediaType == "application/vnd.docker.distribution.manifest.v2+json" { + manifestDescriptor = &m // nolint:scopelint + break + } + } + + if manifestDescriptor == nil { + return nil, errors.New("unable to find manifest") + } + + manifest := &v1.Manifest{} + if err := unmarshalJSONFromBlob(blob, pathFromDescriptor(*manifestDescriptor), manifest); err != nil { + return nil, err + } + + imageInfo := &v1.Image{} + if err := unmarshalJSONFromBlob(blob, pathFromDescriptor(manifest.Config), imageInfo); err != nil { + return nil, err + } + + layersLabel := imageInfo.Config.Labels[dist.BuildpackLayersLabel] + if layersLabel == "" { + return nil, errors.Errorf("label %s not found", style.Symbol(dist.BuildpackLayersLabel)) + } + + bpLayers := dist.BuildpackLayers{} + if err := json.Unmarshal([]byte(layersLabel), &bpLayers); err != nil { + return nil, errors.Wrap(err, "unmarshaling layers label") + } + + return &ociLayoutPackage{ + imageInfo: *imageInfo, + manifest: *manifest, + blob: blob, + }, nil +} + +func (o *ociLayoutPackage) Label(name string) (value string, err error) { + return o.imageInfo.Config.Labels[name], nil +} + +func (o *ociLayoutPackage) GetLayer(diffID string) (io.ReadCloser, error) { + index := -1 + for i, dID := range o.imageInfo.RootFS.DiffIDs { + if dID.String() == diffID { + index = i + break + } + } + if index == -1 { + return nil, errors.Errorf("layer %s not found in rootfs", style.Symbol(diffID)) + } + + layerDescriptor := o.manifest.Layers[index] + layerPath := pathFromDescriptor(layerDescriptor) + + blobReader, err := o.blob.Open() + if err != nil { + return nil, err + } + + tr := tar.NewReader(blobReader) + for { + header, err := tr.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, errors.Wrap(err, "failed to get next tar entry") + } + + if path.Clean(header.Name) == path.Clean(layerPath) { + finalReader := blobReader + + if strings.HasSuffix(layerDescriptor.MediaType, ".gzip") { + finalReader, err = gzip.NewReader(tr) + if err != nil { + return nil, err + } + } + + return ioutils.NewReadCloserWrapper(finalReader, func() error { + if err := finalReader.Close(); err != nil { + return err + } + + return blobReader.Close() + }), nil + } + } + + if err := blobReader.Close(); err != nil { + return 
nil, err
+	}
+
+	return nil, errors.Errorf("layer blob %s not found", style.Symbol(layerPath))
+}
+
+func pathFromDescriptor(descriptor v1.Descriptor) string {
+	return path.Join("/blobs", descriptor.Digest.Algorithm().String(), descriptor.Digest.Encoded())
+}
+
+func unmarshalJSONFromBlob(blob Blob, path string, obj interface{}) error {
+	reader, err := blob.Open()
+	if err != nil {
+		return err
+	}
+	defer reader.Close()
+
+	_, contents, err := archive.ReadTarEntry(reader, path)
+	if err != nil {
+		return err
+	}
+
+	if err = json.Unmarshal(contents, obj); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/package.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/package.go
new file mode 100644
index 0000000000..c8ae73fb8c
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/package.go
@@ -0,0 +1,87 @@
+package buildpack
+
+import (
+	"io"
+
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/pkg/dist"
+)
+
+type Package interface {
+	Label(name string) (value string, err error)
+	GetLayer(diffID string) (io.ReadCloser, error)
+}
+
+func ExtractBuildpacks(pkg Package) (mainBP Buildpack, depBPs []Buildpack, err error) {
+	md := &Metadata{}
+	if found, err := dist.GetLabel(pkg, MetadataLabel, md); err != nil {
+		return nil, nil, err
+	} else if !found {
+		return nil, nil, errors.Errorf(
+			"could not find label %s",
+			style.Symbol(MetadataLabel),
+		)
+	}
+
+	bpLayers := dist.BuildpackLayers{}
+	ok, err := dist.GetLabel(pkg, dist.BuildpackLayersLabel, &bpLayers)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if !ok {
+		return nil, nil, errors.Errorf(
+			"could not find label %s",
+			style.Symbol(dist.BuildpackLayersLabel),
+		)
+	}
+
+	for bpID, v := range bpLayers {
+		for bpVersion, bpInfo := range v {
+			desc := dist.BuildpackDescriptor{
+				API: bpInfo.API,
+				Info: dist.BuildpackInfo{
+					ID:       bpID,
+					Version:  bpVersion,
+					Homepage: bpInfo.Homepage,
+					Name:     bpInfo.Name,
+				},
+				Stacks: bpInfo.Stacks,
+				Order:  bpInfo.Order,
+			}
+
+			diffID := bpInfo.LayerDiffID // Allow use in closure
+			b := &openerBlob{
+				opener: func() (io.ReadCloser, error) {
+					rc, err := pkg.GetLayer(diffID)
+					if err != nil {
+						return nil, errors.Wrapf(err,
+							"extracting buildpack %s layer (diffID %s)",
+							style.Symbol(desc.Info.FullName()),
+							style.Symbol(diffID),
+						)
+					}
+					return rc, nil
+				},
+			}
+
+			if desc.Info.Match(md.BuildpackInfo) { // This is the order buildpack of the package
+				mainBP = FromBlob(desc, b)
+			} else {
+				depBPs = append(depBPs, FromBlob(desc, b))
+			}
+		}
+	}
+
+	return mainBP, depBPs, nil
+}
+
+type openerBlob struct {
+	opener func() (io.ReadCloser, error)
+}
+
+func (b *openerBlob) Open() (io.ReadCloser, error) {
+	return b.opener()
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/buildpack/parse_name.go b/vendor/github.com/buildpacks/pack/pkg/buildpack/parse_name.go
new file mode 100644
index 0000000000..db00972241
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/buildpack/parse_name.go
@@ -0,0 +1,60 @@
+package buildpack
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ParseIDLocator parses a buildpack locator in the following formats into its ID and version.
+//
+// - <id>[@<version>]
+// - urn:cnb:builder:<id>[@<version>]
+// - urn:cnb:registry:<id>[@<version>]
+// - from=builder:<id>[@<version>] (deprecated)
+//
+// If version is omitted, the version returned will be empty. Any "from=builder:" or "urn:cnb" prefix will be ignored.
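+//
+// For example:
+//
+//	ParseIDLocator("example/lang@1.2.3")                  // => ("example/lang", "1.2.3")
+//	ParseIDLocator("urn:cnb:registry:example/lang@1.2.3") // => ("example/lang", "1.2.3")
+//	ParseIDLocator("some.buildpack.id")                   // => ("some.buildpack.id", "")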
+func ParseIDLocator(locator string) (id string, version string) {
+	nakedLocator := parseRegistryLocator(parseBuilderLocator(locator))
+
+	parts := strings.Split(nakedLocator, "@")
+	if len(parts) == 2 {
+		return parts[0], parts[1]
+	}
+	return parts[0], ""
+}
+
+// ParsePackageLocator parses a locator (in format `[docker://][<host>/]<path>[:<tag>⏐@<digest>]`) to image name (`[<host>/]<path>[:<tag>⏐@<digest>]`)
+func ParsePackageLocator(locator string) (imageName string) {
+	return strings.TrimPrefix(
+		strings.TrimPrefix(
+			strings.TrimPrefix(locator, fromDockerPrefix+"//"),
+			fromDockerPrefix+"/"),
+		fromDockerPrefix)
+}
+
+// ParseRegistryID parses a registry id (i.e. `<namespace>/<name>@<version>`) into namespace, name and version components.
+//
+// Supported formats:
+// - <namespace>/<name>[@<version>]
+// - urn:cnb:registry:<namespace>/<name>[@<version>]
+//
+func ParseRegistryID(registryID string) (namespace string, name string, version string, err error) {
+	id, version := ParseIDLocator(registryID)
+
+	parts := strings.Split(id, "/")
+	if len(parts) != 2 {
+		return "", "", "", fmt.Errorf("invalid registry ID: %s", registryID)
+	}
+
+	return parts[0], parts[1], version, nil
+}
+
+func parseRegistryLocator(locator string) (path string) {
+	return strings.TrimPrefix(locator, fromRegistryPrefix+":")
+}
+
+func parseBuilderLocator(locator string) (path string) {
+	return strings.TrimPrefix(
+		strings.TrimPrefix(locator, deprecatedFromBuilderPrefix+":"),
+		fromBuilderPrefix+":")
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/build.go b/vendor/github.com/buildpacks/pack/pkg/client/build.go
new file mode 100644
index 0000000000..3c47865589
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/build.go
@@ -0,0 +1,931 @@
+package client
+
+import (
+	"context"
+	"crypto/rand"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/Masterminds/semver"
+	"github.com/buildpacks/imgutil"
+	"github.com/buildpacks/imgutil/local"
+	"github.com/buildpacks/imgutil/remote"
+	"github.com/buildpacks/lifecycle/platform"
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/volume/mounts"
+	"github.com/google/go-containerregistry/pkg/name"
+	"github.com/pkg/errors"
+	ignore "github.com/sabhiram/go-gitignore"
+
+	"github.com/buildpacks/pack/internal/build"
+	"github.com/buildpacks/pack/internal/builder"
+	internalConfig "github.com/buildpacks/pack/internal/config"
+	pname "github.com/buildpacks/pack/internal/name"
+	"github.com/buildpacks/pack/internal/stack"
+	"github.com/buildpacks/pack/internal/stringset"
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/internal/termui"
+	"github.com/buildpacks/pack/pkg/archive"
+	"github.com/buildpacks/pack/pkg/buildpack"
+	"github.com/buildpacks/pack/pkg/dist"
+	"github.com/buildpacks/pack/pkg/image"
+	"github.com/buildpacks/pack/pkg/logging"
+	projectTypes "github.com/buildpacks/pack/pkg/project/types"
+)
+
+const (
+	minLifecycleVersionSupportingCreator = "0.7.4"
+	prevLifecycleVersionSupportingImage  = "0.6.1"
+	minLifecycleVersionSupportingImage   = "0.7.5"
+)
+
+// LifecycleExecutor executes the lifecycle which satisfies the Cloud Native Buildpacks Lifecycle specification.
+// Implementations of the Lifecycle must execute the following phases by calling the +// phase-specific lifecycle binary in order: +// +// Detection: /cnb/lifecycle/detector +// Analysis: /cnb/lifecycle/analyzer +// Cache Restoration: /cnb/lifecycle/restorer +// Build: /cnb/lifecycle/builder +// Export: /cnb/lifecycle/exporter +// +// or invoke the single creator binary: +// +// Creator: /cnb/lifecycle/creator +// +type LifecycleExecutor interface { + // Execute is responsible for invoking each of these binaries + // with the desired configuration. + Execute(ctx context.Context, opts build.LifecycleOptions) error +} + +type IsTrustedBuilder func(string) bool + +// BuildOptions defines configuration settings for a Build. +type BuildOptions struct { + // The base directory to use to resolve relative assets + RelativeBaseDir string + + // required. Name of output image. + Image string + + // required. Builder image name. + Builder string + + // Name of the buildpack registry. Used to + // add buildpacks to a build. + Registry string + + // AppPath is the path to application bits. + // If unset it defaults to current working directory. + AppPath string + + // Specify the run image the Image will be + // built atop. + RunImage string + + // Address of docker daemon exposed to build container + // e.g. tcp://example.com:1234, unix:///run/user/1000/podman/podman.sock + DockerHost string + + // Used to determine a run-image mirror if Run Image is empty. + // Used in combination with Builder metadata to determine to the the 'best' mirror. + // 'best' is defined as: + // - if Publish is true, the best mirror matches registry we are publishing to. + // - if Publish is false, the best mirror matches a registry specified in Image. + // - otherwise if both of the above did not match, use mirror specified in + // the builder metadata + AdditionalMirrors map[string][]string + + // User provided environment variables to the buildpacks. + // Buildpacks may both read and overwrite these values. + Env map[string]string + + // Option only valid if Publish is true + // Create an additional image that contains cache=true layers and push it to the registry. + CacheImage string + + // Option passed directly to the lifecycle. + // If true, publishes Image directly to a registry. + // Assumes Image contains a valid registry with credentials + // provided by the docker client. + Publish bool + + // Clear the build cache from previous builds. + ClearCache bool + + // Launch a terminal UI to depict the build process + Interactive bool + + // List of buildpack images or archives to add to a builder. + // These buildpacks may overwrite those on the builder if they + // share both an ID and Version with a buildpack on the builder. + Buildpacks []string + + // Additional image tags to push to, each will contain contents identical to Image + AdditionalTags []string + + // Configure the proxy environment variables, + // These variables will only be set in the build image + // and will not be used if proxy env vars are already set. + ProxyConfig *ProxyConfig + + // Configure network and volume mounts for the build containers. + ContainerConfig ContainerConfig + + // Process type that will be used when setting container start command. + DefaultProcessType string + + // Strategy for updating local images before a build. 
+ PullPolicy image.PullPolicy + + // ProjectDescriptorBaseDir is the base directory to find relative resources referenced by the ProjectDescriptor + ProjectDescriptorBaseDir string + + // ProjectDescriptor describes the project and any configuration specific to the project + ProjectDescriptor projectTypes.Descriptor + + // The lifecycle image that will be used for the analysis, restore and export phases + // when using an untrusted builder. + LifecycleImage string + + // The location at which to mount the AppDir in the build image. + Workspace string + + // User's group id used to build the image + GroupID int + + // A previous image to set to a particular tag reference, digest reference, or (when performing a daemon build) image ID; + PreviousImage string + + // TrustBuilder when true optimizes builds by running + // all lifecycle phases in a single container. + // This places registry credentials on the builder's build image. + // Only trust builders from reputable sources. + TrustBuilder IsTrustedBuilder +} + +// ProxyConfig specifies proxy setting to be set as environment variables in a container. +type ProxyConfig struct { + HTTPProxy string // Used to set HTTP_PROXY env var. + HTTPSProxy string // Used to set HTTPS_PROXY env var. + NoProxy string // Used to set NO_PROXY env var. +} + +// ContainerConfig is additional configuration of the docker container that all build steps +// occur within. +type ContainerConfig struct { + // Configure network settings of the build containers. + // The value of Network is handed directly to the docker client. + // For valid values of this field see: + // https://docs.docker.com/network/#network-drivers + Network string + + // Volumes are accessible during both detect build phases + // should have the form: /path/in/host:/path/in/container. + // For more about volume mounts, and their permissions see: + // https://docs.docker.com/storage/volumes/ + // + // It is strongly recommended you do not override any of the + // paths with volume mounts at the following locations: + // - /cnb + // - /layers + // - anything below /cnb/** + Volumes []string +} + +var IsSuggestedBuilderFunc = func(b string) bool { + for _, suggestedBuilder := range builder.SuggestedBuilders { + if b == suggestedBuilder.Image { + return true + } + } + return false +} + +// Build configures settings for the build container(s) and lifecycle. +// It then invokes the lifecycle to build an app image. +// If any configuration is deemed invalid, or if any lifecycle phases fail, +// an error will be returned and no image produced. 
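+//
+// A minimal invocation might look like the following sketch (image and builder
+// names are illustrative):
+//
+//	cl, err := client.NewClient()
+//	if err != nil {
+//		// handle error
+//	}
+//	err = cl.Build(context.Background(), client.BuildOptions{
+//		Image:   "registry.example.com/my-app:latest",
+//		Builder: "example/builder:latest",
+//		AppPath: "./my-app",
+//	})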
+func (c *Client) Build(ctx context.Context, opts BuildOptions) error { + imageRef, err := c.parseTagReference(opts.Image) + if err != nil { + return errors.Wrapf(err, "invalid image name '%s'", opts.Image) + } + + appPath, err := c.processAppPath(opts.AppPath) + if err != nil { + return errors.Wrapf(err, "invalid app path '%s'", opts.AppPath) + } + + proxyConfig := c.processProxyConfig(opts.ProxyConfig) + + builderRef, err := c.processBuilderName(opts.Builder) + if err != nil { + return errors.Wrapf(err, "invalid builder '%s'", opts.Builder) + } + + rawBuilderImage, err := c.imageFetcher.Fetch(ctx, builderRef.Name(), image.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy}) + if err != nil { + return errors.Wrapf(err, "failed to fetch builder image '%s'", builderRef.Name()) + } + + bldr, err := c.getBuilder(rawBuilderImage) + if err != nil { + return errors.Wrapf(err, "invalid builder %s", style.Symbol(opts.Builder)) + } + + runImageName := c.resolveRunImage(opts.RunImage, imageRef.Context().RegistryStr(), builderRef.Context().RegistryStr(), bldr.Stack(), opts.AdditionalMirrors, opts.Publish) + runImage, err := c.validateRunImage(ctx, runImageName, opts.PullPolicy, opts.Publish, bldr.StackID) + if err != nil { + return errors.Wrapf(err, "invalid run-image '%s'", runImageName) + } + + var runMixins []string + if _, err := dist.GetLabel(runImage, stack.MixinsLabel, &runMixins); err != nil { + return err + } + + fetchedBPs, order, err := c.processBuildpacks(ctx, bldr.Image(), bldr.Buildpacks(), bldr.Order(), bldr.StackID, opts) + if err != nil { + return err + } + + if err := c.validateMixins(fetchedBPs, bldr, runImageName, runMixins); err != nil { + return errors.Wrap(err, "validating stack mixins") + } + + buildEnvs := map[string]string{} + for _, envVar := range opts.ProjectDescriptor.Build.Env { + buildEnvs[envVar.Name] = envVar.Value + } + + for k, v := range opts.Env { + buildEnvs[k] = v + } + + ephemeralBuilder, err := c.createEphemeralBuilder(rawBuilderImage, buildEnvs, order, fetchedBPs) + if err != nil { + return err + } + defer c.docker.ImageRemove(context.Background(), ephemeralBuilder.Name(), types.ImageRemoveOptions{Force: true}) + + builderPlatformAPIs := append( + ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Deprecated, + ephemeralBuilder.LifecycleDescriptor().APIs.Platform.Supported..., + ) + + if !supportsPlatformAPI(builderPlatformAPIs) { + c.logger.Debugf("pack %s supports Platform API(s): %s", c.version, strings.Join(build.SupportedPlatformAPIVersions.AsStrings(), ", ")) + c.logger.Debugf("Builder %s supports Platform API(s): %s", style.Symbol(opts.Builder), strings.Join(builderPlatformAPIs.AsStrings(), ", ")) + return errors.Errorf("Builder %s is incompatible with this version of pack", style.Symbol(opts.Builder)) + } + + imgOS, err := rawBuilderImage.OS() + if err != nil { + return errors.Wrapf(err, "getting builder OS") + } + + processedVolumes, warnings, err := processVolumes(imgOS, opts.ContainerConfig.Volumes) + if err != nil { + return err + } + + for _, warning := range warnings { + c.logger.Warn(warning) + } + + fileFilter, err := getFileFilter(opts.ProjectDescriptor) + if err != nil { + return err + } + + runImageName, err = pname.TranslateRegistry(runImageName, c.registryMirrors, c.logger) + if err != nil { + return err + } + + projectMetadata := platform.ProjectMetadata{} + if c.experimental { + version := opts.ProjectDescriptor.Project.Version + sourceURL := opts.ProjectDescriptor.Project.SourceURL + if version != "" || sourceURL != "" { + 
projectMetadata.Source = &platform.ProjectSource{ + Type: "project", + Version: map[string]interface{}{"declared": version}, + Metadata: map[string]interface{}{"url": sourceURL}, + } + } + } + + // Default mode: if the TrustBuilder option is not set, trust the suggested builders. + if opts.TrustBuilder == nil { + opts.TrustBuilder = IsSuggestedBuilderFunc + } + + lifecycleOpts := build.LifecycleOptions{ + AppPath: appPath, + Image: imageRef, + Builder: ephemeralBuilder, + LifecycleImage: ephemeralBuilder.Name(), + RunImage: runImageName, + ProjectMetadata: projectMetadata, + ClearCache: opts.ClearCache, + Publish: opts.Publish, + TrustBuilder: opts.TrustBuilder(opts.Builder), + UseCreator: false, + DockerHost: opts.DockerHost, + CacheImage: opts.CacheImage, + HTTPProxy: proxyConfig.HTTPProxy, + HTTPSProxy: proxyConfig.HTTPSProxy, + NoProxy: proxyConfig.NoProxy, + Network: opts.ContainerConfig.Network, + AdditionalTags: opts.AdditionalTags, + Volumes: processedVolumes, + DefaultProcessType: opts.DefaultProcessType, + FileFilter: fileFilter, + Workspace: opts.Workspace, + GID: opts.GroupID, + PreviousImage: opts.PreviousImage, + Interactive: opts.Interactive, + Termui: termui.NewTermui(), + } + + lifecycleVersion := ephemeralBuilder.LifecycleDescriptor().Info.Version + // Technically the creator is supported as of platform API version 0.3 (lifecycle version 0.7.0+) but earlier versions + // have bugs that make using the creator problematic. + lifecycleSupportsCreator := !lifecycleVersion.LessThan(semver.MustParse(minLifecycleVersionSupportingCreator)) + + if lifecycleSupportsCreator && opts.TrustBuilder(opts.Builder) { + lifecycleOpts.UseCreator = true + // no need to fetch a lifecycle image, it won't be used + if err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil { + return errors.Wrap(err, "executing lifecycle") + } + + return c.logImageNameAndSha(ctx, opts.Publish, imageRef) + } + + if !opts.TrustBuilder(opts.Builder) { + if lifecycleImageSupported(imgOS, lifecycleVersion) { + lifecycleImageName := opts.LifecycleImage + if lifecycleImageName == "" { + lifecycleImageName = fmt.Sprintf("%s:%s", internalConfig.DefaultLifecycleImageRepo, lifecycleVersion.String()) + } + + imgArch, err := rawBuilderImage.Architecture() + if err != nil { + return errors.Wrapf(err, "getting builder architecture") + } + + lifecycleImage, err := c.imageFetcher.Fetch( + ctx, + lifecycleImageName, + image.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy, Platform: fmt.Sprintf("%s/%s", imgOS, imgArch)}, + ) + if err != nil { + return errors.Wrap(err, "fetching lifecycle image") + } + + lifecycleOpts.LifecycleImage = lifecycleImage.Name() + } else { + return errors.Errorf("Lifecycle %s does not have an associated lifecycle image. Builder must be trusted.", lifecycleVersion.String()) + } + } + + if err := c.lifecycleExecutor.Execute(ctx, lifecycleOpts); err != nil { + return errors.Wrap(err, "executing lifecycle. This may be the result of using an untrusted builder") + } + + return c.logImageNameAndSha(ctx, opts.Publish, imageRef) +} + +func getFileFilter(descriptor projectTypes.Descriptor) (func(string) bool, error) { + if len(descriptor.Build.Exclude) > 0 { + excludes := ignore.CompileIgnoreLines(descriptor.Build.Exclude...) + return func(fileName string) bool { + return !excludes.MatchesPath(fileName) + }, nil + } + if len(descriptor.Build.Include) > 0 { + includes := ignore.CompileIgnoreLines(descriptor.Build.Include...) 
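+		// Note: a non-empty exclude list takes precedence (the function returned
+		// above); this filter keeps only files matching an include pattern.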
+ return includes.MatchesPath, nil + } + + return nil, nil +} + +func lifecycleImageSupported(builderOS string, lifecycleVersion *builder.Version) bool { + return lifecycleVersion.Equal(builder.VersionMustParse(prevLifecycleVersionSupportingImage)) || + !lifecycleVersion.LessThan(semver.MustParse(minLifecycleVersionSupportingImage)) +} + +// supportsPlatformAPI determines whether pack can build using the builder based on the builder's supported Platform API versions. +func supportsPlatformAPI(builderPlatformAPIs builder.APISet) bool { + for _, packSupportedAPI := range build.SupportedPlatformAPIVersions { + for _, builderSupportedAPI := range builderPlatformAPIs { + supportsPlatform := packSupportedAPI.Compare(builderSupportedAPI) == 0 + if supportsPlatform { + return true + } + } + } + + return false +} + +func (c *Client) processBuilderName(builderName string) (name.Reference, error) { + if builderName == "" { + return nil, errors.New("builder is a required parameter if the client has no default builder") + } + return name.ParseReference(builderName, name.WeakValidation) +} + +func (c *Client) getBuilder(img imgutil.Image) (*builder.Builder, error) { + bldr, err := builder.FromImage(img) + if err != nil { + return nil, err + } + if bldr.Stack().RunImage.Image == "" { + return nil, errors.New("builder metadata is missing run-image") + } + + lifecycleDescriptor := bldr.LifecycleDescriptor() + if lifecycleDescriptor.Info.Version == nil { + return nil, errors.New("lifecycle version must be specified in builder") + } + if len(lifecycleDescriptor.APIs.Buildpack.Supported) == 0 { + return nil, errors.New("supported Lifecycle Buildpack APIs not specified") + } + if len(lifecycleDescriptor.APIs.Platform.Supported) == 0 { + return nil, errors.New("supported Lifecycle Platform APIs not specified") + } + + return bldr, nil +} + +func (c *Client) validateRunImage(context context.Context, name string, pullPolicy image.PullPolicy, publish bool, expectedStack string) (imgutil.Image, error) { + if name == "" { + return nil, errors.New("run image must be specified") + } + img, err := c.imageFetcher.Fetch(context, name, image.FetchOptions{Daemon: !publish, PullPolicy: pullPolicy}) + if err != nil { + return nil, err + } + stackID, err := img.Label("io.buildpacks.stack.id") + if err != nil { + return nil, err + } + if stackID != expectedStack { + return nil, fmt.Errorf("run-image stack id '%s' does not match builder stack '%s'", stackID, expectedStack) + } + return img, nil +} + +func (c *Client) validateMixins(additionalBuildpacks []buildpack.Buildpack, bldr *builder.Builder, runImageName string, runMixins []string) error { + if err := stack.ValidateMixins(bldr.Image().Name(), bldr.Mixins(), runImageName, runMixins); err != nil { + return err + } + + bps, err := allBuildpacks(bldr.Image(), additionalBuildpacks) + if err != nil { + return err + } + mixins := assembleAvailableMixins(bldr.Mixins(), runMixins) + + for _, bp := range bps { + if err := bp.EnsureStackSupport(bldr.StackID, mixins, true); err != nil { + return err + } + } + return nil +} + +// assembleAvailableMixins returns the set of mixins that are common between the two provided sets, plus build-only mixins and run-only mixins. +func assembleAvailableMixins(buildMixins, runMixins []string) []string { + // NOTE: We cannot simply union the two mixin sets, as this could introduce a mixin that is only present on one stack + // image but not the other. 
A buildpack that happens to require the mixin would fail to run properly, even though validation + // would pass. + // + // For example: + // + // Incorrect: + // Run image mixins: [A, B] + // Build image mixins: [A] + // Merged: [A, B] + // Buildpack requires: [A, B] + // Match? Yes + // + // Correct: + // Run image mixins: [A, B] + // Build image mixins: [A] + // Merged: [A] + // Buildpack requires: [A, B] + // Match? No + + buildOnly := stack.FindStageMixins(buildMixins, "build") + runOnly := stack.FindStageMixins(runMixins, "run") + _, _, common := stringset.Compare(buildMixins, runMixins) + + return append(common, append(buildOnly, runOnly...)...) +} + +// allBuildpacks aggregates all buildpacks declared on the image with additional buildpacks passed in. They are sorted +// by ID then Version. +func allBuildpacks(builderImage imgutil.Image, additionalBuildpacks []buildpack.Buildpack) ([]dist.BuildpackDescriptor, error) { + var all []dist.BuildpackDescriptor + var bpLayers dist.BuildpackLayers + if _, err := dist.GetLabel(builderImage, dist.BuildpackLayersLabel, &bpLayers); err != nil { + return nil, err + } + for id, bps := range bpLayers { + for ver, bp := range bps { + desc := dist.BuildpackDescriptor{ + Info: dist.BuildpackInfo{ + ID: id, + Version: ver, + }, + Stacks: bp.Stacks, + Order: bp.Order, + } + all = append(all, desc) + } + } + for _, bp := range additionalBuildpacks { + all = append(all, bp.Descriptor()) + } + + sort.Slice(all, func(i, j int) bool { + if all[i].Info.ID != all[j].Info.ID { + return all[i].Info.ID < all[j].Info.ID + } + return all[i].Info.Version < all[j].Info.Version + }) + + return all, nil +} + +func (c *Client) processAppPath(appPath string) (string, error) { + var ( + resolvedAppPath string + err error + ) + + if appPath == "" { + if appPath, err = os.Getwd(); err != nil { + return "", errors.Wrap(err, "get working dir") + } + } + + if resolvedAppPath, err = filepath.EvalSymlinks(appPath); err != nil { + return "", errors.Wrap(err, "evaluate symlink") + } + + if resolvedAppPath, err = filepath.Abs(resolvedAppPath); err != nil { + return "", errors.Wrap(err, "resolve absolute path") + } + + fi, err := os.Stat(resolvedAppPath) + if err != nil { + return "", errors.Wrap(err, "stat file") + } + + if !fi.IsDir() { + isZip, err := archive.IsZip(filepath.Clean(resolvedAppPath)) + if err != nil { + return "", errors.Wrap(err, "check zip") + } + + if !isZip { + return "", errors.New("app path must be a directory or zip") + } + } + + return resolvedAppPath, nil +} + +func (c *Client) processProxyConfig(config *ProxyConfig) ProxyConfig { + var ( + httpProxy, httpsProxy, noProxy string + ok bool + ) + if config != nil { + return *config + } + if httpProxy, ok = os.LookupEnv("HTTP_PROXY"); !ok { + httpProxy = os.Getenv("http_proxy") + } + if httpsProxy, ok = os.LookupEnv("HTTPS_PROXY"); !ok { + httpsProxy = os.Getenv("https_proxy") + } + if noProxy, ok = os.LookupEnv("NO_PROXY"); !ok { + noProxy = os.Getenv("no_proxy") + } + return ProxyConfig{ + HTTPProxy: httpProxy, + HTTPSProxy: httpsProxy, + NoProxy: noProxy, + } +} + +// processBuildpacks computes an order group based on the existing builder order and declared buildpacks. Additionally, +// it returns buildpacks that should be added to the builder. 
+// +// Visual examples: +// +// BUILDER ORDER +// ---------- +// - group: +// - A +// - B +// - group: +// - A +// +// WITH DECLARED: "from=builder", X +// ---------- +// - group: +// - A +// - B +// - X +// - group: +// - A +// - X +// +// WITH DECLARED: X, "from=builder", Y +// ---------- +// - group: +// - X +// - A +// - B +// - Y +// - group: +// - X +// - A +// - Y +// +// WITH DECLARED: X +// ---------- +// - group: +// - X +// +// WITH DECLARED: A +// ---------- +// - group: +// - A +func (c *Client) processBuildpacks(ctx context.Context, builderImage imgutil.Image, builderBPs []dist.BuildpackInfo, builderOrder dist.Order, stackID string, opts BuildOptions) (fetchedBPs []buildpack.Buildpack, order dist.Order, err error) { + pullPolicy := opts.PullPolicy + publish := opts.Publish + registry := opts.Registry + relativeBaseDir := opts.RelativeBaseDir + declaredBPs := opts.Buildpacks + + // declare buildpacks provided by project descriptor when no buildpacks are declared + if len(declaredBPs) == 0 && len(opts.ProjectDescriptor.Build.Buildpacks) != 0 { + relativeBaseDir = opts.ProjectDescriptorBaseDir + + for _, bp := range opts.ProjectDescriptor.Build.Buildpacks { + switch { + case bp.ID != "" && bp.Script.Inline != "" && bp.Version == "" && bp.URI == "": + if bp.Script.API == "" { + return nil, nil, errors.New("Missing API version for inline buildpack") + } + + pathToInlineBuildpack, err := createInlineBuildpack(bp, stackID) + if err != nil { + return nil, nil, errors.Wrap(err, "Could not create temporary inline buildpack") + } + declaredBPs = append(declaredBPs, pathToInlineBuildpack) + case bp.URI != "": + declaredBPs = append(declaredBPs, bp.URI) + case bp.ID != "" && bp.Version != "": + declaredBPs = append(declaredBPs, fmt.Sprintf("%s@%s", bp.ID, bp.Version)) + default: + return nil, nil, errors.New("Invalid buildpack defined in project descriptor") + } + } + } + + order = dist.Order{{Group: []dist.BuildpackRef{}}} + for _, bp := range declaredBPs { + locatorType, err := buildpack.GetLocatorType(bp, relativeBaseDir, builderBPs) + if err != nil { + return nil, nil, err + } + + switch locatorType { + case buildpack.FromBuilderLocator: + switch { + case len(order) == 0 || len(order[0].Group) == 0: + order = builderOrder + case len(order) > 1: + // This should only ever be possible if they are using from=builder twice which we don't allow + return nil, nil, errors.New("buildpacks from builder can only be defined once") + default: + newOrder := dist.Order{} + groupToAdd := order[0].Group + for _, bOrderEntry := range builderOrder { + newEntry := dist.OrderEntry{Group: append(groupToAdd, bOrderEntry.Group...)} + newOrder = append(newOrder, newEntry) + } + + order = newOrder + } + case buildpack.IDLocator: + id, version := buildpack.ParseIDLocator(bp) + order = appendBuildpackToOrder(order, dist.BuildpackInfo{ + ID: id, + Version: version, + }) + default: + imageOS, err := builderImage.OS() + if err != nil { + return fetchedBPs, order, errors.Wrapf(err, "getting OS from %s", style.Symbol(builderImage.Name())) + } + mainBP, depBPs, err := c.buildpackDownloader.Download(ctx, bp, buildpack.DownloadOptions{ + RegistryName: registry, + ImageOS: imageOS, + RelativeBaseDir: relativeBaseDir, + Daemon: !publish, + PullPolicy: pullPolicy, + }) + if err != nil { + return fetchedBPs, order, errors.Wrap(err, "downloading buildpack") + } + fetchedBPs = append(append(fetchedBPs, mainBP), depBPs...) 
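+			// Only the main buildpack is appended to each order group; its
+			// dependencies were still fetched above so that they can be added
+			// to the ephemeral builder alongside it.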
+ order = appendBuildpackToOrder(order, mainBP.Descriptor().Info) + } + } + + return fetchedBPs, order, nil +} + +func appendBuildpackToOrder(order dist.Order, bpInfo dist.BuildpackInfo) (newOrder dist.Order) { + for _, orderEntry := range order { + newEntry := orderEntry + newEntry.Group = append(newEntry.Group, dist.BuildpackRef{ + BuildpackInfo: bpInfo, + Optional: false, + }) + newOrder = append(newOrder, newEntry) + } + + return newOrder +} + +func (c *Client) createEphemeralBuilder(rawBuilderImage imgutil.Image, env map[string]string, order dist.Order, buildpacks []buildpack.Buildpack) (*builder.Builder, error) { + origBuilderName := rawBuilderImage.Name() + bldr, err := builder.New(rawBuilderImage, fmt.Sprintf("pack.local/builder/%x:latest", randString(10))) + if err != nil { + return nil, errors.Wrapf(err, "invalid builder %s", style.Symbol(origBuilderName)) + } + + bldr.SetEnv(env) + for _, bp := range buildpacks { + bpInfo := bp.Descriptor().Info + c.logger.Debugf("Adding buildpack %s version %s to builder", style.Symbol(bpInfo.ID), style.Symbol(bpInfo.Version)) + bldr.AddBuildpack(bp) + } + if len(order) > 0 && len(order[0].Group) > 0 { + c.logger.Debug("Setting custom order") + bldr.SetOrder(order) + } + + if err := bldr.Save(c.logger, builder.CreatorMetadata{Version: c.version}); err != nil { + return nil, err + } + return bldr, nil +} + +// Returns a string iwith lowercase a-z, of length n +func randString(n int) string { + b := make([]byte, n) + _, err := rand.Read(b) + if err != nil { + panic(err) + } + for i := range b { + b[i] = 'a' + (b[i] % 26) + } + return string(b) +} + +func processVolumes(imgOS string, volumes []string) (processed []string, warnings []string, err error) { + parserOS := mounts.OSLinux + if imgOS == "windows" { + parserOS = mounts.OSWindows + } + parser := mounts.NewParser(parserOS) + for _, v := range volumes { + volume, err := parser.ParseMountRaw(v, "") + if err != nil { + return nil, nil, errors.Wrapf(err, "platform volume %q has invalid format", v) + } + + sensitiveDirs := []string{"/cnb", "/layers"} + if imgOS == "windows" { + sensitiveDirs = []string{`c:/cnb`, `c:\cnb`, `c:/layers`, `c:\layers`} + } + for _, p := range sensitiveDirs { + if strings.HasPrefix(strings.ToLower(volume.Spec.Target), p) { + warnings = append(warnings, fmt.Sprintf("Mounting to a sensitive directory %s", style.Symbol(volume.Spec.Target))) + } + } + + processed = append(processed, fmt.Sprintf("%s:%s:%s", volume.Spec.Source, volume.Spec.Target, processMode(volume.Mode))) + } + return processed, warnings, nil +} + +func processMode(mode string) string { + if mode == "" { + return "ro" + } + + return mode +} + +func (c *Client) logImageNameAndSha(ctx context.Context, publish bool, imageRef name.Reference) error { + // The image name and sha are printed in the lifecycle logs, and there is no need to print it again, unless output is suppressed. 
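+	// Hence the early return when the logger is NOT in quiet mode: in that case
+	// the lifecycle output already included the image name and digest.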
+ if !logging.IsQuiet(c.logger) { + return nil + } + + img, err := c.imageFetcher.Fetch(ctx, imageRef.Name(), image.FetchOptions{Daemon: !publish, PullPolicy: image.PullNever}) + if err != nil { + return errors.Wrap(err, "fetching built image") + } + + id, err := img.Identifier() + if err != nil { + return errors.Wrap(err, "reading image sha") + } + + // Remove tag, if it exists, from the image name + imgName := strings.TrimSuffix(imageRef.String(), imageRef.Identifier()) + imgNameAndSha := fmt.Sprintf("%s@%s\n", imgName, parseDigestFromImageID(id)) + + // Access the logger's Writer directly to bypass ReportSuccessfulQuietBuild mode + _, err = c.logger.Writer().Write([]byte(imgNameAndSha)) + return err +} + +func parseDigestFromImageID(id imgutil.Identifier) string { + var digest string + switch v := id.(type) { + case local.IDIdentifier: + digest = v.String() + case remote.DigestIdentifier: + digest = v.Digest.DigestStr() + } + + digest = strings.TrimPrefix(digest, "sha256:") + return fmt.Sprintf("sha256:%s", digest) +} + +func createInlineBuildpack(bp projectTypes.Buildpack, stackID string) (string, error) { + pathToInlineBuilpack, err := ioutil.TempDir("", "inline-cnb") + if err != nil { + return pathToInlineBuilpack, err + } + + if err = createBuildpackTOML(pathToInlineBuilpack, bp.ID, "0.0.0", bp.Script.API, []dist.Stack{{ID: stackID}}, nil); err != nil { + return pathToInlineBuilpack, err + } + + shell := bp.Script.Shell + if shell == "" { + shell = "/bin/sh" + } + + binBuild := fmt.Sprintf(`#!%s + +%s +`, shell, bp.Script.Inline) + + binDetect := fmt.Sprintf(`#!%s + +exit 0 +`, shell) + + if err = createBinScript(pathToInlineBuilpack, "build", binBuild, nil); err != nil { + return pathToInlineBuilpack, err + } + + if err = createBinScript(pathToInlineBuilpack, "build.bat", bp.Script.Inline, nil); err != nil { + return pathToInlineBuilpack, err + } + + if err = createBinScript(pathToInlineBuilpack, "detect", binDetect, nil); err != nil { + return pathToInlineBuilpack, err + } + + if err = createBinScript(pathToInlineBuilpack, "detect.bat", bp.Script.Inline, nil); err != nil { + return pathToInlineBuilpack, err + } + + return pathToInlineBuilpack, nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/client/client.go b/vendor/github.com/buildpacks/pack/pkg/client/client.go new file mode 100644 index 0000000000..0dc7b311f5 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/client/client.go @@ -0,0 +1,275 @@ +/* +Package client provides all the functionally provided by pack as a library through a go api. + +Prerequisites + +In order to use most functionality, you will need an OCI runtime such as Docker or podman installed. + +References + +This package provides functionality to create and manipulate all artifacts outlined in the Cloud Native Buildpacks specification. +An introduction to these artifacts and their usage can be found at https://buildpacks.io/docs/. + +The formal specification of the pack platform provides can be found at: https://github.com/buildpacks/spec. 
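+
+Usage
+
+Construct a Client with NewClient, optionally customizing it with Option
+functions such as WithLogger or WithDockerClient, and then call operations
+like Build on the result (see the documentation on each method).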
+*/
+package client
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+
+	"github.com/buildpacks/imgutil"
+	"github.com/buildpacks/imgutil/local"
+	"github.com/buildpacks/imgutil/remote"
+	dockerClient "github.com/docker/docker/client"
+	"github.com/google/go-containerregistry/pkg/authn"
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/pack"
+	"github.com/buildpacks/pack/internal/build"
+	iconfig "github.com/buildpacks/pack/internal/config"
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/pkg/blob"
+	"github.com/buildpacks/pack/pkg/buildpack"
+	"github.com/buildpacks/pack/pkg/image"
+	"github.com/buildpacks/pack/pkg/logging"
+)
+
+//go:generate mockgen -package testmocks -destination ../testmocks/mock_docker_client.go github.com/docker/docker/client CommonAPIClient
+
+//go:generate mockgen -package testmocks -destination ../testmocks/mock_image_fetcher.go github.com/buildpacks/pack/pkg/client ImageFetcher
+
+// ImageFetcher is an interface representing the ability to fetch local and remote images.
+type ImageFetcher interface {
+	// Fetch fetches an image by resolving it both remotely and locally depending on provided parameters.
+	// The pull behavior is dictated by the pullPolicy, which can have the following behavior:
+	//   - PullNever: try to use the daemon to return a `local.Image`.
+	//   - PullIfNotPresent: try to use the daemon to return a `local.Image`; if none is found, fetch a remote image.
+	//   - PullAlways: it will only try to fetch a remote image.
+	//
+	// These pull policies interact with the daemon argument.
+	// PullIfNotPresent with daemon = false gives the same behavior as PullAlways.
+	// There is a single invalid configuration: PullNever with daemon = false will always fail.
+	Fetch(ctx context.Context, name string, options image.FetchOptions) (imgutil.Image, error)
+}
+
+//go:generate mockgen -package testmocks -destination ../testmocks/mock_blob_downloader.go github.com/buildpacks/pack/pkg/client BlobDownloader
+
+// BlobDownloader is an interface for collecting both remote and local assets as blobs.
+type BlobDownloader interface {
+	// Download collects both local and remote assets and provides a blob object
+	// used to read asset contents.
+	Download(ctx context.Context, pathOrURI string) (blob.Blob, error)
+}
+
+//go:generate mockgen -package testmocks -destination ../testmocks/mock_image_factory.go github.com/buildpacks/pack/pkg/client ImageFactory
+
+// ImageFactory is an interface representing the ability to create a new OCI image.
+type ImageFactory interface {
+	// NewImage initializes an image object with required settings so that it
+	// can be written either locally or to a registry.
+	NewImage(repoName string, local bool, imageOS string) (imgutil.Image, error)
+}
+
+//go:generate mockgen -package testmocks -destination ../testmocks/mock_buildpack_downloader.go github.com/buildpacks/pack/pkg/client BuildpackDownloader
+
+// BuildpackDownloader is an interface for downloading and extracting buildpacks from various sources.
+type BuildpackDownloader interface {
+	// Download parses a buildpack URI and downloads the buildpack and any dependency buildpacks from the appropriate source.
+	Download(ctx context.Context, buildpackURI string, opts buildpack.DownloadOptions) (buildpack.Buildpack, []buildpack.Buildpack, error)
+}
+
+// Client is an orchestration object; it contains all parameters needed to
+// build an app image using Cloud Native Buildpacks.
+// All settings on this object should be changed through ClientOption functions. +type Client struct { + logger logging.Logger + docker dockerClient.CommonAPIClient + + keychain authn.Keychain + imageFactory ImageFactory + imageFetcher ImageFetcher + downloader BlobDownloader + lifecycleExecutor LifecycleExecutor + buildpackDownloader BuildpackDownloader + + experimental bool + registryMirrors map[string]string + version string +} + +// Option is a type of function that mutate settings on the client. +// Values in these functions are set through currying. +type Option func(c *Client) + +// WithLogger supply your own logger. +func WithLogger(l logging.Logger) Option { + return func(c *Client) { + c.logger = l + } +} + +// WithImageFactory supply your own image factory. +func WithImageFactory(f ImageFactory) Option { + return func(c *Client) { + c.imageFactory = f + } +} + +// WithFetcher supply your own Fetcher. +// A Fetcher retrieves both local and remote images to make them available. +func WithFetcher(f ImageFetcher) Option { + return func(c *Client) { + c.imageFetcher = f + } +} + +// WithDownloader supply your own downloader. +// A Downloader is used to gather buildpacks from both remote urls, or local sources. +func WithDownloader(d BlobDownloader) Option { + return func(c *Client) { + c.downloader = d + } +} + +// WithBuildpackDownloader supply your own BuildpackDownloader. +// A BuildpackDownloader is used to gather buildpacks from both remote urls, or local sources. +func WithBuildpackDownloader(d BuildpackDownloader) Option { + return func(c *Client) { + c.buildpackDownloader = d + } +} + +// Deprecated: use WithDownloader instead. +// +// WithCacheDir supply your own cache directory. +func WithCacheDir(path string) Option { + return func(c *Client) { + c.downloader = blob.NewDownloader(c.logger, path) + } +} + +// WithDockerClient supply your own docker client. +func WithDockerClient(docker dockerClient.CommonAPIClient) Option { + return func(c *Client) { + c.docker = docker + } +} + +// WithExperimental sets whether experimental features should be enabled. +func WithExperimental(experimental bool) Option { + return func(c *Client) { + c.experimental = experimental + } +} + +// WithRegistryMirrors sets mirrors to pull images from. +func WithRegistryMirrors(registryMirrors map[string]string) Option { + return func(c *Client) { + c.registryMirrors = registryMirrors + } +} + +// WithKeychain sets keychain of credentials to image registries +func WithKeychain(keychain authn.Keychain) Option { + return func(c *Client) { + c.keychain = keychain + } +} + +const DockerAPIVersion = "1.38" + +// NewClient allocates and returns a Client configured with the specified options. 
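+//
+// For example (a sketch; the supplied options are illustrative):
+//
+//	cl, err := NewClient(WithLogger(logger), WithExperimental(true))
+//	if err != nil {
+//		// handle error
+//	}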
+func NewClient(opts ...Option) (*Client, error) { + client := &Client{ + version: pack.Version, + keychain: authn.DefaultKeychain, + } + + for _, opt := range opts { + opt(client) + } + + if client.logger == nil { + client.logger = logging.NewSimpleLogger(os.Stderr) + } + + if client.docker == nil { + var err error + client.docker, err = dockerClient.NewClientWithOpts( + dockerClient.FromEnv, + dockerClient.WithVersion(DockerAPIVersion), + ) + if err != nil { + return nil, errors.Wrap(err, "creating docker client") + } + } + + if client.downloader == nil { + packHome, err := iconfig.PackHome() + if err != nil { + return nil, errors.Wrap(err, "getting pack home") + } + client.downloader = blob.NewDownloader(client.logger, filepath.Join(packHome, "download-cache")) + } + + if client.imageFetcher == nil { + client.imageFetcher = image.NewFetcher(client.logger, client.docker, image.WithRegistryMirrors(client.registryMirrors)) + } + + if client.imageFactory == nil { + client.imageFactory = &imageFactory{ + dockerClient: client.docker, + keychain: client.keychain, + } + } + + if client.buildpackDownloader == nil { + client.buildpackDownloader = buildpack.NewDownloader( + client.logger, + client.imageFetcher, + client.downloader, + ®istryResolver{ + logger: client.logger, + }, + ) + } + + client.lifecycleExecutor = build.NewLifecycleExecutor(client.logger, client.docker) + + return client, nil +} + +type registryResolver struct { + logger logging.Logger +} + +func (r *registryResolver) Resolve(registryName, bpName string) (string, error) { + cache, err := getRegistry(r.logger, registryName) + if err != nil { + return "", errors.Wrapf(err, "lookup registry %s", style.Symbol(registryName)) + } + + regBuildpack, err := cache.LocateBuildpack(bpName) + if err != nil { + return "", errors.Wrapf(err, "lookup buildpack %s", style.Symbol(bpName)) + } + + return regBuildpack.Address, nil +} + +type imageFactory struct { + dockerClient dockerClient.CommonAPIClient + keychain authn.Keychain +} + +func (f *imageFactory) NewImage(repoName string, daemon bool, imageOS string) (imgutil.Image, error) { + platform := imgutil.Platform{OS: imageOS} + + if daemon { + return local.NewImage(repoName, f.dockerClient, local.WithDefaultPlatform(platform)) + } + + return remote.NewImage(repoName, f.keychain, remote.WithDefaultPlatform(platform)) +} diff --git a/vendor/github.com/buildpacks/pack/pkg/client/common.go b/vendor/github.com/buildpacks/pack/pkg/client/common.go new file mode 100644 index 0000000000..b67916729a --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/client/common.go @@ -0,0 +1,127 @@ +package client + +import ( + "errors" + "fmt" + + "github.com/google/go-containerregistry/pkg/name" + + "github.com/buildpacks/pack/internal/builder" + "github.com/buildpacks/pack/internal/config" + "github.com/buildpacks/pack/internal/registry" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/logging" +) + +func (c *Client) parseTagReference(imageName string) (name.Reference, error) { + if imageName == "" { + return nil, errors.New("image is a required parameter") + } + if _, err := name.ParseReference(imageName, name.WeakValidation); err != nil { + return nil, err + } + ref, err := name.NewTag(imageName, name.WeakValidation) + if err != nil { + return nil, fmt.Errorf("'%s' is not a tag reference", imageName) + } + + return ref, nil +} + +func (c *Client) resolveRunImage(runImage, imgRegistry, bldrRegistry string, stackInfo builder.StackMetadata, additionalMirrors 
map[string][]string, publish bool) string { + if runImage != "" { + c.logger.Debugf("Using provided run-image %s", style.Symbol(runImage)) + return runImage + } + + preferredRegistry := bldrRegistry + if publish || bldrRegistry == "" { + preferredRegistry = imgRegistry + } + + runImageName := getBestRunMirror( + preferredRegistry, + stackInfo.RunImage.Image, + stackInfo.RunImage.Mirrors, + additionalMirrors[stackInfo.RunImage.Image], + ) + + switch { + case runImageName == stackInfo.RunImage.Image: + c.logger.Debugf("Selected run image %s", style.Symbol(runImageName)) + case contains(stackInfo.RunImage.Mirrors, runImageName): + c.logger.Debugf("Selected run image mirror %s", style.Symbol(runImageName)) + default: + c.logger.Debugf("Selected run image mirror %s from local config", style.Symbol(runImageName)) + } + return runImageName +} + +func getRegistry(logger logging.Logger, registryName string) (registry.Cache, error) { + home, err := config.PackHome() + if err != nil { + return registry.Cache{}, err + } + + if err := config.MkdirAll(home); err != nil { + return registry.Cache{}, err + } + + cfg, err := getConfig() + if err != nil { + return registry.Cache{}, err + } + + if registryName == "" { + return registry.NewDefaultRegistryCache(logger, home) + } + + for _, reg := range config.GetRegistries(cfg) { + if reg.Name == registryName { + return registry.NewRegistryCache(logger, home, reg.URL) + } + } + + return registry.Cache{}, fmt.Errorf("registry %s is not defined in your config file", style.Symbol(registryName)) +} + +func getConfig() (config.Config, error) { + path, err := config.DefaultConfigPath() + if err != nil { + return config.Config{}, err + } + + cfg, err := config.Read(path) + if err != nil { + return config.Config{}, err + } + return cfg, nil +} + +func contains(slc []string, v string) bool { + for _, s := range slc { + if s == v { + return true + } + } + return false +} + +func getBestRunMirror(registry string, runImage string, mirrors []string, preferredMirrors []string) string { + runImageList := append(preferredMirrors, append([]string{runImage}, mirrors...)...) + for _, img := range runImageList { + ref, err := name.ParseReference(img, name.WeakValidation) + if err != nil { + continue + } + if ref.Context().RegistryStr() == registry { + return img + } + } + + if len(preferredMirrors) > 0 { + return preferredMirrors[0] + } + + return runImage +} diff --git a/vendor/github.com/buildpacks/pack/pkg/client/create_builder.go b/vendor/github.com/buildpacks/pack/pkg/client/create_builder.go new file mode 100644 index 0000000000..34bca4b5ee --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/client/create_builder.go @@ -0,0 +1,272 @@ +package client + +import ( + "context" + "fmt" + + "github.com/Masterminds/semver" + "github.com/buildpacks/imgutil" + "github.com/pkg/errors" + + pubbldr "github.com/buildpacks/pack/builder" + "github.com/buildpacks/pack/internal/builder" + "github.com/buildpacks/pack/internal/paths" + "github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/pkg/buildpack" + "github.com/buildpacks/pack/pkg/image" +) + +// CreateBuilderOptions is a configuration object used to change the behavior of +// CreateBuilder. +type CreateBuilderOptions struct { + // The base directory to use to resolve relative assets + RelativeBaseDir string + + // Name of the builder. + BuilderName string + + // Configuration that defines the functionality a builder provides. 
+ Config pubbldr.Config + + // Skip building image locally, directly publish to a registry. + // Requires BuilderName to be a valid registry location. + Publish bool + + // Buildpack registry name. Defines where all registry buildpacks will be pulled from. + Registry string + + // Strategy for updating images before a build. + PullPolicy image.PullPolicy +} + +// CreateBuilder creates and saves a builder image to a registry with the provided options. +// If any configuration is invalid, it will error and exit without creating any images. +func (c *Client) CreateBuilder(ctx context.Context, opts CreateBuilderOptions) error { + if err := c.validateConfig(ctx, opts); err != nil { + return err + } + + bldr, err := c.createBaseBuilder(ctx, opts) + if err != nil { + return errors.Wrap(err, "failed to create builder") + } + + if err := c.addBuildpacksToBuilder(ctx, opts, bldr); err != nil { + return errors.Wrap(err, "failed to add buildpacks to builder") + } + + bldr.SetOrder(opts.Config.Order) + bldr.SetStack(opts.Config.Stack) + + return bldr.Save(c.logger, builder.CreatorMetadata{Version: c.version}) +} + +func (c *Client) validateConfig(ctx context.Context, opts CreateBuilderOptions) error { + if err := pubbldr.ValidateConfig(opts.Config); err != nil { + return errors.Wrap(err, "invalid builder config") + } + + if err := c.validateRunImageConfig(ctx, opts); err != nil { + return errors.Wrap(err, "invalid run image config") + } + + return nil +} + +func (c *Client) validateRunImageConfig(ctx context.Context, opts CreateBuilderOptions) error { + var runImages []imgutil.Image + for _, i := range append([]string{opts.Config.Stack.RunImage}, opts.Config.Stack.RunImageMirrors...) { + if !opts.Publish { + img, err := c.imageFetcher.Fetch(ctx, i, image.FetchOptions{Daemon: true, PullPolicy: opts.PullPolicy}) + if err != nil { + if errors.Cause(err) != image.ErrNotFound { + return errors.Wrap(err, "failed to fetch image") + } + } else { + runImages = append(runImages, img) + continue + } + } + + img, err := c.imageFetcher.Fetch(ctx, i, image.FetchOptions{Daemon: false, PullPolicy: opts.PullPolicy}) + if err != nil { + if errors.Cause(err) != image.ErrNotFound { + return errors.Wrap(err, "failed to fetch image") + } + c.logger.Warnf("run image %s is not accessible", style.Symbol(i)) + } else { + runImages = append(runImages, img) + } + } + + for _, img := range runImages { + stackID, err := img.Label("io.buildpacks.stack.id") + if err != nil { + return errors.Wrap(err, "failed to label image") + } + + if stackID != opts.Config.Stack.ID { + return fmt.Errorf( + "stack %s from builder config is incompatible with stack %s from run image %s", + style.Symbol(opts.Config.Stack.ID), + style.Symbol(stackID), + style.Symbol(img.Name()), + ) + } + } + + return nil +} + +func (c *Client) createBaseBuilder(ctx context.Context, opts CreateBuilderOptions) (*builder.Builder, error) { + baseImage, err := c.imageFetcher.Fetch(ctx, opts.Config.Stack.BuildImage, image.FetchOptions{Daemon: !opts.Publish, PullPolicy: opts.PullPolicy}) + if err != nil { + return nil, errors.Wrap(err, "fetch build image") + } + + c.logger.Debugf("Creating builder %s from build-image %s", style.Symbol(opts.BuilderName), style.Symbol(baseImage.Name())) + bldr, err := builder.New(baseImage, opts.BuilderName) + if err != nil { + return nil, errors.Wrap(err, "invalid build-image") + } + + os, err := baseImage.OS() + if err != nil { + return nil, errors.Wrap(err, "lookup image OS") + } + + if os == "windows" && !c.experimental { + return nil, 
NewExperimentError("Windows containers support is currently experimental.") + } + + bldr.SetDescription(opts.Config.Description) + + if bldr.StackID != opts.Config.Stack.ID { + return nil, fmt.Errorf( + "stack %s from builder config is incompatible with stack %s from build image", + style.Symbol(opts.Config.Stack.ID), + style.Symbol(bldr.StackID), + ) + } + + lifecycle, err := c.fetchLifecycle(ctx, opts.Config.Lifecycle, opts.RelativeBaseDir, os) + if err != nil { + return nil, errors.Wrap(err, "fetch lifecycle") + } + + bldr.SetLifecycle(lifecycle) + + return bldr, nil +} + +func (c *Client) fetchLifecycle(ctx context.Context, config pubbldr.LifecycleConfig, relativeBaseDir, os string) (builder.Lifecycle, error) { + if config.Version != "" && config.URI != "" { + return nil, errors.Errorf( + "%s can only declare %s or %s, not both", + style.Symbol("lifecycle"), style.Symbol("version"), style.Symbol("uri"), + ) + } + + var uri string + var err error + switch { + case config.Version != "": + v, err := semver.NewVersion(config.Version) + if err != nil { + return nil, errors.Wrapf(err, "%s must be a valid semver", style.Symbol("lifecycle.version")) + } + + uri = uriFromLifecycleVersion(*v, os) + case config.URI != "": + uri, err = paths.FilePathToURI(config.URI, relativeBaseDir) + if err != nil { + return nil, err + } + default: + uri = uriFromLifecycleVersion(*semver.MustParse(builder.DefaultLifecycleVersion), os) + } + + blob, err := c.downloader.Download(ctx, uri) + if err != nil { + return nil, errors.Wrap(err, "downloading lifecycle") + } + + lifecycle, err := builder.NewLifecycle(blob) + if err != nil { + return nil, errors.Wrap(err, "invalid lifecycle") + } + + return lifecycle, nil +} + +func (c *Client) addBuildpacksToBuilder(ctx context.Context, opts CreateBuilderOptions, bldr *builder.Builder) error { + for _, b := range opts.Config.Buildpacks { + c.logger.Debugf("Looking up buildpack %s", style.Symbol(b.DisplayString())) + + imageOS, err := bldr.Image().OS() + if err != nil { + return errors.Wrapf(err, "getting OS from %s", style.Symbol(bldr.Image().Name())) + } + + mainBP, depBPs, err := c.buildpackDownloader.Download(ctx, b.URI, buildpack.DownloadOptions{ + RegistryName: opts.Registry, + ImageOS: imageOS, + RelativeBaseDir: opts.RelativeBaseDir, + Daemon: !opts.Publish, + PullPolicy: opts.PullPolicy, + ImageName: b.ImageName, + }) + if err != nil { + return errors.Wrap(err, "downloading buildpack") + } + + err = validateBuildpack(mainBP, b.URI, b.ID, b.Version) + if err != nil { + return errors.Wrap(err, "invalid buildpack") + } + + bpDesc := mainBP.Descriptor() + for _, deprecatedAPI := range bldr.LifecycleDescriptor().APIs.Buildpack.Deprecated { + if deprecatedAPI.Equal(bpDesc.API) { + c.logger.Warnf("Buildpack %s is using deprecated Buildpacks API version %s", style.Symbol(bpDesc.Info.FullName()), style.Symbol(bpDesc.API.String())) + break + } + } + + for _, bp := range append([]buildpack.Buildpack{mainBP}, depBPs...) 
{
+			bldr.AddBuildpack(bp)
+		}
+	}
+
+	return nil
+}
+
+func validateBuildpack(bp buildpack.Buildpack, source, expectedID, expectedBPVersion string) error {
+	if expectedID != "" && bp.Descriptor().Info.ID != expectedID {
+		return fmt.Errorf(
+			"buildpack from URI %s has ID %s which does not match ID %s from builder config",
+			style.Symbol(source),
+			style.Symbol(bp.Descriptor().Info.ID),
+			style.Symbol(expectedID),
+		)
+	}
+
+	if expectedBPVersion != "" && bp.Descriptor().Info.Version != expectedBPVersion {
+		return fmt.Errorf(
+			"buildpack from URI %s has version %s which does not match version %s from builder config",
+			style.Symbol(source),
+			style.Symbol(bp.Descriptor().Info.Version),
+			style.Symbol(expectedBPVersion),
+		)
+	}
+
+	return nil
+}
+
+func uriFromLifecycleVersion(version semver.Version, os string) string {
+	if os == "windows" {
+		return fmt.Sprintf("https://github.com/buildpacks/lifecycle/releases/download/v%s/lifecycle-v%s+windows.x86-64.tgz", version.String(), version.String())
+	}
+
+	return fmt.Sprintf("https://github.com/buildpacks/lifecycle/releases/download/v%s/lifecycle-v%s+linux.x86-64.tgz", version.String(), version.String())
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/errors.go b/vendor/github.com/buildpacks/pack/pkg/client/errors.go
new file mode 100644
index 0000000000..ef4092154c
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/errors.go
@@ -0,0 +1,25 @@
+package client
+
+// ExperimentError denotes that an experimental feature was used without experimental features being enabled.
+type ExperimentError struct {
+	msg string
+}
+
+func NewExperimentError(msg string) ExperimentError {
+	return ExperimentError{msg}
+}
+
+func (ee ExperimentError) Error() string {
+	return ee.msg
+}
+
+// SoftError is an error that is not intended to be displayed.
+type SoftError struct{}
+
+func NewSoftError() SoftError {
+	return SoftError{}
+}
+
+func (se SoftError) Error() string {
+	return ""
+}
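Because ExperimentError is a plain value type implementing error, callers can branch on it with errors.As. A small, hypothetical sketch (the error text is a placeholder):

package main

import (
	"errors"
	"fmt"

	"github.com/buildpacks/pack/pkg/client"
)

func main() {
	err := client.NewExperimentError("Windows containers support is currently experimental.")

	// errors.As unwraps the chain and fills ee when a client.ExperimentError
	// (which implements error with a value receiver) is found.
	var ee client.ExperimentError
	if errors.As(err, &ee) {
		fmt.Println("experimental feature gated:", ee.Error())
	}
}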
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/inspect_builder.go b/vendor/github.com/buildpacks/pack/pkg/client/inspect_builder.go
new file mode 100644
index 0000000000..8a22249f3d
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/inspect_builder.go
@@ -0,0 +1,106 @@
+package client
+
+import (
+	"errors"
+
+	pubbldr "github.com/buildpacks/pack/builder"
+
+	"github.com/buildpacks/pack/internal/builder"
+	"github.com/buildpacks/pack/pkg/dist"
+	"github.com/buildpacks/pack/pkg/image"
+)
+
+// BuilderInfo is a collection of metadata describing a builder created using client.
+type BuilderInfo struct {
+	// Human-readable description of a builder.
+	Description string
+
+	// Stack name used by the builder.
+	Stack string
+
+	// List of stack mixins; this information is provided by the Stack variable.
+	Mixins []string
+
+	// RunImage provided by the builder.
+	RunImage string
+
+	// List of all run image mirrors a builder will use to provide
+	// the RunImage.
+	RunImageMirrors []string
+
+	// All buildpacks included within the builder.
+	Buildpacks []dist.BuildpackInfo
+
+	// Detailed ordering of buildpacks and nested buildpacks where depth is specified.
+	Order pubbldr.DetectionOrder
+
+	// Listing of all buildpack layers in a builder.
+	// All elements in the Buildpacks variable are represented in this
+	// object.
+	BuildpackLayers dist.BuildpackLayers
+
+	// Lifecycle provides the following API versioning information for a builder:
+	// - Lifecycle Version used in this builder,
+	// - Platform API,
+	// - Buildpack API.
+	Lifecycle builder.LifecycleDescriptor
+
+	// Name and Version information from tooling used
+	// to produce this builder.
+	CreatedBy builder.CreatorMetadata
+}
+
+// BuildpackInfoKey contains all information needed to determine buildpack equivalence.
+type BuildpackInfoKey struct {
+	ID string
+	Version string
+}
+
+type BuilderInspectionConfig struct {
+	OrderDetectionDepth int
+}
+
+type BuilderInspectionModifier func(config *BuilderInspectionConfig)
+
+func WithDetectionOrderDepth(depth int) BuilderInspectionModifier {
+	return func(config *BuilderInspectionConfig) {
+		config.OrderDetectionDepth = depth
+	}
+}
+
+// InspectBuilder reads label metadata of a local or remote builder image. It initializes a BuilderInfo
+// object with this metadata, and returns it. This method will error if the named image cannot be found
+// either locally or remotely, or if the found image does not contain the proper labels.
+func (c *Client) InspectBuilder(name string, daemon bool, modifiers ...BuilderInspectionModifier) (*BuilderInfo, error) {
+	inspector := builder.NewInspector(
+		builder.NewImageFetcherWrapper(c.imageFetcher),
+		builder.NewLabelManagerProvider(),
+		builder.NewDetectionOrderCalculator(),
+	)
+
+	inspectionConfig := BuilderInspectionConfig{OrderDetectionDepth: pubbldr.OrderDetectionNone}
+	for _, mod := range modifiers {
+		mod(&inspectionConfig)
+	}
+
+	info, err := inspector.Inspect(name, daemon, inspectionConfig.OrderDetectionDepth)
+	if err != nil {
+		if errors.Is(err, image.ErrNotFound) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	return &BuilderInfo{
+		Description: info.Description,
+		Stack: info.StackID,
+		Mixins: info.Mixins,
+		RunImage: info.RunImage,
+		RunImageMirrors: info.RunImageMirrors,
+		Buildpacks: info.Buildpacks,
+		Order: info.Order,
+		BuildpackLayers: info.BuildpackLayers,
+		Lifecycle: info.Lifecycle,
+		CreatedBy: info.CreatedBy,
+	}, nil
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/inspect_buildpack.go b/vendor/github.com/buildpacks/pack/pkg/client/inspect_buildpack.go
new file mode 100644
index 0000000000..84f89ce813
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/inspect_buildpack.go
@@ -0,0 +1,167 @@
+package client
+
+import (
+	"context"
+	"fmt"
+	"sort"
+
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/pkg/buildpack"
+	"github.com/buildpacks/pack/pkg/dist"
+	"github.com/buildpacks/pack/pkg/image"
+)
+
+type BuildpackInfo struct {
+	BuildpackMetadata buildpack.Metadata
+	Buildpacks []dist.BuildpackInfo
+	Order dist.Order
+	BuildpackLayers dist.BuildpackLayers
+	Location buildpack.LocatorType
+}
+
+type InspectBuildpackOptions struct {
+	BuildpackName string
+	Daemon bool
+	Registry string
+}
+
+type ImgWrapper struct {
+	v1.ImageConfig
+}
+
+func (iw ImgWrapper) Label(name string) (string, error) {
+	return iw.Labels[name], nil
+}
+
+func (c *Client) InspectBuildpack(opts InspectBuildpackOptions) (*BuildpackInfo, error) {
+	locatorType, err := buildpack.GetLocatorType(opts.BuildpackName, "", []dist.BuildpackInfo{})
+	if err != nil {
+		return nil, err
+	}
+	var layersMd dist.BuildpackLayers
+	var buildpackMd buildpack.Metadata
+
+	switch locatorType {
+	case buildpack.RegistryLocator:
+		buildpackMd, layersMd, err =
metadataFromRegistry(c, opts.BuildpackName, opts.Registry)
+	case buildpack.PackageLocator:
+		buildpackMd, layersMd, err = metadataFromImage(c, opts.BuildpackName, opts.Daemon)
+	case buildpack.URILocator:
+		buildpackMd, layersMd, err = metadataFromArchive(c.downloader, opts.BuildpackName)
+	default:
+		return nil, fmt.Errorf("unable to handle locator %q for buildpack %q", locatorType, opts.BuildpackName)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	return &BuildpackInfo{
+		BuildpackMetadata: buildpackMd,
+		BuildpackLayers: layersMd,
+		Order: extractOrder(buildpackMd),
+		Buildpacks: extractBuildpacks(layersMd),
+		Location: locatorType,
+	}, nil
+}
+
+func metadataFromRegistry(client *Client, name, registry string) (buildpackMd buildpack.Metadata, layersMd dist.BuildpackLayers, err error) {
+	registryCache, err := getRegistry(client.logger, registry)
+	if err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, fmt.Errorf("invalid registry %s: %q", registry, err)
+	}
+
+	registryBp, err := registryCache.LocateBuildpack(name)
+	if err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, fmt.Errorf("unable to find %s in registry: %q", style.Symbol(name), err)
+	}
+	buildpackMd, layersMd, err = metadataFromImage(client, registryBp.Address, false)
+	if err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, fmt.Errorf("error pulling registry-specified image: %s", err)
+	}
+	return buildpackMd, layersMd, nil
+}
+
+func metadataFromArchive(downloader BlobDownloader, path string) (buildpackMd buildpack.Metadata, layersMd dist.BuildpackLayers, err error) {
+	imgBlob, err := downloader.Download(context.Background(), path)
+	if err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, fmt.Errorf("unable to download archive: %q", err)
+	}
+
+	config, err := buildpack.ConfigFromOCILayoutBlob(imgBlob)
+	if err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, fmt.Errorf("unable to fetch config from buildpack blob: %q", err)
+	}
+	wrapper := ImgWrapper{config}
+
+	if _, err := dist.GetLabel(wrapper, dist.BuildpackLayersLabel, &layersMd); err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, err
+	}
+
+	if _, err := dist.GetLabel(wrapper, buildpack.MetadataLabel, &buildpackMd); err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, err
+	}
+	return buildpackMd, layersMd, nil
+}
+
+func metadataFromImage(client *Client, name string, daemon bool) (buildpackMd buildpack.Metadata, layersMd dist.BuildpackLayers, err error) {
+	imageName := buildpack.ParsePackageLocator(name)
+	img, err := client.imageFetcher.Fetch(context.Background(), imageName, image.FetchOptions{Daemon: daemon, PullPolicy: image.PullNever})
+	if err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, err
+	}
+	if _, err := dist.GetLabel(img, dist.BuildpackLayersLabel, &layersMd); err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, fmt.Errorf("unable to get image label %s: %q", dist.BuildpackLayersLabel, err)
+	}
+
+	if _, err := dist.GetLabel(img, buildpack.MetadataLabel, &buildpackMd); err != nil {
+		return buildpack.Metadata{}, dist.BuildpackLayers{}, fmt.Errorf("unable to get image label %s: %q", buildpack.MetadataLabel, err)
+	}
+	return buildpackMd, layersMd, nil
+}
+
+func extractOrder(buildpackMd buildpack.Metadata) dist.Order {
+	return dist.Order{
+		{
+			Group: []dist.BuildpackRef{
+				{
+					BuildpackInfo: buildpackMd.BuildpackInfo,
+				},
+			},
+		},
+	}
+}
+
+func extractBuildpacks(layersMd dist.BuildpackLayers)
[]dist.BuildpackInfo {
+	result := []dist.BuildpackInfo{}
+	buildpackSet := map[*dist.BuildpackInfo]bool{}
+
+	for buildpackID, buildpackMap := range layersMd {
+		for version, layerInfo := range buildpackMap {
+			bp := dist.BuildpackInfo{
+				ID: buildpackID,
+				Name: layerInfo.Name,
+				Version: version,
+				Homepage: layerInfo.Homepage,
+			}
+			buildpackSet[&bp] = true
+		}
+	}
+
+	for currentBuildpack := range buildpackSet {
+		result = append(result, *currentBuildpack)
+	}
+
+	sort.Slice(result, func(i int, j int) bool {
+		switch {
+		case result[i].ID < result[j].ID:
+			return true
+		case result[i].ID == result[j].ID:
+			return result[i].Version < result[j].Version
+		default:
+			return false
+		}
+	})
+	return result
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/inspect_image.go b/vendor/github.com/buildpacks/pack/pkg/client/inspect_image.go
new file mode 100644
index 0000000000..c881dc20a3
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/inspect_image.go
@@ -0,0 +1,175 @@
+package client
+
+import (
+	"context"
+	"strings"
+
+	"github.com/Masterminds/semver"
+	"github.com/buildpacks/lifecycle/buildpack"
+	"github.com/buildpacks/lifecycle/launch"
+	"github.com/buildpacks/lifecycle/platform"
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/pack/pkg/dist"
+	"github.com/buildpacks/pack/pkg/image"
+)
+
+// ImageInfo is a collection of metadata describing
+// an app image built using Cloud Native Buildpacks.
+type ImageInfo struct {
+	// Stack identifier used when building this image.
+	StackID string
+
+	// List of buildpacks that passed detection, ran their build
+	// phases and made a contribution to this image.
+	Buildpacks []buildpack.GroupBuildpack
+
+	// Base includes two references to the run image,
+	// - the Run Image ID,
+	// - the hash of the last layer in the app image that belongs to the run image.
+	// A way to visualize this is given an image with n layers:
+	//
+	//        last layer in run image
+	//        v
+	// [1, ..., k, k+1, ..., n]
+	//             ^
+	//             first layer added by buildpacks
+	//
+	// the first 1 to k layers all belong to the run image,
+	// the last k+1 to n layers are added by buildpacks.
+	// the sum of all of these is our app image.
+	Base platform.RunImageMetadata
+
+	// BOM, or bill of materials, contains dependency and
+	// version information provided by each buildpack.
+	BOM []buildpack.BOMEntry
+
+	// Stack includes the run image name, and a list of image mirrors,
+	// where the run image is hosted.
+	Stack platform.StackMetadata
+
+	// Processes lists all processes contributed by buildpacks.
+	Processes ProcessDetails
+}
+
+// ProcessDetails is a collection of all start command metadata
+// on an image.
+type ProcessDetails struct {
+	// An image's default start command.
+	DefaultProcess *launch.Process
+
+	// List of all start commands contributed by buildpacks.
+	OtherProcesses []launch.Process
+}
+
+// Deserialize just the subset of fields we need to avoid breaking changes
+type layersMetadata struct {
+	RunImage platform.RunImageMetadata `json:"runImage" toml:"run-image"`
+	Stack platform.StackMetadata `json:"stack" toml:"stack"`
+}
+
+const (
+	platformAPIEnv = "CNB_PLATFORM_API"
+	cnbProcessEnv = "CNB_PROCESS_TYPE"
+	launcherEntrypoint = "/cnb/lifecycle/launcher"
+	windowsLauncherEntrypoint = `c:\cnb\lifecycle\launcher.exe`
+	entrypointPrefix = "/cnb/process/"
+	windowsEntrypointPrefix = `c:\cnb\process\`
+	defaultProcess = "web"
+	fallbackPlatformAPI = "0.3"
+	windowsPrefix = "c:"
+)
+
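The constants above drive how InspectImage (next) derives the default process type from an image entrypoint: /cnb/process/<type> on Linux, c:\cnb\process\<type>.exe on Windows. A standalone sketch of that mapping (an illustration of the same trimming logic, not the vendored function itself):

package main

import (
	"fmt"
	"strings"
)

// processType mirrors the prefix/suffix trimming performed by InspectImage below.
func processType(entrypoint string) string {
	if strings.HasPrefix(entrypoint, "c:") {
		entrypoint = strings.TrimPrefix(entrypoint, `c:\cnb\process\`)
		return strings.TrimSuffix(entrypoint, ".exe") // Windows launchers end in .exe
	}
	return strings.TrimPrefix(entrypoint, "/cnb/process/")
}

func main() {
	fmt.Println(processType("/cnb/process/web"))       // web
	fmt.Println(processType(`c:\cnb\process\web.exe`)) // web
}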
+// InspectImage reads the Label metadata of an image. It initializes an ImageInfo object
+// using this metadata, and returns it.
+// If daemon is true, the local Docker daemon is searched for the image first.
+// Otherwise it assumes the image is remote.
+func (c *Client) InspectImage(name string, daemon bool) (*ImageInfo, error) {
+	img, err := c.imageFetcher.Fetch(context.Background(), name, image.FetchOptions{Daemon: daemon, PullPolicy: image.PullNever})
+	if err != nil {
+		if errors.Cause(err) == image.ErrNotFound {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	var layersMd layersMetadata
+	if _, err := dist.GetLabel(img, platform.LayerMetadataLabel, &layersMd); err != nil {
+		return nil, err
+	}
+
+	var buildMD platform.BuildMetadata
+	if _, err := dist.GetLabel(img, platform.BuildMetadataLabel, &buildMD); err != nil {
+		return nil, err
+	}
+
+	minimumBaseImageReferenceVersion := semver.MustParse("0.5.0")
+	actualLauncherVersion, err := semver.NewVersion(buildMD.Launcher.Version)
+
+	if err == nil && actualLauncherVersion.LessThan(minimumBaseImageReferenceVersion) {
+		layersMd.RunImage.Reference = ""
+	}
+
+	stackID, err := img.Label(platform.StackIDLabel)
+	if err != nil {
+		return nil, err
+	}
+
+	platformAPI, err := img.Env(platformAPIEnv)
+	if err != nil {
+		return nil, errors.Wrap(err, "reading platform api")
+	}
+
+	if platformAPI == "" {
+		platformAPI = fallbackPlatformAPI
+	}
+
+	platformAPIVersion, err := semver.NewVersion(platformAPI)
+	if err != nil {
+		return nil, errors.Wrap(err, "parsing platform api version")
+	}
+
+	var defaultProcessType string
+	if platformAPIVersion.LessThan(semver.MustParse("0.4")) {
+		defaultProcessType, err = img.Env(cnbProcessEnv)
+		if err != nil || defaultProcessType == "" {
+			defaultProcessType = defaultProcess
+		}
+	} else {
+		entrypoint, err := img.Entrypoint()
+		if err != nil {
+			return nil, errors.Wrap(err, "reading entrypoint")
+		}
+
+		if len(entrypoint) > 0 && entrypoint[0] != launcherEntrypoint && entrypoint[0] != windowsLauncherEntrypoint {
+			process := entrypoint[0]
+			if strings.HasPrefix(process, windowsPrefix) {
+				process = strings.TrimPrefix(process, windowsEntrypointPrefix)
+				process = strings.TrimSuffix(process, ".exe") // Trim .exe for Windows support
+			} else {
+				process = strings.TrimPrefix(process, entrypointPrefix)
+			}
+
+			defaultProcessType = process
+		}
+	}
+
+	var processDetails ProcessDetails
+	for _, proc := range buildMD.Processes {
+		proc := proc
+		if proc.Type == defaultProcessType {
+			processDetails.DefaultProcess = &proc
+			continue
+		}
+		processDetails.OtherProcesses = append(processDetails.OtherProcesses, proc)
+	}
+
+	return &ImageInfo{
+		StackID: stackID,
+		Stack: layersMd.Stack,
+		Base: layersMd.RunImage,
+		BOM: buildMD.BOM,
+		Buildpacks: buildMD.Buildpacks,
+		Processes: processDetails,
+	}, nil
+}
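A hypothetical caller of InspectImage above; "my-app" is a placeholder image name, and the nil/nil return signals a missing image:

package main

import (
	"fmt"

	"github.com/buildpacks/pack/pkg/client"
)

func main() {
	c, err := client.NewClient()
	if err != nil {
		panic(err)
	}

	// daemon=true inspects an image in the local Docker daemon.
	info, err := c.InspectImage("my-app", true)
	if err != nil {
		panic(err)
	}
	if info == nil {
		fmt.Println("image not found")
		return
	}

	fmt.Println("stack:", info.StackID)
	for _, bp := range info.Buildpacks {
		fmt.Println("buildpack:", bp.ID, bp.Version)
	}
}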
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/new_buildpack.go b/vendor/github.com/buildpacks/pack/pkg/client/new_buildpack.go
new file mode 100644
index 0000000000..df85eb0168
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/new_buildpack.go
@@ -0,0 +1,137 @@
+package client
+
+import (
+	"context"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/BurntSushi/toml"
+
+	"github.com/buildpacks/lifecycle/api"
+
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/pkg/dist"
+)
+
+var (
+	bashBinBuild = `#!/usr/bin/env bash
+
+set -euo pipefail
+
+layers_dir="$1"
+env_dir="$2/env"
+plan_path="$3"
+
+exit 0
+`
+	bashBinDetect = `#!/usr/bin/env bash
+
+exit 0
+`
+)
+
+type NewBuildpackOptions struct {
+	// API compatibility version of the output buildpack artifact.
+	API string
+
+	// The base directory to generate assets
+	Path string
+
+	// The ID of the output buildpack artifact.
+	ID string
+
+	// Version of the output buildpack artifact.
+	Version string
+
+	// The stacks this buildpack will work with
+	Stacks []dist.Stack
+}
+
+func (c *Client) NewBuildpack(ctx context.Context, opts NewBuildpackOptions) error {
+	err := createBuildpackTOML(opts.Path, opts.ID, opts.Version, opts.API, opts.Stacks, c)
+	if err != nil {
+		return err
+	}
+	return createBashBuildpack(opts.Path, c)
+}
+
+func createBashBuildpack(path string, c *Client) error {
+	if err := createBinScript(path, "build", bashBinBuild, c); err != nil {
+		return err
+	}
+
+	if err := createBinScript(path, "detect", bashBinDetect, c); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func createBinScript(path, name, contents string, c *Client) error {
+	binDir := filepath.Join(path, "bin")
+	binFile := filepath.Join(binDir, name)
+
+	_, err := os.Stat(binFile)
+	if os.IsNotExist(err) {
+		// The following line's comment is for gosec, it will ignore rule 301 in this case
+		// G301: Expect directory permissions to be 0750 or less
+		/* #nosec G301 */
+		if err := os.MkdirAll(binDir, 0755); err != nil {
+			return err
+		}
+		// The following line's comment is for gosec, it will ignore rule 306 in this case
+		// G306: Expect WriteFile permissions to be 0600 or less
+		/* #nosec G306 */
+		err = ioutil.WriteFile(binFile, []byte(contents), 0755)
+		if err != nil {
+			return err
+		}
+
+		if c != nil {
+			c.logger.Infof(" %s bin/%s", style.Symbol("create"), name)
+		}
+	}
+	return nil
+}
+
+func createBuildpackTOML(path, id, version, apiStr string, stacks []dist.Stack, c *Client) error {
+	api, err := api.NewVersion(apiStr)
+	if err != nil {
+		return err
+	}
+
+	buildpackTOML := dist.BuildpackDescriptor{
+		API: api,
+		Stacks: stacks,
+		Info: dist.BuildpackInfo{
+			ID: id,
+			Version: version,
+		},
+	}
+
+	// The following line's comment is for gosec, it will ignore rule 301 in this case
+	// G301: Expect directory permissions to be 0750 or less
+	/* #nosec G301 */
+	if err := os.MkdirAll(path, 0755); err != nil {
+		return err
+	}
+
+	buildpackTOMLPath := filepath.Join(path, "buildpack.toml")
+	_, err = os.Stat(buildpackTOMLPath)
+	if os.IsNotExist(err) {
+		f, err := os.Create(buildpackTOMLPath)
+		if err != nil {
+			return err
+		}
+		// Register Close immediately so the file is closed even if encoding fails below.
+		defer f.Close()
+		if err := toml.NewEncoder(f).Encode(buildpackTOML); err != nil {
+			return err
+		}
+		if c != nil {
+			c.logger.Infof(" %s buildpack.toml", style.Symbol("create"))
+		}
+	}
+
+	return nil
+}
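NewBuildpack above scaffolds a minimal bash buildpack: a buildpack.toml plus bin/build and bin/detect stubs. A hypothetical invocation; the path, ID, API version, and stack are placeholders:

package main

import (
	"context"

	"github.com/buildpacks/pack/pkg/client"
	"github.com/buildpacks/pack/pkg/dist"
)

func main() {
	c, err := client.NewClient()
	if err != nil {
		panic(err)
	}

	// Existing files are left untouched; only missing assets are created.
	err = c.NewBuildpack(context.Background(), client.NewBuildpackOptions{
		API:     "0.7",
		Path:    "./my-buildpack",
		ID:      "example/my-buildpack",
		Version: "0.0.1",
		Stacks:  []dist.Stack{{ID: "io.buildpacks.stacks.bionic"}},
	})
	if err != nil {
		panic(err)
	}
}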
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/package_buildpack.go b/vendor/github.com/buildpacks/pack/pkg/client/package_buildpack.go
new file mode 100644
index 0000000000..c26d27623c
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/package_buildpack.go
@@ -0,0 +1,157 @@
+package client
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+
+	pubbldpkg "github.com/buildpacks/pack/buildpackage"
+	"github.com/buildpacks/pack/internal/layer"
+	"github.com/buildpacks/pack/internal/paths"
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/pkg/blob"
+	"github.com/buildpacks/pack/pkg/buildpack"
+	"github.com/buildpacks/pack/pkg/image"
+)
+
+const (
+	// Packaging indicator that the format of inputs/outputs will be an OCI image on the registry.
+	FormatImage = "image"
+
+	// Packaging indicator that the format of the output will be a file on the host filesystem.
+	FormatFile = "file"
+
+	// CNBExtension is the file extension for a cloud native buildpack tar archive
+	CNBExtension = ".cnb"
+)
+
+// PackageBuildpackOptions is a configuration object used to define
+// the behavior of PackageBuildpack.
+type PackageBuildpackOptions struct {
+	// The base directory to resolve relative assets from
+	RelativeBaseDir string
+
+	// The name of the output buildpack artifact.
+	Name string
+
+	// Type of output format. The options are the constants FormatImage and FormatFile.
+	Format string
+
+	// Defines the Buildpack configuration.
+	Config pubbldpkg.Config
+
+	// Push the resulting buildpack image to a registry
+	// specified in the Name variable.
+	Publish bool
+
+	// Strategy for updating images before packaging.
+	PullPolicy image.PullPolicy
+
+	// Name of the buildpack registry. Used to
+	// add buildpacks to a package.
+	Registry string
+}
+
+// PackageBuildpack packages buildpack(s) into either an image or file.
+func (c *Client) PackageBuildpack(ctx context.Context, opts PackageBuildpackOptions) error {
+	if opts.Format == "" {
+		opts.Format = FormatImage
+	}
+
+	if opts.Config.Platform.OS == "windows" && !c.experimental {
+		return NewExperimentError("Windows buildpackage support is currently experimental.")
+	}
+
+	err := c.validateOSPlatform(ctx, opts.Config.Platform.OS, opts.Publish, opts.Format)
+	if err != nil {
+		return err
+	}
+
+	writerFactory, err := layer.NewWriterFactory(opts.Config.Platform.OS)
+	if err != nil {
+		return errors.Wrap(err, "creating layer writer factory")
+	}
+
+	packageBuilder := buildpack.NewBuilder(c.imageFactory)
+
+	bpURI := opts.Config.Buildpack.URI
+	if bpURI == "" {
+		return errors.New("buildpack URI must be provided")
+	}
+
+	mainBlob, err := c.downloadBuildpackFromURI(ctx, bpURI, opts.RelativeBaseDir)
+	if err != nil {
+		return err
+	}
+
+	bp, err := buildpack.FromRootBlob(mainBlob, writerFactory)
+	if err != nil {
+		return errors.Wrapf(err, "creating buildpack from %s", style.Symbol(bpURI))
+	}
+
+	packageBuilder.SetBuildpack(bp)
+
+	for _, dep := range opts.Config.Dependencies {
+		var depBPs []buildpack.Buildpack
+		mainBP, deps, err := c.buildpackDownloader.Download(ctx, dep.URI, buildpack.DownloadOptions{
+			RegistryName: opts.Registry,
+			RelativeBaseDir: opts.RelativeBaseDir,
+			ImageOS: opts.Config.Platform.OS,
+			ImageName: dep.ImageName,
+			Daemon: !opts.Publish,
+			PullPolicy: opts.PullPolicy,
+		})
+
+		if err != nil {
+			return errors.Wrapf(err, "packaging dependencies (uri=%s,image=%s)", style.Symbol(dep.URI), style.Symbol(dep.ImageName))
+		}
+
+		depBPs = append([]buildpack.Buildpack{mainBP}, deps...)
+		for _, depBP := range depBPs {
+			packageBuilder.AddDependency(depBP)
+		}
+	}
+
+	switch opts.Format {
+	case FormatFile:
+		return packageBuilder.SaveAsFile(opts.Name, opts.Config.Platform.OS)
+	case FormatImage:
+		_, err = packageBuilder.SaveAsImage(opts.Name, opts.Publish, opts.Config.Platform.OS)
+		return errors.Wrapf(err, "saving image")
+	default:
+		return errors.Errorf("unknown format: %s", style.Symbol(opts.Format))
+	}
+}
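PackageBuildpack above supports two output formats; with FormatFile no registry or daemon access is required. A hypothetical sketch (paths and names are placeholders, and the pubbldpkg.Config field names are assumed from their use in this file):

package main

import (
	"context"

	pubbldpkg "github.com/buildpacks/pack/buildpackage"
	"github.com/buildpacks/pack/pkg/client"
	"github.com/buildpacks/pack/pkg/dist"
)

func main() {
	c, err := client.NewClient()
	if err != nil {
		panic(err)
	}

	// Package a local buildpack directory into a .cnb archive on disk.
	err = c.PackageBuildpack(context.Background(), client.PackageBuildpackOptions{
		Name:   "my-buildpack.cnb",
		Format: client.FormatFile,
		Config: pubbldpkg.Config{
			Platform:  dist.Platform{OS: "linux"},
			Buildpack: dist.BuildpackURI{URI: "./my-buildpack"},
		},
	})
	if err != nil {
		panic(err)
	}
}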
+
+func (c *Client) downloadBuildpackFromURI(ctx context.Context, uri, relativeBaseDir string) (blob.Blob, error) {
+	absPath, err := paths.FilePathToURI(uri, relativeBaseDir)
+	if err != nil {
+		return nil, errors.Wrapf(err, "making absolute: %s", style.Symbol(uri))
+	}
+	uri = absPath
+
+	c.logger.Debugf("Downloading buildpack from URI: %s", style.Symbol(uri))
+	blob, err := c.downloader.Download(ctx, uri)
+	if err != nil {
+		return nil, errors.Wrapf(err, "downloading buildpack from %s", style.Symbol(uri))
+	}
+
+	return blob, nil
+}
+
+func (c *Client) validateOSPlatform(ctx context.Context, os string, publish bool, format string) error {
+	if publish || format == FormatFile {
+		return nil
+	}
+
+	info, err := c.docker.Info(ctx)
+	if err != nil {
+		return err
+	}
+
+	if info.OSType != os {
+		return errors.Errorf("invalid %s specified: DOCKER_OS is %s", style.Symbol("platform.os"), style.Symbol(info.OSType))
+	}
+
+	return nil
+}
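The next file implements PullBuildpack, which resolves a buildpack locator and pulls the corresponding image into the local daemon. A hypothetical invocation (the image reference is a placeholder):

package main

import (
	"context"

	"github.com/buildpacks/pack/pkg/client"
)

func main() {
	c, err := client.NewClient()
	if err != nil {
		panic(err)
	}

	// Pull a packaged buildpack image into the local Docker daemon.
	err = c.PullBuildpack(context.Background(), client.PullBuildpackOptions{
		URI: "docker.io/example/my-buildpack:latest",
	})
	if err != nil {
		panic(err)
	}
}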
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/pull_buildpack.go b/vendor/github.com/buildpacks/pack/pkg/client/pull_buildpack.go
new file mode 100644
index 0000000000..d3942fbcfb
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/pull_buildpack.go
@@ -0,0 +1,65 @@
+package client
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/pkg/buildpack"
+	"github.com/buildpacks/pack/pkg/dist"
+	"github.com/buildpacks/pack/pkg/image"
+)
+
+// PullBuildpackOptions are options available for PullBuildpack
+type PullBuildpackOptions struct {
+	// URI of the buildpack to retrieve.
+	URI string
+	// RegistryName to search for buildpacks from.
+	RegistryName string
+	// RelativeBaseDir to resolve relative assets from.
+	RelativeBaseDir string
+}
+
+// PullBuildpack pulls the given buildpack and stores it locally
+func (c *Client) PullBuildpack(ctx context.Context, opts PullBuildpackOptions) error {
+	locatorType, err := buildpack.GetLocatorType(opts.URI, "", []dist.BuildpackInfo{})
+	if err != nil {
+		return err
+	}
+
+	switch locatorType {
+	case buildpack.PackageLocator:
+		imageName := buildpack.ParsePackageLocator(opts.URI)
+		c.logger.Debugf("Pulling buildpack from image: %s", imageName)
+
+		_, err = c.imageFetcher.Fetch(ctx, imageName, image.FetchOptions{Daemon: true, PullPolicy: image.PullAlways})
+		if err != nil {
+			return errors.Wrapf(err, "fetching image %s", style.Symbol(opts.URI))
+		}
+	case buildpack.RegistryLocator:
+		c.logger.Debugf("Pulling buildpack from registry: %s", style.Symbol(opts.URI))
+		registryCache, err := getRegistry(c.logger, opts.RegistryName)
+
+		if err != nil {
+			return errors.Wrapf(err, "invalid registry '%s'", opts.RegistryName)
+		}
+
+		registryBp, err := registryCache.LocateBuildpack(opts.URI)
+		if err != nil {
+			return errors.Wrapf(err, "locating in registry %s", style.Symbol(opts.URI))
+		}
+
+		_, err = c.imageFetcher.Fetch(ctx, registryBp.Address, image.FetchOptions{Daemon: true, PullPolicy: image.PullAlways})
+		if err != nil {
+			return errors.Wrapf(err, "fetching image %s", style.Symbol(opts.URI))
+		}
+	case buildpack.InvalidLocator:
+		return fmt.Errorf("invalid buildpack URI %s", style.Symbol(opts.URI))
+	default:
+		return fmt.Errorf("unsupported buildpack URI type: %s", style.Symbol(locatorType.String()))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/rebase.go b/vendor/github.com/buildpacks/pack/pkg/client/rebase.go
new file mode 100644
index 0000000000..a244c9fc67
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/rebase.go
@@ -0,0 +1,95 @@
+package client
+
+import (
+	"context"
+
+	"github.com/buildpacks/lifecycle"
+	"github.com/buildpacks/lifecycle/platform"
+	"github.com/pkg/errors"
+
+	"github.com/buildpacks/pack/internal/build"
+	"github.com/buildpacks/pack/internal/builder"
+	"github.com/buildpacks/pack/internal/style"
+	"github.com/buildpacks/pack/pkg/dist"
+	"github.com/buildpacks/pack/pkg/image"
+)
+
+// RebaseOptions is a configuration struct that controls image rebase behavior.
+type RebaseOptions struct {
+	// Name of image we wish to rebase.
+	RepoName string
+
+	// Flag to publish image to remote registry after rebase completion.
+	Publish bool
+
+	// Strategy for pulling images during rebase.
+	PullPolicy image.PullPolicy
+
+	// Image to rebase against. This image must have
+	// the same StackID as the previous run image.
+	RunImage string
+
+	// A mapping from StackID to an array of mirrors.
+	// This mapping is used only if both RunImage is omitted and Publish is true.
+	// AdditionalMirrors gives us inputs to recalculate the 'best' run image
+	// based on the registry we are publishing to.
+	AdditionalMirrors map[string][]string
+}
+
+// Rebase updates the run image layers in an app image.
+// This operation mutates the image specified in opts.
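Rebase, defined next, swaps only the run-image layers of an app image, leaving the buildpack-contributed layers untouched. A hypothetical invocation (the image name is a placeholder):

package main

import (
	"context"

	"github.com/buildpacks/pack/pkg/client"
	"github.com/buildpacks/pack/pkg/image"
)

func main() {
	c, err := client.NewClient()
	if err != nil {
		panic(err)
	}

	// Rebase a local app image onto the run image recorded in its
	// stack metadata, pulling that run image only if absent.
	err = c.Rebase(context.Background(), client.RebaseOptions{
		RepoName:   "my-app",
		PullPolicy: image.PullIfNotPresent,
	})
	if err != nil {
		panic(err)
	}
}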
+func (c *Client) Rebase(ctx context.Context, opts RebaseOptions) error { + imageRef, err := c.parseTagReference(opts.RepoName) + if err != nil { + return errors.Wrapf(err, "invalid image name '%s'", opts.RepoName) + } + + appImage, err := c.imageFetcher.Fetch(ctx, opts.RepoName, image.FetchOptions{Daemon: !opts.Publish, PullPolicy: opts.PullPolicy}) + if err != nil { + return err + } + + var md platform.LayersMetadataCompat + if ok, err := dist.GetLabel(appImage, platform.LayerMetadataLabel, &md); err != nil { + return err + } else if !ok { + return errors.Errorf("could not find label %s on image", style.Symbol(platform.LayerMetadataLabel)) + } + + runImageName := c.resolveRunImage( + opts.RunImage, + imageRef.Context().RegistryStr(), + "", + builder.StackMetadata{ + RunImage: builder.RunImageMetadata{ + Image: md.Stack.RunImage.Image, + Mirrors: md.Stack.RunImage.Mirrors, + }, + }, + opts.AdditionalMirrors, + opts.Publish) + + if runImageName == "" { + return errors.New("run image must be specified") + } + + baseImage, err := c.imageFetcher.Fetch(ctx, runImageName, image.FetchOptions{Daemon: !opts.Publish, PullPolicy: opts.PullPolicy}) + if err != nil { + return err + } + + c.logger.Infof("Rebasing %s on run image %s", style.Symbol(appImage.Name()), style.Symbol(baseImage.Name())) + rebaser := &lifecycle.Rebaser{Logger: c.logger, PlatformAPI: build.SupportedPlatformAPIVersions.Latest()} + _, err = rebaser.Rebase(appImage, baseImage, nil) + if err != nil { + return err + } + + appImageIdentifier, err := appImage.Identifier() + if err != nil { + return err + } + + c.logger.Infof("Rebased Image: %s", style.Symbol(appImageIdentifier.String())) + return nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/client/register_buildpack.go b/vendor/github.com/buildpacks/pack/pkg/client/register_buildpack.go new file mode 100644 index 0000000000..1bae8e5d96 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/client/register_buildpack.go @@ -0,0 +1,119 @@ +package client + +import ( + "context" + "errors" + "net/url" + "runtime" + "strings" + + "github.com/buildpacks/pack/internal/registry" + "github.com/buildpacks/pack/pkg/buildpack" + "github.com/buildpacks/pack/pkg/dist" + "github.com/buildpacks/pack/pkg/image" +) + +// RegisterBuildpackOptions is a configuration struct that controls the +// behavior of the RegisterBuildpack function. 
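A hypothetical invocation of RegisterBuildpack (defined just below); all values are placeholders, and Type "github" opens a pre-filled registry issue in the browser:

package main

import (
	"context"

	"github.com/buildpacks/pack/pkg/client"
)

func main() {
	c, err := client.NewClient()
	if err != nil {
		panic(err)
	}

	// Propose adding a published buildpack image to a registry index.
	err = c.RegisterBuildpack(context.Background(), client.RegisterBuildpackOptions{
		ImageName: "docker.io/example/my-buildpack:0.0.1",
		Type:      "github",
		URL:       "https://github.com/buildpacks/registry-index",
	})
	if err != nil {
		panic(err)
	}
}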
+type RegisterBuildpackOptions struct {
+	ImageName string
+	Type string
+	URL string
+	Name string
+}
+
+// RegisterBuildpack updates the Buildpack Registry to include the new buildpack specified in
+// the opts argument.
+func (c *Client) RegisterBuildpack(ctx context.Context, opts RegisterBuildpackOptions) error {
+	appImage, err := c.imageFetcher.Fetch(ctx, opts.ImageName, image.FetchOptions{Daemon: false, PullPolicy: image.PullAlways})
+	if err != nil {
+		return err
+	}
+
+	var buildpackInfo dist.BuildpackInfo
+	if _, err := dist.GetLabel(appImage, buildpack.MetadataLabel, &buildpackInfo); err != nil {
+		return err
+	}
+
+	namespace, name, err := parseID(buildpackInfo.ID)
+	if err != nil {
+		return err
+	}
+
+	id, err := appImage.Identifier()
+	if err != nil {
+		return err
+	}
+
+	buildpack := registry.Buildpack{
+		Namespace: namespace,
+		Name: name,
+		Version: buildpackInfo.Version,
+		Address: id.String(),
+		Yanked: false,
+	}
+
+	if opts.Type == "github" {
+		issueURL, err := registry.GetIssueURL(opts.URL)
+		if err != nil {
+			return err
+		}
+
+		issue, err := registry.CreateGithubIssue(buildpack)
+		if err != nil {
+			return err
+		}
+
+		params := url.Values{}
+		params.Add("title", issue.Title)
+		params.Add("body", issue.Body)
+		issueURL.RawQuery = params.Encode()
+
+		c.logger.Debugf("Open URL in browser: %s", issueURL)
+		cmd, err := registry.CreateBrowserCmd(issueURL.String(), runtime.GOOS)
+		if err != nil {
+			return err
+		}
+
+		return cmd.Start()
+	} else if opts.Type == "git" {
+		registryCache, err := getRegistry(c.logger, opts.Name)
+		if err != nil {
+			return err
+		}
+
+		username, err := parseUsernameFromURL(opts.URL)
+		if err != nil {
+			return err
+		}
+
+		if err := registry.GitCommit(buildpack, username, registryCache); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func parseUsernameFromURL(url string) (string, error) {
+	parts := strings.Split(url, "/")
+	// parts[3] is the username segment of e.g. https://host/<username>/..., so the
+	// split must yield at least four elements; the original bound of 3 allowed an
+	// out-of-range index below.
+	if len(parts) < 4 {
+		return "", errors.New("invalid url: cannot parse username from url")
+	}
+	if parts[3] == "" {
+		return "", errors.New("invalid url: username is empty")
+	}
+
+	return parts[3], nil
+}
+
+func parseID(id string) (string, string, error) {
+	parts := strings.Split(id, "/")
+	if len(parts) < 2 {
+		return "", "", errors.New("invalid id: does not contain a namespace")
+	} else if len(parts) > 2 {
+		return "", "", errors.New("invalid id: contains unexpected characters")
+	}
+
+	return parts[0], parts[1], nil
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/version.go b/vendor/github.com/buildpacks/pack/pkg/client/version.go
new file mode 100644
index 0000000000..a3533e8866
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/version.go
@@ -0,0 +1,6 @@
+package client
+
+// Version returns the version of the client
+func (c *Client) Version() string {
+	return c.version
+}
diff --git a/vendor/github.com/buildpacks/pack/pkg/client/yank_buildpack.go b/vendor/github.com/buildpacks/pack/pkg/client/yank_buildpack.go
new file mode 100644
index 0000000000..173f22902c
--- /dev/null
+++ b/vendor/github.com/buildpacks/pack/pkg/client/yank_buildpack.go
@@ -0,0 +1,56 @@
+package client
+
+import (
+	"net/url"
+	"runtime"
+
+	"github.com/buildpacks/pack/internal/registry"
+)
+
+// YankBuildpackOptions is a configuration struct that controls the yanking of a buildpack
+// from the Buildpack Registry.
+type YankBuildpackOptions struct {
+	ID string
+	Version string
+	Type string
+	URL string
+	Yank bool
+}
+
+// YankBuildpack marks a buildpack on the Buildpack Registry as 'yanked'.
This forbids future +// builds from using it. +func (c *Client) YankBuildpack(opts YankBuildpackOptions) error { + namespace, name, err := registry.ParseNamespaceName(opts.ID) + if err != nil { + return err + } + issueURL, err := registry.GetIssueURL(opts.URL) + if err != nil { + return err + } + + buildpack := registry.Buildpack{ + Namespace: namespace, + Name: name, + Version: opts.Version, + Yanked: opts.Yank, + } + + issue, err := registry.CreateGithubIssue(buildpack) + if err != nil { + return err + } + + params := url.Values{} + params.Add("title", issue.Title) + params.Add("body", issue.Body) + issueURL.RawQuery = params.Encode() + + c.logger.Debugf("Open URL in browser: %s", issueURL) + cmd, err := registry.CreateBrowserCmd(issueURL.String(), runtime.GOOS) + if err != nil { + return err + } + + return cmd.Start() +} diff --git a/vendor/github.com/buildpacks/pack/pkg/dist/buildpack.go b/vendor/github.com/buildpacks/pack/pkg/dist/buildpack.go new file mode 100644 index 0000000000..4042c717d7 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/dist/buildpack.go @@ -0,0 +1,39 @@ +package dist + +const AssumedBuildpackAPIVersion = "0.1" +const BuildpacksDir = "/cnb/buildpacks" + +type BuildpackInfo struct { + ID string `toml:"id,omitempty" json:"id,omitempty" yaml:"id,omitempty"` + Name string `toml:"name,omitempty" json:"name,omitempty" yaml:"name,omitempty"` + Version string `toml:"version,omitempty" json:"version,omitempty" yaml:"version,omitempty"` + Description string `toml:"description,omitempty" json:"description,omitempty" yaml:"description,omitempty"` + Homepage string `toml:"homepage,omitempty" json:"homepage,omitempty" yaml:"homepage,omitempty"` + Keywords []string `toml:"keywords,omitempty" json:"keywords,omitempty" yaml:"keywords,omitempty"` + Licenses []License `toml:"licenses,omitempty" json:"licenses,omitempty" yaml:"licenses,omitempty"` +} + +func (b BuildpackInfo) FullName() string { + if b.Version != "" { + return b.ID + "@" + b.Version + } + return b.ID +} + +// Satisfy stringer +func (b BuildpackInfo) String() string { return b.FullName() } + +// Match compares two buildpacks by ID and Version +func (b BuildpackInfo) Match(o BuildpackInfo) bool { + return b.ID == o.ID && b.Version == o.Version +} + +type License struct { + Type string `toml:"type"` + URI string `toml:"uri"` +} + +type Stack struct { + ID string `json:"id" toml:"id"` + Mixins []string `json:"mixins,omitempty" toml:"mixins,omitempty"` +} diff --git a/vendor/github.com/buildpacks/pack/pkg/dist/buildpack_descriptor.go b/vendor/github.com/buildpacks/pack/pkg/dist/buildpack_descriptor.go new file mode 100644 index 0000000000..fea7f4816f --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/dist/buildpack_descriptor.go @@ -0,0 +1,60 @@ +package dist + +import ( + "fmt" + "sort" + "strings" + + "github.com/buildpacks/lifecycle/api" + + "github.com/buildpacks/pack/internal/stringset" + "github.com/buildpacks/pack/internal/style" +) + +type BuildpackDescriptor struct { + API *api.Version `toml:"api"` + Info BuildpackInfo `toml:"buildpack"` + Stacks []Stack `toml:"stacks"` + Order Order `toml:"order"` +} + +func (b *BuildpackDescriptor) EscapedID() string { + return strings.ReplaceAll(b.Info.ID, "/", "_") +} + +func (b *BuildpackDescriptor) EnsureStackSupport(stackID string, providedMixins []string, validateRunStageMixins bool) error { + if len(b.Stacks) == 0 { + return nil // Order buildpack, no validation required + } + + bpMixins, err := b.findMixinsForStack(stackID) + if err != nil { + return err 
+ } + + if !validateRunStageMixins { + var filtered []string + for _, m := range bpMixins { + if !strings.HasPrefix(m, "run:") { + filtered = append(filtered, m) + } + } + bpMixins = filtered + } + + _, missing, _ := stringset.Compare(providedMixins, bpMixins) + if len(missing) > 0 { + sort.Strings(missing) + return fmt.Errorf("buildpack %s requires missing mixin(s): %s", style.Symbol(b.Info.FullName()), strings.Join(missing, ", ")) + } + return nil +} + +func (b *BuildpackDescriptor) findMixinsForStack(stackID string) ([]string, error) { + for _, s := range b.Stacks { + if s.ID == stackID || s.ID == "*" { + return s.Mixins, nil + } + } + return nil, fmt.Errorf("buildpack %s does not support stack %s", style.Symbol(b.Info.FullName()), style.Symbol(stackID)) +} diff --git a/vendor/github.com/buildpacks/pack/pkg/dist/dist.go b/vendor/github.com/buildpacks/pack/pkg/dist/dist.go new file mode 100644 index 0000000000..6ccee5bcc9 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/dist/dist.go @@ -0,0 +1,69 @@ +package dist + +import ( + "github.com/buildpacks/lifecycle/api" +) + +const BuildpackLayersLabel = "io.buildpacks.buildpack.layers" + +type BuildpackURI struct { + URI string `toml:"uri"` +} + +type ImageRef struct { + ImageName string `toml:"image"` +} + +type ImageOrURI struct { + BuildpackURI + ImageRef +} + +func (c *ImageOrURI) DisplayString() string { + if c.BuildpackURI.URI != "" { + return c.BuildpackURI.URI + } + + return c.ImageRef.ImageName +} + +type Platform struct { + OS string `toml:"os"` +} + +type Order []OrderEntry + +type OrderEntry struct { + Group []BuildpackRef `toml:"group" json:"group"` +} + +type BuildpackRef struct { + BuildpackInfo `yaml:"buildpackinfo,inline"` + Optional bool `toml:"optional,omitempty" json:"optional,omitempty" yaml:"optional,omitempty"` +} + +type BuildpackLayers map[string]map[string]BuildpackLayerInfo + +type BuildpackLayerInfo struct { + API *api.Version `json:"api"` + Stacks []Stack `json:"stacks,omitempty"` + Order Order `json:"order,omitempty"` + LayerDiffID string `json:"layerDiffID"` + Homepage string `json:"homepage,omitempty"` + Name string `json:"name,omitempty"` +} + +func (b BuildpackLayers) Get(id, version string) (BuildpackLayerInfo, bool) { + buildpackLayerEntries, ok := b[id] + if !ok { + return BuildpackLayerInfo{}, false + } + if len(buildpackLayerEntries) == 1 && version == "" { + for key := range buildpackLayerEntries { + version = key + } + } + + result, ok := buildpackLayerEntries[version] + return result, ok +} diff --git a/vendor/github.com/buildpacks/pack/pkg/dist/distribution.go b/vendor/github.com/buildpacks/pack/pkg/dist/distribution.go new file mode 100644 index 0000000000..b9232771b8 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/dist/distribution.go @@ -0,0 +1,3 @@ +// Package dist is responsible for cataloging all data types in relation +// to distributing Cloud Native Buildpack components. 
+package dist diff --git a/vendor/github.com/buildpacks/pack/pkg/dist/image.go b/vendor/github.com/buildpacks/pack/pkg/dist/image.go new file mode 100644 index 0000000000..6032e57cdd --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/dist/image.go @@ -0,0 +1,42 @@ +package dist + +import ( + "encoding/json" + + "github.com/pkg/errors" + + "github.com/buildpacks/pack/internal/style" +) + +type Labeled interface { + Label(name string) (value string, err error) +} + +type Labelable interface { + SetLabel(name string, value string) error +} + +func SetLabel(labelable Labelable, label string, data interface{}) error { + dataBytes, err := json.Marshal(data) + if err != nil { + return errors.Wrapf(err, "marshalling data to JSON for label %s", style.Symbol(label)) + } + if err := labelable.SetLabel(label, string(dataBytes)); err != nil { + return errors.Wrapf(err, "setting label %s", style.Symbol(label)) + } + return nil +} + +func GetLabel(labeled Labeled, label string, obj interface{}) (ok bool, err error) { + labelData, err := labeled.Label(label) + if err != nil { + return false, errors.Wrapf(err, "retrieving label %s", style.Symbol(label)) + } + if labelData != "" { + if err := json.Unmarshal([]byte(labelData), obj); err != nil { + return false, errors.Wrapf(err, "unmarshalling label %s", style.Symbol(label)) + } + return true, nil + } + return false, nil +} diff --git a/vendor/github.com/buildpacks/pack/pkg/dist/layers.go b/vendor/github.com/buildpacks/pack/pkg/dist/layers.go new file mode 100644 index 0000000000..873f86ca4f --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/dist/layers.go @@ -0,0 +1,45 @@ +package dist + +import ( + "os" + "path/filepath" + + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/tarball" + "github.com/pkg/errors" +) + +func LayerDiffID(layerTarPath string) (v1.Hash, error) { + fh, err := os.Open(filepath.Clean(layerTarPath)) + if err != nil { + return v1.Hash{}, errors.Wrap(err, "opening tar file") + } + defer fh.Close() + + layer, err := tarball.LayerFromFile(layerTarPath) + if err != nil { + return v1.Hash{}, errors.Wrap(err, "reading layer tar") + } + + hash, err := layer.DiffID() + if err != nil { + return v1.Hash{}, errors.Wrap(err, "generating diff id") + } + + return hash, nil +} + +func AddBuildpackToLayersMD(layerMD BuildpackLayers, descriptor BuildpackDescriptor, diffID string) { + bpInfo := descriptor.Info + if _, ok := layerMD[bpInfo.ID]; !ok { + layerMD[bpInfo.ID] = map[string]BuildpackLayerInfo{} + } + layerMD[bpInfo.ID][bpInfo.Version] = BuildpackLayerInfo{ + API: descriptor.API, + Stacks: descriptor.Stacks, + Order: descriptor.Order, + LayerDiffID: diffID, + Homepage: bpInfo.Homepage, + Name: bpInfo.Name, + } +} diff --git a/vendor/github.com/buildpacks/pack/pkg/image/fetcher.go b/vendor/github.com/buildpacks/pack/pkg/image/fetcher.go new file mode 100644 index 0000000000..09990517af --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/image/fetcher.go @@ -0,0 +1,187 @@ +package image + +import ( + "context" + "encoding/base64" + "encoding/json" + "io" + "strings" + + "github.com/buildpacks/imgutil" + "github.com/buildpacks/imgutil/local" + "github.com/buildpacks/imgutil/remote" + "github.com/buildpacks/lifecycle/auth" + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/google/go-containerregistry/pkg/authn" + "github.com/pkg/errors" + + pname "github.com/buildpacks/pack/internal/name" + 
"github.com/buildpacks/pack/internal/style" + "github.com/buildpacks/pack/internal/term" + "github.com/buildpacks/pack/pkg/logging" +) + +// FetcherOption is a type of function that mutate settings on the client. +// Values in these functions are set through currying. +type FetcherOption func(c *Fetcher) + +// WithRegistryMirrors supply your own mirrors for registry. +func WithRegistryMirrors(registryMirrors map[string]string) FetcherOption { + return func(c *Fetcher) { + c.registryMirrors = registryMirrors + } +} + +type Fetcher struct { + docker client.CommonAPIClient + logger logging.Logger + registryMirrors map[string]string +} + +type FetchOptions struct { + Daemon bool + Platform string + PullPolicy PullPolicy +} + +func NewFetcher(logger logging.Logger, docker client.CommonAPIClient, opts ...FetcherOption) *Fetcher { + var fetcher = &Fetcher{ + logger: logger, + docker: docker, + } + + for _, opt := range opts { + opt(fetcher) + } + + return fetcher +} + +var ErrNotFound = errors.New("not found") + +func (f *Fetcher) Fetch(ctx context.Context, name string, options FetchOptions) (imgutil.Image, error) { + name, err := pname.TranslateRegistry(name, f.registryMirrors, f.logger) + if err != nil { + return nil, err + } + + if !options.Daemon { + return f.fetchRemoteImage(name) + } + + switch options.PullPolicy { + case PullNever: + img, err := f.fetchDaemonImage(name) + return img, err + case PullIfNotPresent: + img, err := f.fetchDaemonImage(name) + if err == nil || !errors.Is(err, ErrNotFound) { + return img, err + } + } + + f.logger.Debugf("Pulling image %s", style.Symbol(name)) + err = f.pullImage(ctx, name, options.Platform) + if err != nil && !errors.Is(err, ErrNotFound) { + return nil, err + } + + return f.fetchDaemonImage(name) +} + +func (f *Fetcher) fetchDaemonImage(name string) (imgutil.Image, error) { + image, err := local.NewImage(name, f.docker, local.FromBaseImage(name)) + if err != nil { + return nil, err + } + + if !image.Found() { + return nil, errors.Wrapf(ErrNotFound, "image %s does not exist on the daemon", style.Symbol(name)) + } + + return image, nil +} + +func (f *Fetcher) fetchRemoteImage(name string) (imgutil.Image, error) { + image, err := remote.NewImage(name, authn.DefaultKeychain, remote.FromBaseImage(name)) + if err != nil { + return nil, err + } + + if !image.Found() { + return nil, errors.Wrapf(ErrNotFound, "image %s does not exist in registry", style.Symbol(name)) + } + + return image, nil +} + +func (f *Fetcher) pullImage(ctx context.Context, imageID string, platform string) error { + regAuth, err := registryAuth(imageID) + if err != nil { + return err + } + + rc, err := f.docker.ImagePull(ctx, imageID, types.ImagePullOptions{RegistryAuth: regAuth, Platform: platform}) + if err != nil { + if client.IsErrNotFound(err) { + return errors.Wrapf(ErrNotFound, "image %s does not exist on the daemon", style.Symbol(imageID)) + } + + return err + } + + writer := logging.GetWriterForLevel(f.logger, logging.InfoLevel) + termFd, isTerm := term.IsTerminal(writer) + + err = jsonmessage.DisplayJSONMessagesStream(rc, &colorizedWriter{writer}, termFd, isTerm, nil) + if err != nil { + return err + } + + return rc.Close() +} + +func registryAuth(ref string) (string, error) { + _, a, err := auth.ReferenceForRepoName(authn.DefaultKeychain, ref) + if err != nil { + return "", errors.Wrapf(err, "resolve auth for ref %s", ref) + } + authConfig, err := a.Authorization() + if err != nil { + return "", err + } + + dataJSON, err := json.Marshal(authConfig) + if err != nil { + 
return "", err + } + + return base64.StdEncoding.EncodeToString(dataJSON), nil +} + +type colorizedWriter struct { + writer io.Writer +} + +type colorFunc = func(string, ...interface{}) string + +func (w *colorizedWriter) Write(p []byte) (n int, err error) { + msg := string(p) + colorizers := map[string]colorFunc{ + "Waiting": style.Waiting, + "Pulling fs layer": style.Waiting, + "Downloading": style.Working, + "Download complete": style.Working, + "Extracting": style.Working, + "Pull complete": style.Complete, + "Already exists": style.Complete, + "=": style.ProgressBar, + ">": style.ProgressBar, + } + for pattern, colorize := range colorizers { + msg = strings.ReplaceAll(msg, pattern, colorize(pattern)) + } + return w.writer.Write([]byte(msg)) +} diff --git a/vendor/github.com/buildpacks/pack/pkg/image/pull_policy.go b/vendor/github.com/buildpacks/pack/pkg/image/pull_policy.go new file mode 100644 index 0000000000..f08678ac5a --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/image/pull_policy.go @@ -0,0 +1,41 @@ +package image + +import ( + "github.com/pkg/errors" +) + +// PullPolicy defines a policy for how to manage images +type PullPolicy int + +const ( + // PullAlways images, even if they are present + PullAlways PullPolicy = iota + // PullNever images, even if they are not present + PullNever + // PullIfNotPresent pulls images if they aren't present + PullIfNotPresent +) + +var nameMap = map[string]PullPolicy{"always": PullAlways, "never": PullNever, "if-not-present": PullIfNotPresent, "": PullAlways} + +// ParsePullPolicy from string +func ParsePullPolicy(policy string) (PullPolicy, error) { + if val, ok := nameMap[policy]; ok { + return val, nil + } + + return PullAlways, errors.Errorf("invalid pull policy %s", policy) +} + +func (p PullPolicy) String() string { + switch p { + case PullAlways: + return "always" + case PullNever: + return "never" + case PullIfNotPresent: + return "if-not-present" + } + + return "" +} diff --git a/vendor/github.com/buildpacks/pack/pkg/logging/logger_simple.go b/vendor/github.com/buildpacks/pack/pkg/logging/logger_simple.go new file mode 100644 index 0000000000..beb18e72e6 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/logging/logger_simple.go @@ -0,0 +1,66 @@ +package logging + +import ( + "fmt" + "io" + "log" +) + +// NewSimpleLogger creates a simple logger for the pack library. 
+func NewSimpleLogger(w io.Writer) Logger { + return &simpleLogger{ + out: log.New(w, "", log.LstdFlags|log.Lmicroseconds), + } +} + +type simpleLogger struct { + out *log.Logger +} + +const ( + debugPrefix = "DEBUG:" + infoPrefix = "INFO:" + warnPrefix = "WARN:" + errorPrefix = "ERROR:" + prefixFmt = "%-7s %s" +) + +func (l *simpleLogger) Debug(msg string) { + l.out.Printf(prefixFmt, debugPrefix, msg) +} + +func (l *simpleLogger) Debugf(format string, v ...interface{}) { + l.out.Printf(prefixFmt, debugPrefix, fmt.Sprintf(format, v...)) +} + +func (l *simpleLogger) Info(msg string) { + l.out.Printf(prefixFmt, infoPrefix, msg) +} + +func (l *simpleLogger) Infof(format string, v ...interface{}) { + l.out.Printf(prefixFmt, infoPrefix, fmt.Sprintf(format, v...)) +} + +func (l *simpleLogger) Warn(msg string) { + l.out.Printf(prefixFmt, warnPrefix, msg) +} + +func (l *simpleLogger) Warnf(format string, v ...interface{}) { + l.out.Printf(prefixFmt, warnPrefix, fmt.Sprintf(format, v...)) +} + +func (l *simpleLogger) Error(msg string) { + l.out.Printf(prefixFmt, errorPrefix, msg) +} + +func (l *simpleLogger) Errorf(format string, v ...interface{}) { + l.out.Printf(prefixFmt, errorPrefix, fmt.Sprintf(format, v...)) +} + +func (l *simpleLogger) Writer() io.Writer { + return l.out.Writer() +} + +func (l *simpleLogger) IsVerbose() bool { + return false +} diff --git a/vendor/github.com/buildpacks/pack/pkg/logging/logger_writers.go b/vendor/github.com/buildpacks/pack/pkg/logging/logger_writers.go new file mode 100644 index 0000000000..dceb66c462 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/logging/logger_writers.go @@ -0,0 +1,215 @@ +// Package logging implements the logger for the pack CLI. +package logging + +import ( + "fmt" + "io" + "io/ioutil" + "regexp" + "sync" + "time" + + "github.com/apex/log" + "github.com/heroku/color" + + "github.com/buildpacks/pack/internal/style" +) + +const ( + errorLevelText = "ERROR: " + warnLevelText = "Warning: " + lineFeed = '\n' + // log level to use when quiet is true + quietLevel = log.WarnLevel + // log level to use when debug is true + verboseLevel = log.DebugLevel + // time format the out logging uses + timeFmt = "2006/01/02 15:04:05.000000" + // InvalidFileDescriptor based on https://golang.org/src/os/file_unix.go?s=2183:2210#L57 + InvalidFileDescriptor = ^(uintptr(0)) +) + +var colorCodeMatcher = regexp.MustCompile(`\x1b\[[0-9;]*m`) + +var _ Logger = (*LogWithWriters)(nil) + +// LogWithWriters is a logger used with the pack CLI, allowing users to print logs for various levels, including Info, Debug and Error +type LogWithWriters struct { + sync.Mutex + log.Logger + wantTime bool + clock func() time.Time + out io.Writer + errOut io.Writer +} + +// NewLogWithWriters creates a logger to be used with pack CLI. 
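+// Non-error output is written to stdout and error output to stderr; the +// variadic options can swap the clock (WithClock) or raise verbosity (WithVerbose).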
+func NewLogWithWriters(stdout, stderr io.Writer, opts ...func(*LogWithWriters)) *LogWithWriters { + lw := &LogWithWriters{ + Logger: log.Logger{ + Level: log.InfoLevel, + }, + wantTime: false, + clock: time.Now, + out: stdout, + errOut: stderr, + } + lw.Logger.Handler = lw + + for _, opt := range opts { + opt(lw) + } + + return lw +} + +// WithClock is an option used to initialize a LogWithWriters with a given clock function +func WithClock(clock func() time.Time) func(writers *LogWithWriters) { + return func(logger *LogWithWriters) { + logger.clock = clock + } +} + +// WithVerbose is an option used to initialize a LogWithWriters with Verbose turned on +func WithVerbose() func(writers *LogWithWriters) { + return func(logger *LogWithWriters) { + logger.Level = log.DebugLevel + } +} + +// HandleLog handles log events, printing entries appropriately +func (lw *LogWithWriters) HandleLog(e *log.Entry) error { + lw.Lock() + defer lw.Unlock() + + writer := lw.WriterForLevel(Level(e.Level)) + _, err := fmt.Fprint(writer, appendMissingLineFeed(fmt.Sprintf("%s%s", formatLevel(e.Level), e.Message))) + + return err +} + +// WriterForLevel returns a Writer for the given Level +func (lw *LogWithWriters) WriterForLevel(level Level) io.Writer { + if lw.Level > log.Level(level) { + return ioutil.Discard + } + + if level == ErrorLevel { + return newLogWriter(lw.errOut, lw.clock, lw.wantTime) + } + + return newLogWriter(lw.out, lw.clock, lw.wantTime) +} + +// Writer returns the base Writer for the LogWithWriters +func (lw *LogWithWriters) Writer() io.Writer { + return lw.out +} + +// WantTime turns timestamps on in log entries +func (lw *LogWithWriters) WantTime(f bool) { + lw.wantTime = f +} + +// WantQuiet reduces the number of logs returned +func (lw *LogWithWriters) WantQuiet(f bool) { + if f { + lw.Level = quietLevel + } +} + +// WantVerbose increases the number of logs returned +func (lw *LogWithWriters) WantVerbose(f bool) { + if f { + lw.Level = verboseLevel + } +} + +// IsVerbose returns whether verbose logging is on +func (lw *LogWithWriters) IsVerbose() bool { + return lw.Level == log.DebugLevel +} + +func formatLevel(ll log.Level) string { + switch ll { + case log.ErrorLevel: + return style.Error(errorLevelText) + case log.WarnLevel: + return style.Warn(warnLevelText) + } + + return "" +} + +// Preserve behavior of other loggers +func appendMissingLineFeed(msg string) string { + buff := []byte(msg) + if len(buff) == 0 || buff[len(buff)-1] != lineFeed { + buff = append(buff, lineFeed) + } + return string(buff) +} + +// logWriter is a writer used for logs +type logWriter struct { + sync.Mutex + out io.Writer + clock func() time.Time + wantTime bool + wantNoColor bool +} + +func newLogWriter(writer io.Writer, clock func() time.Time, wantTime bool) *logWriter { + wantNoColor := !color.Enabled() + return &logWriter{ + out: writer, + clock: clock, + wantTime: wantTime, + wantNoColor: wantNoColor, + } +} + +// Write writes a message prepended by the time to the set io.Writer +func (lw *logWriter) Write(buf []byte) (n int, err error) { + lw.Lock() + defer lw.Unlock() + + length := len(buf) + if lw.wantNoColor { + buf = stripColor(buf) + } + + prefix := "" + if lw.wantTime { + prefix = fmt.Sprintf("%s ", lw.clock().Format(timeFmt)) + } + + _, err = fmt.Fprintf(lw.out, "%s%s", prefix, buf) + return length, err +} + +// Writer returns the base Writer for the logWriter +func (lw *logWriter) Writer() io.Writer { + return lw.out +} + +// Fd returns the file descriptor of the writer. 
This is used to ensure the writer is a console, and can therefore display streams of text +func (lw *logWriter) Fd() uintptr { + lw.Lock() + defer lw.Unlock() + + if file, ok := lw.out.(hasDescriptor); ok { + return file.Fd() + } + + return InvalidFileDescriptor +} + +// Remove all ANSI color information. +func stripColor(b []byte) []byte { + return colorCodeMatcher.ReplaceAll(b, []byte("")) +} + +type hasDescriptor interface { + Fd() uintptr +} diff --git a/vendor/github.com/buildpacks/pack/pkg/logging/logging.go b/vendor/github.com/buildpacks/pack/pkg/logging/logging.go new file mode 100644 index 0000000000..6d9a026950 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/logging/logging.go @@ -0,0 +1,66 @@ +// Package logging defines the minimal interface that loggers must support to be used by the client. +package logging + +import ( + "io" + "io/ioutil" + + "github.com/buildpacks/pack/internal/style" +) + +type Level int + +const ( + DebugLevel Level = iota + InfoLevel + WarnLevel + ErrorLevel +) + +// Logger defines behavior required by a logging package used by pack libraries +type Logger interface { + Debug(msg string) + Debugf(fmt string, v ...interface{}) + + Info(msg string) + Infof(fmt string, v ...interface{}) + + Warn(msg string) + Warnf(fmt string, v ...interface{}) + + Error(msg string) + Errorf(fmt string, v ...interface{}) + + Writer() io.Writer + + IsVerbose() bool +} + +type isSelectableWriter interface { + WriterForLevel(level Level) io.Writer +} + +// GetWriterForLevel retrieves the appropriate Writer for the log level provided. +// +// See isSelectableWriter +func GetWriterForLevel(logger Logger, level Level) io.Writer { + if w, ok := logger.(isSelectableWriter); ok { + return w.WriterForLevel(level) + } + + return logger.Writer() +} + +// IsQuiet defines whether a pack logger is set to quiet mode +func IsQuiet(logger Logger) bool { + if writer := GetWriterForLevel(logger, InfoLevel); writer == ioutil.Discard { + return true + } + + return false +} + +// Tip logs a tip. +func Tip(l Logger, format string, v ...interface{}) { + l.Infof(style.Tip("Tip: ")+format, v...) +} diff --git a/vendor/github.com/buildpacks/pack/pkg/logging/prefix_writer.go b/vendor/github.com/buildpacks/pack/pkg/logging/prefix_writer.go new file mode 100644 index 0000000000..cce4ee6772 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/logging/prefix_writer.go @@ -0,0 +1,126 @@ +package logging + +import ( + "bufio" + "bytes" + "fmt" + "io" + + "github.com/buildpacks/pack/internal/style" +) + +// PrefixWriter is a buffering writer that prefixes each new line. Close should be called to properly flush the buffer.
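+// Partial lines are held in the internal buffer until a newline (or Close) +// arrives, so every emitted line carries the prefix exactly once.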
+type PrefixWriter struct { + out io.Writer + buf *bytes.Buffer + prefix string + readerFactory func(data []byte) io.Reader +} + +type PrefixWriterOption func(c *PrefixWriter) + +func WithReaderFactory(factory func(data []byte) io.Reader) PrefixWriterOption { + return func(writer *PrefixWriter) { + writer.readerFactory = factory + } +} + +// NewPrefixWriter creates a PrefixWriter; writes to w will be prefixed +func NewPrefixWriter(w io.Writer, prefix string, opts ...PrefixWriterOption) *PrefixWriter { + writer := &PrefixWriter{ + out: w, + prefix: fmt.Sprintf("[%s] ", style.Prefix(prefix)), + buf: &bytes.Buffer{}, + readerFactory: func(data []byte) io.Reader { + return bytes.NewReader(data) + }, + } + + for _, opt := range opts { + opt(writer) + } + + return writer +} + +// Write buffers data and emits each completed line, with the prefix applied, to the underlying writer +func (w *PrefixWriter) Write(data []byte) (int, error) { + scanner := bufio.NewScanner(w.readerFactory(data)) + scanner.Split(ScanLinesKeepNewLine) + for scanner.Scan() { + newBits := scanner.Bytes() + if len(newBits) > 0 && newBits[len(newBits)-1] != '\n' { // just append if we don't have a new line + _, err := w.buf.Write(newBits) + if err != nil { + return 0, err + } + } else { // write our complete message + _, err := w.buf.Write(bytes.TrimRight(newBits, "\n")) + if err != nil { + return 0, err + } + + err = w.flush() + if err != nil { + return 0, err + } + } + } + + if err := scanner.Err(); err != nil { + return 0, err + } + + return len(data), nil +} + +// Close writes any pending data in the buffer +func (w *PrefixWriter) Close() error { + if w.buf.Len() > 0 { + return w.flush() + } + + return nil +} + +func (w *PrefixWriter) flush() error { + bits := w.buf.Bytes() + w.buf.Reset() + + // process any CR in message + if i := bytes.LastIndexByte(bits, '\r'); i >= 0 { + bits = bits[i+1:] + } + + _, err := fmt.Fprint(w.out, w.prefix+string(bits)+"\n") + return err +} + +// A customized implementation of bufio.ScanLines that preserves new line characters. +func ScanLinesKeepNewLine(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + + // first we'll split by LF (\n) + // then remove any preceding CR (\r) [due to CR+LF] + if i := bytes.IndexByte(data, '\n'); i >= 0 { + // We have a full newline-terminated line. + return i + 1, append(dropCR(data[0:i]), '\n'), nil + } + + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} + +// dropCR drops a terminal \r from the data.
+func dropCR(data []byte) []byte { + if len(data) > 0 && data[len(data)-1] == '\r' { + return data[0 : len(data)-1] + } + return data +} diff --git a/vendor/github.com/buildpacks/pack/pkg/project/types/types.go b/vendor/github.com/buildpacks/pack/pkg/project/types/types.go new file mode 100644 index 0000000000..575adcc83f --- /dev/null +++ b/vendor/github.com/buildpacks/pack/pkg/project/types/types.go @@ -0,0 +1,50 @@ +package types + +import ( + "github.com/buildpacks/lifecycle/api" +) + +type Script struct { + API string `toml:"api"` + Inline string `toml:"inline"` + Shell string `toml:"shell"` +} + +type Buildpack struct { + ID string `toml:"id"` + Version string `toml:"version"` + URI string `toml:"uri"` + Script Script `toml:"script"` +} + +type EnvVar struct { + Name string `toml:"name"` + Value string `toml:"value"` +} + +type Build struct { + Include []string `toml:"include"` + Exclude []string `toml:"exclude"` + Buildpacks []Buildpack `toml:"buildpacks"` + Env []EnvVar `toml:"env"` + Builder string `toml:"builder"` +} + +type Project struct { + Name string `toml:"name"` + Version string `toml:"version"` + SourceURL string `toml:"source-url"` + Licenses []License `toml:"licenses"` +} + +type License struct { + Type string `toml:"type"` + URI string `toml:"uri"` +} + +type Descriptor struct { + Project Project `toml:"project"` + Build Build `toml:"build"` + Metadata map[string]interface{} `toml:"metadata"` + SchemaVersion *api.Version +} diff --git a/vendor/github.com/buildpacks/pack/project.toml b/vendor/github.com/buildpacks/pack/project.toml new file mode 100644 index 0000000000..e8c7bb4495 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/project.toml @@ -0,0 +1,3 @@ +[project] +version = "1.0.2" +source-url = "https://github.com/buildpacks/pack" diff --git a/vendor/github.com/buildpacks/pack/version.go b/vendor/github.com/buildpacks/pack/version.go new file mode 100644 index 0000000000..6d858250f9 --- /dev/null +++ b/vendor/github.com/buildpacks/pack/version.go @@ -0,0 +1,6 @@ +package pack + +var ( + // Version is the version of `pack`. It is injected at compile time. + Version = "0.0.0" +) diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt new file mode 100644 index 0000000000..24b53065f4 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/LICENSE.txt @@ -0,0 +1,22 @@ +Copyright (c) 2016 Caleb Spare + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
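The `project.toml` added above is a live instance of the schema modeled by `pkg/project/types`. A minimal sketch of decoding it into a `types.Descriptor` via the vendored BurntSushi/toml package (the `main` wrapper and the inlined document are illustrative, not part of the vendored code):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
	"github.com/buildpacks/pack/pkg/project/types"
)

func main() {
	// The same document vendored at vendor/github.com/buildpacks/pack/project.toml.
	const projectTOML = `
[project]
version = "1.0.2"
source-url = "https://github.com/buildpacks/pack"
`
	var d types.Descriptor
	// toml.Decode maps TOML keys onto the struct fields via their toml tags.
	if _, err := toml.Decode(projectTOML, &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Project.Version, d.Project.SourceURL) // 1.0.2 https://github.com/buildpacks/pack
}
```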
diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md new file mode 100644 index 0000000000..0982fd25e5 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/README.md @@ -0,0 +1,50 @@ +# xxhash + +[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) + +xxhash is a Go implementation of the 64-bit +[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a +high-quality hashing algorithm that is much faster than anything in the Go +standard library. + +The API is very small, taking its cue from the other hashing packages in the +standard library: + + $ go doc github.com/cespare/xxhash + package xxhash // import "github.com/cespare/xxhash" + + Package xxhash implements the 64-bit variant of xxHash (XXH64) as described + at http://cyan4973.github.io/xxHash/. + + func New() hash.Hash64 + func Sum64(b []byte) uint64 + func Sum64String(s string) uint64 + +This implementation provides a fast pure-Go implementation and an even faster +assembly implementation for amd64. + +## Benchmarks + +Here are some quick benchmarks comparing the pure-Go and assembly +implementations of Sum64 against another popular Go XXH64 implementation, +[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): + +| input size | OneOfOne | cespare (purego) | cespare | +| --- | --- | --- | --- | +| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | +| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | +| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | +| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | + +These numbers were generated with: + +``` +$ go test -benchtime 10s -bench '/OneOfOne,' +$ go test -tags purego -benchtime 10s -bench '/xxhash,' +$ go test -benchtime 10s -bench '/xxhash,' +``` + +## Projects using this package + +- [InfluxDB](https://github.com/influxdata/influxdb) +- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/go.mod b/vendor/github.com/cespare/xxhash/go.mod new file mode 100644 index 0000000000..10605a6a5e --- /dev/null +++ b/vendor/github.com/cespare/xxhash/go.mod @@ -0,0 +1,6 @@ +module github.com/cespare/xxhash + +require ( + github.com/OneOfOne/xxhash v1.2.2 + github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 +) diff --git a/vendor/github.com/cespare/xxhash/go.sum b/vendor/github.com/cespare/xxhash/go.sum new file mode 100644 index 0000000000..f6b5542617 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/go.sum @@ -0,0 +1,4 @@ +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go new file mode 100644 index 0000000000..f3eac5ebc0 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/rotate.go @@ -0,0 +1,14 @@ +// +build !go1.9 + +package xxhash + +// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
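+// These shift-and-or rotations are the fallback for toolchains older than +// Go 1.9; rotate19.go supplies the math/bits.RotateLeft64 versions behind the go1.9 build tag.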
+ +func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } +func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } +func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } +func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } +func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } +func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } +func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } +func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go new file mode 100644 index 0000000000..b99612bab8 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/rotate19.go @@ -0,0 +1,14 @@ +// +build go1.9 + +package xxhash + +import "math/bits" + +func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } +func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } +func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } +func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } +func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } +func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } +func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } +func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go new file mode 100644 index 0000000000..f896bd28f0 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash.go @@ -0,0 +1,168 @@ +// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described +// at http://cyan4973.github.io/xxHash/. +package xxhash + +import ( + "encoding/binary" + "hash" +) + +const ( + prime1 uint64 = 11400714785074694791 + prime2 uint64 = 14029467366897019727 + prime3 uint64 = 1609587929392839161 + prime4 uint64 = 9650029242287828579 + prime5 uint64 = 2870177450012600261 +) + +// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where +// possible in the Go code is worth a small (but measurable) performance boost +// by avoiding some MOVQs. Vars are needed for the asm and also are useful for +// convenience in the Go code in a few places where we need to intentionally +// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the +// result overflows a uint64). +var ( + prime1v = prime1 + prime2v = prime2 + prime3v = prime3 + prime4v = prime4 + prime5v = prime5 +) + +type xxh struct { + v1 uint64 + v2 uint64 + v3 uint64 + v4 uint64 + total int + mem [32]byte + n int // how much of mem is used +} + +// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. +func New() hash.Hash64 { + var x xxh + x.Reset() + return &x +} + +func (x *xxh) Reset() { + x.n = 0 + x.total = 0 + x.v1 = prime1v + prime2 + x.v2 = prime2 + x.v3 = 0 + x.v4 = -prime1v +} + +func (x *xxh) Size() int { return 8 } +func (x *xxh) BlockSize() int { return 32 } + +// Write adds more data to x. It always returns len(b), nil. +func (x *xxh) Write(b []byte) (n int, err error) { + n = len(b) + x.total += len(b) + + if x.n+len(b) < 32 { + // This new data doesn't even fill the current block. + copy(x.mem[x.n:], b) + x.n += len(b) + return + } + + if x.n > 0 { + // Finish off the partial block. 
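+ // Copying 32-x.n bytes tops mem up to one full 32-byte block, which is + // then consumed as four 8-byte lanes before the rest of b is processed directly.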
+ copy(x.mem[x.n:], b) + x.v1 = round(x.v1, u64(x.mem[0:8])) + x.v2 = round(x.v2, u64(x.mem[8:16])) + x.v3 = round(x.v3, u64(x.mem[16:24])) + x.v4 = round(x.v4, u64(x.mem[24:32])) + b = b[32-x.n:] + x.n = 0 + } + + if len(b) >= 32 { + // One or more full blocks left. + b = writeBlocks(x, b) + } + + // Store any remaining partial block. + copy(x.mem[:], b) + x.n = len(b) + + return +} + +func (x *xxh) Sum(b []byte) []byte { + s := x.Sum64() + return append( + b, + byte(s>>56), + byte(s>>48), + byte(s>>40), + byte(s>>32), + byte(s>>24), + byte(s>>16), + byte(s>>8), + byte(s), + ) +} + +func (x *xxh) Sum64() uint64 { + var h uint64 + + if x.total >= 32 { + v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = x.v3 + prime5 + } + + h += uint64(x.total) + + i, end := 0, x.n + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(x.mem[i:i+8])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(x.mem[i:i+4])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for i < end { + h ^= uint64(x.mem[i]) * prime5 + h = rol11(h) * prime1 + i++ + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } +func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } + +func round(acc, input uint64) uint64 { + acc += input * prime2 + acc = rol31(acc) + acc *= prime1 + return acc +} + +func mergeRound(acc, val uint64) uint64 { + val = round(0, val) + acc ^= val + acc = acc*prime1 + prime4 + return acc +} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go new file mode 100644 index 0000000000..d617652680 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.go @@ -0,0 +1,12 @@ +// +build !appengine +// +build gc +// +build !purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +// +//go:noescape +func Sum64(b []byte) uint64 + +func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s new file mode 100644 index 0000000000..757f2011f0 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_amd64.s @@ -0,0 +1,233 @@ +// +build !appengine +// +build gc +// +build !purego + +#include "textflag.h" + +// Register allocation: +// AX h +// CX pointer to advance through b +// DX n +// BX loop end +// R8 v1, k1 +// R9 v2 +// R10 v3 +// R11 v4 +// R12 tmp +// R13 prime1v +// R14 prime2v +// R15 prime4v + +// round reads from and advances the buffer pointer in CX. +// It assumes that R13 has prime1v and R14 has prime2v. +#define round(r) \ + MOVQ (CX), R12 \ + ADDQ $8, CX \ + IMULQ R14, R12 \ + ADDQ R12, r \ + ROLQ $31, r \ + IMULQ R13, r + +// mergeRound applies a merge round on the two registers acc and val. +// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. +#define mergeRound(acc, val) \ + IMULQ R14, val \ + ROLQ $31, val \ + IMULQ R13, val \ + XORQ val, acc \ + IMULQ R13, acc \ + ADDQ R15, acc + +// func Sum64(b []byte) uint64 +TEXT ·Sum64(SB), NOSPLIT, $0-32 + // Load fixed primes. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + MOVQ ·prime4v(SB), R15 + + // Load slice. + MOVQ b_base+0(FP), CX + MOVQ b_len+8(FP), DX + LEAQ (CX)(DX*1), BX + + // The first loop limit will be len(b)-32. 
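+	// (After the subtraction below, CX <= BX exactly when a full 32-byte +	// block remains, so blockLoop can compare CX against BX directly.)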
+ SUBQ $32, BX + + // Check whether we have at least one block. + CMPQ DX, $32 + JLT noBlocks + + // Set up initial state (v1, v2, v3, v4). + MOVQ R13, R8 + ADDQ R14, R8 + MOVQ R14, R9 + XORQ R10, R10 + XORQ R11, R11 + SUBQ R13, R11 + + // Loop until CX > BX. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + MOVQ R8, AX + ROLQ $1, AX + MOVQ R9, R12 + ROLQ $7, R12 + ADDQ R12, AX + MOVQ R10, R12 + ROLQ $12, R12 + ADDQ R12, AX + MOVQ R11, R12 + ROLQ $18, R12 + ADDQ R12, AX + + mergeRound(AX, R8) + mergeRound(AX, R9) + mergeRound(AX, R10) + mergeRound(AX, R11) + + JMP afterBlocks + +noBlocks: + MOVQ ·prime5v(SB), AX + +afterBlocks: + ADDQ DX, AX + + // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. + ADDQ $24, BX + + CMPQ CX, BX + JG fourByte + +wordLoop: + // Calculate k1. + MOVQ (CX), R8 + ADDQ $8, CX + IMULQ R14, R8 + ROLQ $31, R8 + IMULQ R13, R8 + + XORQ R8, AX + ROLQ $27, AX + IMULQ R13, AX + ADDQ R15, AX + + CMPQ CX, BX + JLE wordLoop + +fourByte: + ADDQ $4, BX + CMPQ CX, BX + JG singles + + MOVL (CX), R8 + ADDQ $4, CX + IMULQ R13, R8 + XORQ R8, AX + + ROLQ $23, AX + IMULQ R14, AX + ADDQ ·prime3v(SB), AX + +singles: + ADDQ $4, BX + CMPQ CX, BX + JGE finalize + +singlesLoop: + MOVBQZX (CX), R12 + ADDQ $1, CX + IMULQ ·prime5v(SB), R12 + XORQ R12, AX + + ROLQ $11, AX + IMULQ R13, AX + + CMPQ CX, BX + JL singlesLoop + +finalize: + MOVQ AX, R12 + SHRQ $33, R12 + XORQ R12, AX + IMULQ R14, AX + MOVQ AX, R12 + SHRQ $29, R12 + XORQ R12, AX + IMULQ ·prime3v(SB), AX + MOVQ AX, R12 + SHRQ $32, R12 + XORQ R12, AX + + MOVQ AX, ret+24(FP) + RET + +// writeBlocks uses the same registers as above except that it uses AX to store +// the x pointer. + +// func writeBlocks(x *xxh, b []byte) []byte +TEXT ·writeBlocks(SB), NOSPLIT, $0-56 + // Load fixed primes needed for round. + MOVQ ·prime1v(SB), R13 + MOVQ ·prime2v(SB), R14 + + // Load slice. + MOVQ b_base+8(FP), CX + MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below + MOVQ b_len+16(FP), DX + LEAQ (CX)(DX*1), BX + SUBQ $32, BX + + // Load vN from x. + MOVQ x+0(FP), AX + MOVQ 0(AX), R8 // v1 + MOVQ 8(AX), R9 // v2 + MOVQ 16(AX), R10 // v3 + MOVQ 24(AX), R11 // v4 + + // We don't need to check the loop condition here; this function is + // always called with at least one block of data to process. +blockLoop: + round(R8) + round(R9) + round(R10) + round(R11) + + CMPQ CX, BX + JLE blockLoop + + // Copy vN back to x. + MOVQ R8, 0(AX) + MOVQ R9, 8(AX) + MOVQ R10, 16(AX) + MOVQ R11, 24(AX) + + // Construct return slice. + // NOTE: It's important that we don't construct a slice that has a base + // pointer off the end of the original slice, as in Go 1.7+ this will + // cause runtime crashes. (See discussion in, for example, + // https://github.com/golang/go/issues/16772.) + // Therefore, we calculate the length/cap first, and if they're zero, we + // keep the old base. This is what the compiler does as well if you + // write code like + // b = b[len(b):] + + // New length is 32 - (CX - BX) -> BX+32 - CX. 
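+	// (CX has advanced past every full block, so between 0 and 31 bytes +	// remain; BX+32-CX computes exactly that count.)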
+ ADDQ $32, BX + SUBQ CX, BX + JZ afterSetBase + + MOVQ CX, ret_base+32(FP) + +afterSetBase: + MOVQ BX, ret_len+40(FP) + MOVQ BX, ret_cap+48(FP) // set cap == len + + RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go new file mode 100644 index 0000000000..c68d13f89e --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_other.go @@ -0,0 +1,75 @@ +// +build !amd64 appengine !gc purego + +package xxhash + +// Sum64 computes the 64-bit xxHash digest of b. +func Sum64(b []byte) uint64 { + // A simpler version would be + // x := New() + // x.Write(b) + // return x.Sum64() + // but this is faster, particularly for small inputs. + + n := len(b) + var h uint64 + + if n >= 32 { + v1 := prime1v + prime2 + v2 := prime2 + v3 := uint64(0) + v4 := -prime1v + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) + h = mergeRound(h, v1) + h = mergeRound(h, v2) + h = mergeRound(h, v3) + h = mergeRound(h, v4) + } else { + h = prime5 + } + + h += uint64(n) + + i, end := 0, len(b) + for ; i+8 <= end; i += 8 { + k1 := round(0, u64(b[i:i+8:len(b)])) + h ^= k1 + h = rol27(h)*prime1 + prime4 + } + if i+4 <= end { + h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 + h = rol23(h)*prime2 + prime3 + i += 4 + } + for ; i < end; i++ { + h ^= uint64(b[i]) * prime5 + h = rol11(h) * prime1 + } + + h ^= h >> 33 + h *= prime2 + h ^= h >> 29 + h *= prime3 + h ^= h >> 32 + + return h +} + +func writeBlocks(x *xxh, b []byte) []byte { + v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 + for len(b) >= 32 { + v1 = round(v1, u64(b[0:8:len(b)])) + v2 = round(v2, u64(b[8:16:len(b)])) + v3 = round(v3, u64(b[16:24:len(b)])) + v4 = round(v4, u64(b[24:32:len(b)])) + b = b[32:len(b):len(b)] + } + x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 + return b +} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go new file mode 100644 index 0000000000..dfa15ab7e2 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_safe.go @@ -0,0 +1,10 @@ +// +build appengine + +// This file contains the safe implementations of otherwise unsafe-using code. + +package xxhash + +// Sum64String computes the 64-bit xxHash digest of s. +func Sum64String(s string) uint64 { + return Sum64([]byte(s)) +} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go new file mode 100644 index 0000000000..d2b64e8bb0 --- /dev/null +++ b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go @@ -0,0 +1,30 @@ +// +build !appengine + +// This file encapsulates usage of unsafe. +// xxhash_safe.go contains the safe implementations. + +package xxhash + +import ( + "reflect" + "unsafe" +) + +// Sum64String computes the 64-bit xxHash digest of s. +// It may be faster than Sum64([]byte(s)) by avoiding a copy. +// +// TODO(caleb): Consider removing this if an optimization is ever added to make +// it unnecessary: https://golang.org/issue/2205. +// +// TODO(caleb): We still have a function call; we could instead write Go/asm +// copies of Sum64 for strings to squeeze out a bit more speed. +func Sum64String(s string) uint64 { + // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ + // for some discussion about this unsafe conversion. 
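+	// The string header is reinterpreted in place as a slice header; b must +	// never be written to, since s may be backed by read-only memory.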
+ var b []byte + bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data + bh.Len = len(s) + bh.Cap = len(s) + return Sum64(b) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go index 332cb67c1f..e7ed3a357e 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/alias.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + // Package v2 reexports a subset of the SDK v2 API. package v2 diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go index a99cd0b706..97f2c4dd74 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go index 1176fad806..8fa999789f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package binding defines interfaces for protocol bindings. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go index 0b6efe636c..16611a3d75 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import "errors" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go index 130327d454..f82c729c44 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go index 17445bfe58..8b51c4c610 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import "github.com/cloudevents/sdk-go/v2/binding/spec" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go index ab153afbba..54c3f1a8c7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package format formats structured events. 
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go index 9e2b1ec676..2d840025ea 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package format import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go index ce239556c7..e30e150c02 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go index 20ec1ce92f..3c3021d464 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package spec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go index 38d6fddf9f..44c0b3145b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package spec provides spec-version metadata. 
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go index 5976faf124..110787ddc3 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/match_exact_version.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package spec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go index 4de589185e..7fa0f5840d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package spec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go index 8cf2bbe3e3..60256f2b3c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go index 5f76e3ef2b..339a7833c3 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go index 6ab4f1e5de..de3bec44fa 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding // Transformer is an interface that implements a transformation diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go index ff7cf5fb7c..cb498e62de 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package binding import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go index 088d88f787..0be62d7fc9 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go index 13e0d64302..d48cc20425 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_http.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + 
SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go index 83edc953bb..82985b8a7f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client // NewObserved produces a new client with the provided transport object and applied diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go index 5d0d7bc941..7bfebf35c8 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go index a6a602bb41..e09962ce6f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package client holds the recommended entry points for interacting with the CloudEvents Golang SDK. The client wraps a selected transport. The client adds validation and defaulting for sending events, and flexible receiver method diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go index d926bd3099..94a4b4e65e 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go index 1d812a78d3..e6d11f55f3 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go index 4ffb83433b..75005d3bb5 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go index a1facfc234..d0fe9dbaa9 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go index 0d2dbf4749..b1ab532d79 
100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package client import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go index f9843dd61d..fc9ef0315f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/context.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package context import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go index 377cab850f..0b2dcaf709 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package context holds the last resort overrides and fyi objects that can be passed to clients and transports added to context.Context objects. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go index 996f720572..b3087a79fe 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package context import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go index f590d46626..ec17df72e7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/retry.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package context import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go index 591878e5dc..a49522f82f 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event const ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go index 24c4094fc3..cf2152693b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event const ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go index fd68ca5598..3e077740b5 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package datacodec import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go 
b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go index 9e401534e2..b681af8872 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package datacodec holds the data codec registry and adds known encoders and decoders supporting media types such as `application/json` and `application/xml`. diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go index f40869b34b..734ade59fa 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package json import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go index 86772c2e33..33e1323c72 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package json holds the encoder/decoder implementation for `application/json`. */ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go index 5e6ddc0805..761a101139 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package text import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go index 13316702ec..af10577aae 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package text holds the encoder/decoder implementation for `text/plain`. */ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go index 13045e03d6..de68ec3dce 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package xml import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go index d90b7c444d..c8d73213f2 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package xml holds the encoder/decoder implementation for `application/xml`. 
*/ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go index eebbeb4ef1..31c22ce677 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package event provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. */ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go index 3860f88739..94b5aa0ada 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go index c85fe7e52c..0f18314827 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go index af87454d8e..2809fed57d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go index 8ec489bb1c..c5f2dc03c7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go index 86ca609b46..9d1aeeb65d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go index 60473e60e3..138c398abc 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_unmarshal.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go index b5759fa4eb..958ecc47d2 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_validation.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 
+*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go index 00018cbdb6..ddfb1be38c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go index 2d0611215a..a39565afae 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import "time" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go index c626311df5..561f4c5dfb 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go index 5e6dbd8315..2cd27a7057 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go index 94748c67c5..5d664635ec 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go index 6695e15093..01f97586f6 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go index eef6f4ef5f..74f73b029d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go index 1ec29e65e4..5f2aca763b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go +++ 
b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go index 3d0210fb0d..6c4193f348 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package event import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/go.mod b/vendor/github.com/cloudevents/sdk-go/v2/go.mod index e530c108cf..40a2d282bd 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/go.mod +++ b/vendor/github.com/cloudevents/sdk-go/v2/go.mod @@ -3,7 +3,7 @@ module github.com/cloudevents/sdk-go/v2 go 1.14 require ( - github.com/google/go-cmp v0.4.0 + github.com/google/go-cmp v0.5.0 github.com/google/uuid v1.1.1 github.com/json-iterator/go v1.1.10 github.com/kr/text v0.2.0 // indirect diff --git a/vendor/github.com/cloudevents/sdk-go/v2/go.sum b/vendor/github.com/cloudevents/sdk-go/v2/go.sum index a1b6003825..b893d87b10 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/go.sum +++ b/vendor/github.com/cloudevents/sdk-go/v2/go.sum @@ -2,15 +2,14 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go index d14bf7f984..f826a1841d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package protocol defines interfaces to decouple the client package from protocol implementations. 
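// Illustrative sketch, not part of the patch: the protocol package vendored
// above is the seam that keeps the client transport-agnostic. Assuming the
// constructors in the vendored v2 client and protocol/http packages, wiring
// the HTTP binding into a client looks roughly like this:

package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	"github.com/cloudevents/sdk-go/v2/event"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	// The HTTP binding satisfies the protocol interfaces the client expects.
	p, err := cehttp.New(cehttp.WithTarget("http://localhost:8080/"))
	if err != nil {
		log.Fatal(err)
	}

	c, err := client.New(p)
	if err != nil {
		log.Fatal(err)
	}

	e := event.New()
	e.SetID("1")
	e.SetType("example.sent")
	e.SetSource("example/uri")

	// Send returns a protocol.Result; a nil or ACK result means delivered.
	if result := c.Send(context.Background(), e); result != nil {
		log.Printf("send result: %v", result)
	}
}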
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go index 0c9530d193..a3f335261d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import "fmt" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go index eb004101f0..89222a20cf 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/abuse_protection.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go index 5c04b88afa..3428ea3875 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package http implements an HTTP binding using net/http module */ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go index 527395ab70..055a5c4ddf 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/headers.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go index 44ef429483..e7e51d034b 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go index 2ec0a5812f..55031939c6 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go index 6a4a8dd38b..a17028795d 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -1,6 +1,12 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( + "bytes" "context" "errors" "fmt" @@ -134,7 +140,21 @@ func (p *Protocol) Send(ctx context.Context, m binding.Message, transformers ... return fmt.Errorf("nil Message") } - _, err := p.Request(ctx, m, transformers...) + msg, err := p.Request(ctx, m, transformers...) 
+ if msg != nil { + defer func() { _ = msg.Finish(err) }() + } + if err != nil && !protocol.IsACK(err) { + var res *Result + if protocol.ResultAs(err, &res) { + if message, ok := msg.(*Message); ok { + buf := new(bytes.Buffer) + buf.ReadFrom(message.BodyReader) + errorStr := buf.String() + err = NewResult(res.StatusCode, "%s", errorStr) + } + } + } return err } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go index 70c15b07cc..dacfd30f61 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go index bfb1f5af2c..fb7bcd27ef 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_retry.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go index 149e6872cc..7a0b2626cf 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go index 0f25f70595..f4046d5223 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/retries_result.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go index e0c0d3072a..43ad36180c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go index 9646ca49ff..41385dab14 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package http import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go index 39181771f4..e7a74294d0 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -1,3 +1,8 @@ 
+/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go index 22ae08e096..4a058c9629 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go index b0a87761e8..e44fa432a7 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go index 25f4d56542..eae64e018c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package protocol import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go index c38f711770..8146268746 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import "reflect" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go index b1d9c29da7..cf7a94f35c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + /* Package types implements the CloudEvents type system. 
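// Illustrative sketch, not part of the patch: the Send rewrite in
// protocol/http/protocol.go above now folds the HTTP response body into the
// returned result on a NACK. Assuming the vendored v2 protocol types, a
// caller can unwrap it like this:

package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	"github.com/cloudevents/sdk-go/v2/event"
	"github.com/cloudevents/sdk-go/v2/protocol"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func report(ctx context.Context, c client.Client, e event.Event) {
	result := c.Send(ctx, e)
	if protocol.IsACK(result) {
		return
	}

	// On failure, the http.Result carries the status code, and after the
	// change above, the response body text rides along in the result error.
	var httpResult *cehttp.Result
	if protocol.ResultAs(result, &httpResult) {
		log.Printf("delivery failed with HTTP %d: %v", httpResult.StatusCode, result)
	}
}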
diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go index 3ae1c7def8..ff049727dd 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go index 4ada9178d8..bed608094c 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go index e19a1dbb71..22fa123145 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( diff --git a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go index adfbdd687e..f643d0aa51 100644 --- a/vendor/github.com/cloudevents/sdk-go/v2/types/value.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -1,3 +1,8 @@ +/* + Copyright 2021 The CloudEvents Authors + SPDX-License-Identifier: Apache-2.0 +*/ + package types import ( diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 0000000000..584149b6ee --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 0000000000..8915f02773 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 0000000000..05a35228ca --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,93 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with errors.Wrap and errors.Wrapf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import ( + "context" + + "github.com/pkg/errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these error classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. + ErrInvalidArgument = errors.New("invalid argument") + ErrNotFound = errors.New("not found") + ErrAlreadyExists = errors.New("already exists") + ErrFailedPrecondition = errors.New("failed precondition") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented +) + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Is(err, ErrInvalidArgument) +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Is(err, ErrNotFound) +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Is(err, ErrAlreadyExists) +} + +// IsFailedPrecondition returns true if an operation could not proceed due to +// the lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Is(err, ErrFailedPrecondition) +} + +// IsUnavailable returns true if the error is due to a resource being unavailable +func IsUnavailable(err error) bool { + return errors.Is(err, ErrUnavailable) +} + +// IsNotImplemented returns true if the error is due to not being implemented +func IsNotImplemented(err error) bool { + return errors.Is(err, ErrNotImplemented) +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 0000000000..209f63bd0f --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License.
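// Illustrative sketch, not part of the patch: callers classify failures with
// the errdefs IsXXX helpers defined in errors.go above instead of matching on
// strings; errors.Is sees through pkg/errors wrapping.

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

func main() {
	// Wrap a sentinel with context; the error class is preserved.
	err := errors.Wrap(errdefs.ErrNotFound, "image example:latest")

	switch {
	case errdefs.IsNotFound(err):
		fmt.Println("not found:", err)
	case errdefs.IsInvalidArgument(err):
		fmt.Println("bad request:", err)
	default:
		fmt.Println("unclassified:", err)
	}
}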
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "strings" + + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack. +func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to ToGRPC(errors.Wrapf(err, format, args...)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(errors.Wrapf(err, format, args...)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = errors.Wrap(cls, msg) + } else { + err = errors.WithStack(cls) + } + + return err +} + +// rebaseMessage removes the repeated error class from the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there.
+func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go new file mode 100644 index 0000000000..37b6a7d1c8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -0,0 +1,68 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. + L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} +) + +const ( + // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to + // ensure the formatted time is always the same number of characters. + RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + + // TextFormat represents the text logging format + TextFormat = "text" + + // JSONFormat represents the JSON logging format + JSONFormat = "json" +) + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L + } + + return logger.(*logrus.Entry) +} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 0000000000..c7657e1869 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,193 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// platformVector returns an (ordered) vector of appropriate specs.Platform +// objects to try matching for the given platform object (see platforms.Only). +func platformVector(platform specs.Platform) []specs.Platform { + vector := []specs.Platform{platform} + + switch platform.Architecture { + case "amd64": + vector = append(vector, specs.Platform{ + Architecture: "386", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: platform.Variant, + }) + case "arm": + if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { + for armVersion--; armVersion >= 5; armVersion-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(armVersion), + }) + } + } + case "arm64": + variant := platform.Variant + if variant == "" { + variant = "v8" + } + vector = append(vector, platformVector(specs.Platform{ + Architecture: "arm", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: variant, + })...) + } + + return vector +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. +// +// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 +// For arm/v7, will also match arm/v6 and arm/v5 +// For arm/v6, will also match arm/v5 +// For amd64, will also match 386 +func Only(platform specs.Platform) MatchComparer { + return Ordered(platformVector(Normalize(platform))...) +} + +// OnlyStrict returns a match comparer for a single platform. +// +// Unlike Only, OnlyStrict does not match sub platforms. +// So, "arm/vN" will not match "arm/vM" where M < N, +// and "amd64" will not also match "386". +// +// OnlyStrict matches non-canonical forms. +// So, "arm64" matches "arm64/v8". +func OnlyStrict(platform specs.Platform) MatchComparer { + return Ordered(Normalize(platform)) +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in the order they are provided. +func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering.
+var All MatchComparer = allPlatformComparer{} + +type orderedPlatformComparer struct { + matchers []Matcher +} + +func (c orderedPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { + for _, m := range c.matchers { + p1m := m.Match(p1) + p2m := m.Match(p2) + if p1m && !p2m { + return true + } + if p1m || p2m { + return false + } + } + return false +} + +type anyPlatformComparer struct { + matchers []Matcher +} + +func (c anyPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { + var p1m, p2m bool + for _, m := range c.matchers { + if !p1m && m.Match(p1) { + p1m = true + } + if !p2m && m.Match(p2) { + p2m = true + } + if p1m && p2m { + return false + } + } + // If one matches and the other does not, sort the match first + return p1m && !p2m +} + +type allPlatformComparer struct{} + +func (allPlatformComparer) Match(specs.Platform) bool { + return true +} + +func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { + return false +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go new file mode 100644 index 0000000000..4a7177e313 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "os" + "runtime" + "strings" + "sync" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +// cpuVariantValue presents the ARM instruction set architecture, e.g. v7, v8. +// Don't use this value directly; call cpuVariant() instead. +var cpuVariantValue string + +var cpuVariantOnce sync.Once + +func cpuVariant() string { + cpuVariantOnce.Do(func() { + if isArmArch(runtime.GOARCH) { + cpuVariantValue = getCPUVariant() + } + }) + return cpuVariantValue } + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse this information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + if !isLinuxOS(runtime.GOOS) { + return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Parse the cpuinfo line by line. For SMP SoCs, parsing + // the first core is enough.
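// Illustrative sketch, not part of the patch: given the platformVector logic
// in compare.go above, Only for linux/arm/v7 also accepts v6 and v5 images
// but not v8. Assuming the vendored platforms and image-spec packages:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
	specs "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	m := platforms.Only(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"})

	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})) // true
	fmt.Println(m.Match(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v8"})) // false
}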
+ scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) +} + +func getCPUVariant() string { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only support v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + log.L.WithError(err).Error("failure getting variant") + return "" + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 0000000000..6ede94061e --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// isLinuxOS returns true if the operating system is Linux. +// +// The OS value should be normalized before calling this function. +func isLinuxOS(os string) bool { + return os == "linux" +} + +// These functions are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function. +func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM.
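// Illustrative sketch, not part of the patch: the normalizeOS and
// normalizeArch helpers in database.go map Debian-style and other aliases
// onto the Go/OCI names, which Parse (added later in this patch) applies:

package main

import (
	"fmt"

	"github.com/containerd/containerd/platforms"
)

func main() {
	// "armhf" normalizes to arm/v7; Parse then drops the implied v7 variant.
	p, err := platforms.Parse("linux/armhf")
	if err != nil {
		panic(err)
	}
	fmt.Println(platforms.Format(p)) // linux/arm

	// "macos" and "x86_64" normalize to darwin and amd64.
	p, _ = platforms.Parse("macos/x86_64")
	fmt.Println(platforms.Format(p)) // darwin/amd64
}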
+// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. +// +// The arch value should be normalized before being passed to this function. +func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64": + arch = "amd64" + variant = "" + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 0000000000..cb77fbc9f7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// DefaultStrict returns strict form of Default. +func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 0000000000..e8a7d5ffa0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 0000000000..0c380e3b7c --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,81 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + imagespec "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +type matchComparer struct { + defaults Matcher + osVersionPrefix string +} + +// Match matches platform with the same windows major, minor +// and build version. +func (m matchComparer) Match(p imagespec.Platform) bool { + if m.defaults.Match(p) { + // TODO(windows): Figure out whether OSVersion is deprecated. + return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) + } + return false +} + +// Less sorts matched platforms in front of other platforms. +// For matched platforms, it puts platforms with larger revision +// number in front. +func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { + m1, m2 := m.Match(p1), m.Match(p2) + if m1 && m2 { + r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) + return r1 > r2 + } + return m1 && !m2 +} + +func revision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +// Default returns the current platform's default platform specification. +func Default() MatchComparer { + major, minor, build := windows.RtlGetNtVersionNumbers() + return matchComparer{ + defaults: Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + }), + osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 0000000000..088bdea050 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,278 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a matcher: +// +// m, err := Parse("linux") +// if err != nil { ... } +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. Since extracting an image's +// platform is a little more involved, we'll use an example against the +// platform default: +// +// if ok := m.Match(Default()); !ok { /* doesn't match */ } +// +// This can be composed in loops for resolving runtimes or used as a filter for +// fetching and selecting images. +// +// More details of the specifier syntax and platform spec follow. +// +// Declaring Platform Support +// +// Components that have strict platform requirements should use the OCI +// platform specification to declare their support. Typically, these will be +// images and runtimes, which should declare specifically which platforms they +// support. This looks roughly as follows: +// +// type Platform struct { +// Architecture string +// OS string +// Variant string +// } +// +// Most images and runtimes should at least set Architecture and OS, according +// to their GOARCH and GOOS values, respectively (follow the OCI image +// specification when in doubt). ARM should set the variant in certain cases, +// which are outlined below. +// +// Platform Specifiers +// +// While the OCI platform specifications provide a tool for components to +// specify structured information, user input typically doesn't need the full +// context and much can be inferred. To solve this problem, we introduced +// "specifiers". A specifier has the format +// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the +// operating system or the architecture or both. +// +// An example of a common specifier is `linux/amd64`. If the host has a default +// runtime that matches this, the user can simply provide the component that +// matters. For example, if an image provides amd64 and arm64 support, the +// operating system, `linux`, can be inferred, so they only have to provide +// `arm64` or `amd64`. Similar behavior is implemented for operating systems, +// where the architecture may be known but a runtime may support images from +// different operating systems. +// +// Normalization +// +// Because not all users are familiar with the way the Go runtime represents +// platforms, several normalizations have been provided to make this package +// easier to use.
+//
+// The following are performed for architectures:
+//
+//   Value    Normalized
+//   aarch64  arm64
+//   armhf    arm
+//   armel    arm/v6
+//   i386     386
+//   x86_64   amd64
+//   x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+	return &matcher{
+		Platform: Normalize(platform),
+	}
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" && cpuVariant() != "v7" {
+				p.Variant = cpuVariant()
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
+	case 2:
+		// In this case, we treat as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
+func MustParse(specifier string) specs.Platform {
+	p, err := Parse(specifier)
+	if err != nil {
+		panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error())
+	}
+	return p
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
+}
+
+func joinNotEmpty(s ...string) string {
+	var ss []string
+	for _, s := range s {
+		if s == "" {
+			continue
+		}
+
+		ss = append(ss, s)
+	}
+
+	return strings.Join(ss, "/")
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+	platform.OS = normalizeOS(platform.OS)
+	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+	// these fields are deprecated, remove them
+	platform.OSFeatures = nil
+	platform.OSVersion = ""
+
+	return platform
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
new file mode 100644
index 0000000000..708b266899
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -0,0 +1,628 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+)
+
+type options struct {
+	chunkSize              int
+	compressionLevel       int
+	prioritizedFiles       []string
+	missedPrioritizedFiles *[]string
+	compression            Compression
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of the eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+	return func(o *options) error {
+		o.chunkSize = chunkSize
+		return nil
+	}
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+	return func(o *options) error {
+		o.compressionLevel = level
+		return nil
+	}
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/".
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+	return func(o *options) error {
+		o.prioritizedFiles = files
+		return nil
+	}
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of the prioritized files specified by the WithPrioritizedFiles option aren't
+// found in the input tar. Instead, this records all missed file names to the
+// passed slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+	return func(o *options) error {
+		if missedFiles == nil {
+			return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+		}
+		o.missedPrioritizedFiles = missedFiles
+		return nil
+	}
+}
+
+// WithCompression specifies the compression algorithm to be used.
+// The default is gzip.
+func WithCompression(compression Compression) Option {
+	return func(o *options) error {
+		o.compression = compression
+		return nil
+	}
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+	io.ReadCloser
+	diffID    digest.Digester
+	tocDigest digest.Digest
+}
+
+// DiffID returns the digest of the uncompressed blob.
+// It is only valid to call DiffID after Close.
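+//
+// A minimal end-to-end sketch (illustrative only; "layer.tar" and dst are
+// hypothetical inputs supplied by the caller):
+//
+//	f, _ := os.Open("layer.tar")
+//	fi, _ := f.Stat()
+//	blob, err := Build(io.NewSectionReader(f, 0, fi.Size()), WithChunkSize(1<<20))
+//	if err != nil { /* handle */ }
+//	io.Copy(dst, blob) // consume the eStargz stream
+//	blob.Close()
+//	diffID := blob.DiffID() // digest of the uncompressed payload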
+func (b *Blob) DiffID() digest.Digest {
+	return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+	return b.tocDigest
+}
+
+// Build builds an eStargz blob, which is an extended version of stargz, from the blob
+// (gzip, zstd or plain tar) passed as the argument. If prioritized files are listed in
+// the options, these files are grouped as "prioritized" and can be used for runtime
+// optimization (e.g. prefetch). This function builds the blob in parallel, dividing it
+// into several sub-blobs (at least runtime.GOMAXPROCS(0) of them).
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
+	var opts options
+	opts.compressionLevel = gzip.BestCompression // BestCompression by default
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
+	}
+	if opts.compression == nil {
+		opts.compression = newGzipCompressionWithLevel(opts.compressionLevel)
+	}
+	layerFiles := newTempFiles()
+	defer func() {
+		if rErr != nil {
+			if err := layerFiles.CleanupAll(); err != nil {
+				rErr = errors.Wrapf(rErr, "failed to cleanup tmp files: %v", err)
+			}
+		}
+	}()
+	tarBlob, err := decompressBlob(tarBlob, layerFiles)
+	if err != nil {
+		return nil, err
+	}
+	entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles)
+	if err != nil {
+		return nil, err
+	}
+	tarParts := divideEntries(entries, runtime.GOMAXPROCS(0))
+	writers := make([]*Writer, len(tarParts))
+	payloads := make([]*os.File, len(tarParts))
+	var mu sync.Mutex
+	var eg errgroup.Group
+	for i, parts := range tarParts {
+		i, parts := i, parts
+		// builds verifiable stargz sub-blobs
+		eg.Go(func() error {
+			esgzFile, err := layerFiles.TempFile("", "esgzdata")
+			if err != nil {
+				return err
+			}
+			sw := NewWriterWithCompressor(esgzFile, opts.compression)
+			sw.ChunkSize = opts.chunkSize
+			if err := sw.AppendTar(readerFromEntries(parts...)); err != nil {
+				return err
+			}
+			mu.Lock()
+			writers[i] = sw
+			payloads[i] = esgzFile
+			mu.Unlock()
+			return nil
+		})
+	}
+	if err := eg.Wait(); err != nil {
+		rErr = err
+		return nil, err
+	}
+	tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...)
+	if err != nil {
+		rErr = err
+		return nil, err
+	}
+	var rs []io.Reader
+	for _, p := range payloads {
+		fs, err := fileSectionReader(p)
+		if err != nil {
+			return nil, err
+		}
+		rs = append(rs, fs)
+	}
+	diffID := digest.Canonical.Digester()
+	pr, pw := io.Pipe()
+	go func() {
+		r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+		if err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		defer r.Close()
+		if _, err := io.Copy(diffID.Hash(), r); err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+		pw.Close()
+	}()
+	return &Blob{
+		ReadCloser: readCloser{
+			Reader:    pr,
+			closeFunc: layerFiles.CleanupAll,
+		},
+		tocDigest: tocDgst,
+		diffID:    diffID,
+	}, nil
+}
+
+// closeWithCombine takes unclosed Writers and closes them. It also returns the
+// TOC that combines the TOCs of all the Writers.
+// The Writers don't write the TOC and footer to the underlying writers, so they
+// can be combined into a single eStargz, and the tocAndFooter returned by this
+// function can be appended at the tail of that combined blob.
+func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+	if len(ws) == 0 {
+		return nil, "", fmt.Errorf("at least one writer must be passed")
+	}
+	for _, w := range ws {
+		if w.closed {
+			return nil, "", fmt.Errorf("writer must be unclosed")
+		}
+		defer func(w *Writer) { w.closed = true }(w)
+		if err := w.closeGz(); err != nil {
+			return nil, "", err
+		}
+		if err := w.bw.Flush(); err != nil {
+			return nil, "", err
+		}
+	}
+	var (
+		mtoc          = new(JTOC)
+		currentOffset int64
+	)
+	mtoc.Version = ws[0].toc.Version
+	for _, w := range ws {
+		for _, e := range w.toc.Entries {
+			// Recalculate Offset of non-empty files/chunks
+			if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+				e.Offset += currentOffset
+			}
+			mtoc.Entries = append(mtoc.Entries, e)
+		}
+		if w.toc.Version > mtoc.Version {
+			mtoc.Version = w.toc.Version
+		}
+		currentOffset += w.cw.n
+	}
+
+	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+	buf := new(bytes.Buffer)
+	tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	return buf, tocDigest, nil
+}
+
+// divideEntries divides the passed entries into at least the number of parts
+// specified by the argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+	var estimatedSize int64
+	for _, e := range entries {
+		estimatedSize += e.header.Size
+	}
+	unitSize := estimatedSize / int64(minPartsNum)
+	var (
+		nextEnd = unitSize
+		offset  int64
+	)
+	set = append(set, []*entry{})
+	for _, e := range entries {
+		set[len(set)-1] = append(set[len(set)-1], e)
+		offset += e.header.Size
+		if offset > nextEnd {
+			set = append(set, []*entry{})
+			nextEnd += unitSize
+		}
+	}
+	return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If some prioritized files are specified, the list starts with these files,
+// keeping the order given by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+	// Import tar file.
+	intar, err := importTar(in)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to sort")
+	}
+
+	// Sort the tar file, respecting the prioritized files list.
+	sorted := &tarFile{}
+	for _, l := range prioritized {
+		if err := moveRec(l, intar, sorted); err != nil {
+			if errors.Is(err, errNotFound) && missedPrioritized != nil {
+				*missedPrioritized = append(*missedPrioritized, l)
+				continue // allow not found
+			}
+			return nil, errors.Wrap(err, "failed to sort tar entries")
+		}
+	}
+	if len(prioritized) == 0 {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     NoPrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	} else {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     PrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	}
+
+	// Dump all entries and concatenate them.
+	return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of a tar archive that contains the entries
+// passed through the arguments.
+func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReader(in) + if err != nil { + return nil, errors.Wrap(err, "failed to make position watcher") + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } else { + return nil, errors.Wrap(err, "failed to parse tar file") + } + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return errors.Wrapf(errNotFound, "file: %q", name) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { + f, err := 
ioutil.TempFile(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReader(r io.ReaderAt) (*countReader, error) { + pos := int64(0) + return &countReader{r: r, cPos: &pos}, nil +} + +type countReader struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReader) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReader) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReader) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 0000000000..6de78b02dc --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. 
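+//
+// For example (illustrative):
+//
+//	err := Aggregate([]error{errA, errB})
+//	// err.Error() begins with "2 error(s) occurred:" and lists each
+//	// error on its own "* ..." line.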
+func Aggregate(errs []error) error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		points := make([]string, len(errs)+1)
+		points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs))
+		for i, err := range errs {
+			points[i+1] = fmt.Sprintf("* %s", err)
+		}
+		return errors.New(strings.Join(points, "\n\t"))
+	}
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
new file mode 100644
index 0000000000..3ef0291160
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -0,0 +1,1026 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+/*
+   Copyright 2019 The Go Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/vbatts/tar-split/archive/tar"
+)
+
+// A Reader permits random access reads from a stargz file.
+type Reader struct {
+	sr        *io.SectionReader
+	toc       *JTOC
+	tocDigest digest.Digest
+
+	// m stores all non-chunk entries, keyed by name.
+	m map[string]*TOCEntry
+
+	// chunks stores all TOCEntry values for regular files that
+	// are split up. For a file with a single chunk, it's only
+	// stored in m.
+	chunks map[string][]*TOCEntry
+
+	decompressor Decompressor
+}
+
+type openOpts struct {
+	tocOffset     int64
+	decompressors []Decompressor
+	telemetry     *Telemetry
+}
+
+// OpenOption is an option used when opening the layer.
+type OpenOption func(o *openOpts) error
+
+// WithTOCOffset option specifies the offset of the TOC.
+func WithTOCOffset(tocOffset int64) OpenOption {
+	return func(o *openOpts) error {
+		o.tocOffset = tocOffset
+		return nil
+	}
+}
+
+// WithDecompressors option specifies decompressors to use.
+// Default is the gzip-based decompressor.
+func WithDecompressors(decompressors ...Decompressor) OpenOption {
+	return func(o *openOpts) error {
+		o.decompressors = decompressors
+		return nil
+	}
+}
+
+// WithTelemetry option specifies the telemetry hooks.
+func WithTelemetry(telemetry *Telemetry) OpenOption {
+	return func(o *openOpts) error {
+		o.telemetry = telemetry
+		return nil
+	}
+}
+
+// MeasureLatencyHook is a func which takes a start time and records the elapsed time.
+type MeasureLatencyHook func(time.Time)
+
+// Telemetry defines telemetry hooks. By implementing these hooks you can record
+// the latency metrics of the respective steps of the estargz open operation.
+// To be used with the WithTelemetry option.
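+//
+// A sketch of wiring a hook (hypothetical logging; sr is the layer's
+// io.SectionReader):
+//
+//	tm := &Telemetry{
+//		GetTocLatency: func(start time.Time) {
+//			log.Printf("TOC fetched in %v", time.Since(start))
+//		},
+//	}
+//	r, err := Open(sr, WithTelemetry(tm))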
+type Telemetry struct {
+	GetFooterLatency      MeasureLatencyHook // measure time to get stargz footer (in milliseconds)
+	GetTocLatency         MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds)
+	DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds)
+}
+
+// Open opens a stargz file for reading.
+// The behaviour is configurable using options.
+//
+// Note that each entry name is normalized as the path that is relative to root.
+func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) {
+	var opts openOpts
+	for _, o := range opt {
+		if err := o(&opts); err != nil {
+			return nil, err
+		}
+	}
+
+	gzipCompressors := []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)}
+	decompressors := append(gzipCompressors, opts.decompressors...)
+
+	// Determine the size to fetch. Try to fetch as many bytes as possible.
+	fetchSize := maxFooterSize(sr.Size(), decompressors...)
+	if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize {
+		if maybeTocOffset > sr.Size() {
+			return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size())
+		}
+		fetchSize = sr.Size() - maybeTocOffset
+	}
+
+	start := time.Now() // before getting layer footer
+	footer := make([]byte, fetchSize)
+	if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil {
+		return nil, fmt.Errorf("error reading footer: %v", err)
+	}
+	if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil {
+		opts.telemetry.GetFooterLatency(start)
+	}
+
+	var allErr []error
+	var found bool
+	var r *Reader
+	for _, d := range decompressors {
+		fSize := d.FooterSize()
+		fOffset := positive(int64(len(footer)) - fSize)
+		maybeTocBytes := footer[:fOffset]
+		_, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:])
+		if err != nil {
+			allErr = append(allErr, err)
+			continue
+		}
+		if tocSize <= 0 {
+			tocSize = sr.Size() - tocOffset - fSize
+		}
+		if tocSize < int64(len(maybeTocBytes)) {
+			maybeTocBytes = maybeTocBytes[:tocSize]
+		}
+		r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts)
+		if err == nil {
+			found = true
+			break
+		}
+		allErr = append(allErr, err)
+	}
+	if !found {
+		return nil, errorutil.Aggregate(allErr)
+	}
+	if err := r.initFields(); err != nil {
+		return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
+	}
+	return r, nil
+}
+
+// OpenFooter extracts and parses the footer from the given blob.
+// This function only supports gzip-based eStargz.
+func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) {
+	if sr.Size() < FooterSize && sr.Size() < legacyFooterSize {
+		return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size())
+	}
+	var footer [FooterSize]byte
+	if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
+		return 0, 0, fmt.Errorf("error reading footer: %v", err)
+	}
+	var allErr []error
+	for _, d := range []Decompressor{new(GzipDecompressor), new(legacyGzipDecompressor)} {
+		fSize := d.FooterSize()
+		fOffset := positive(int64(len(footer)) - fSize)
+		_, tocOffset, _, err := d.ParseFooter(footer[fOffset:])
+		if err == nil {
+			return tocOffset, fSize, err
+		}
+		allErr = append(allErr, err)
+	}
+	return 0, 0, errorutil.Aggregate(allErr)
+}
+
+// initFields populates the Reader from r.toc after decoding it from
+// JSON.
+//
+// Unexported fields are populated and TOCEntry fields that were
+// implicit in the JSON are populated.
+func (r *Reader) initFields() error {
+	r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
+	r.chunks = make(map[string][]*TOCEntry)
+	var lastPath string
+	uname := map[int]string{}
+	gname := map[int]string{}
+	var lastRegEnt *TOCEntry
+	for _, ent := range r.toc.Entries {
+		ent.Name = cleanEntryName(ent.Name)
+		if ent.Type == "reg" {
+			lastRegEnt = ent
+		}
+		if ent.Type == "chunk" {
+			ent.Name = lastPath
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+			if ent.ChunkSize == 0 && lastRegEnt != nil {
+				ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
+			}
+		} else {
+			lastPath = ent.Name
+
+			if ent.Uname != "" {
+				uname[ent.UID] = ent.Uname
+			} else {
+				ent.Uname = uname[ent.UID]
+			}
+			if ent.Gname != "" {
+				gname[ent.GID] = ent.Gname
+			} else {
+				ent.Gname = gname[ent.GID]
+			}
+
+			ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
+
+			if ent.Type == "dir" {
+				ent.NumLink++ // Parent dir links to this directory
+			}
+			r.m[ent.Name] = ent
+		}
+		if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
+			r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
+			r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
+		}
+		if ent.ChunkSize == 0 && ent.Size != 0 {
+			ent.ChunkSize = ent.Size
+		}
+	}
+
+	// Populate children, add implicit directories:
+	for _, ent := range r.toc.Entries {
+		if ent.Type == "chunk" {
+			continue
+		}
+		// add "foo/":
+		//   add "foo" child to "" (creating "" if necessary)
+		//
+		// add "foo/bar/":
+		//   add "bar" child to "foo" (creating "foo" if necessary)
+		//
+		// add "foo/bar.txt":
+		//   add "bar.txt" child to "foo" (creating "foo" if necessary)
+		//
+		// add "a/b/c/d/e/f.txt":
+		//   create "a/b/c/d/e" node
+		//   add "f.txt" child to "e"
+
+		name := ent.Name
+		pdirName := parentDir(name)
+		if name == pdirName {
+			// This entry and its parent are the same.
+			// Ignore this to avoid an infinite loop of references.
+			// The example case where this can occur is when the tar contains the
+			// root directory itself (e.g. "./", "/").
+			continue
+		}
+		pdir := r.getOrCreateDir(pdirName)
+		ent.NumLink++ // at least one name (ent.Name) references this entry.
+		if ent.Type == "hardlink" {
+			if org, ok := r.m[cleanEntryName(ent.LinkName)]; ok {
+				org.NumLink++ // original entry is referenced by this ent.Name.
+				ent = org
+			} else {
+				return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
+			}
+		}
+		pdir.addChild(path.Base(name), ent)
+	}
+
+	lastOffset := r.sr.Size()
+	for i := len(r.toc.Entries) - 1; i >= 0; i-- {
+		e := r.toc.Entries[i]
+		if e.isDataType() {
+			e.nextOffset = lastOffset
+		}
+		if e.Offset != 0 {
+			lastOffset = e.Offset
+		}
+	}
+
+	return nil
+}
+
+func parentDir(p string) string {
+	dir, _ := path.Split(p)
+	return strings.TrimSuffix(dir, "/")
+}
+
+func (r *Reader) getOrCreateDir(d string) *TOCEntry {
+	e, ok := r.m[d]
+	if !ok {
+		e = &TOCEntry{
+			Name:    d,
+			Type:    "dir",
+			Mode:    0755,
+			NumLink: 2, // The directory itself(.) and the parent link to this directory.
+		}
+		r.m[d] = e
+		if d != "" {
+			pdir := r.getOrCreateDir(parentDir(d))
+			pdir.addChild(path.Base(d), e)
+		}
+	}
+	return e
+}
+
+func (r *Reader) TOCDigest() digest.Digest {
+	return r.tocDigest
+}
+
+// VerifyTOC checks that the TOC JSON in the passed blob matches the
+// passed digests and that the TOC JSON contains digests for all chunks
+// contained in the blob. If the verification succeeds, this function
+// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
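+//
+// A minimal sketch (tocDigest is assumed to come from a trusted source,
+// e.g. an image annotation; "bin/ls" is a hypothetical entry):
+//
+//	v, err := r.VerifyTOC(tocDigest)
+//	if err != nil { /* the TOC JSON doesn't match the expected digest */ }
+//	ent, _ := r.Lookup("bin/ls")
+//	verifier, _ := v.Verifier(ent)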
+func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) { + // Verify the digest of TOC JSON + if r.tocDigest != tocDigest { + return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest) + } + return r.Verifiers() +} + +// Verifiers returns TOCEntryVerifier of this chunk. Use VerifyTOC instead in most cases +// because this doesn't verify TOC. +func (r *Reader) Verifiers() (TOCEntryVerifier, error) { + chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest + regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest + var chunkDigestMapIncomplete bool + var regDigestMapIncomplete bool + var containsChunk bool + for _, e := range r.toc.Entries { + if e.Type != "reg" && e.Type != "chunk" { + continue + } + + // offset must be unique in stargz blob + _, dOK := chunkDigestMap[e.Offset] + _, rOK := regDigestMap[e.Offset] + if dOK || rOK { + return nil, fmt.Errorf("offset %d found twice", e.Offset) + } + + if e.Type == "reg" { + if e.Size == 0 { + continue // ignores empty file + } + + // record the digest of regular file payload + if e.Digest != "" { + d, err := digest.Parse(e.Digest) + if err != nil { + return nil, errors.Wrapf(err, + "failed to parse regular file digest %q", e.Digest) + } + regDigestMap[e.Offset] = d + } else { + regDigestMapIncomplete = true + } + } else { + containsChunk = true // this layer contains "chunk" entries. + } + + // "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of + // chunked file) + if e.ChunkDigest != "" { + d, err := digest.Parse(e.ChunkDigest) + if err != nil { + return nil, errors.Wrapf(err, + "failed to parse chunk digest %q", e.ChunkDigest) + } + chunkDigestMap[e.Offset] = d + } else { + chunkDigestMapIncomplete = true + } + } + + if chunkDigestMapIncomplete { + // Though some chunk digests are not found, if this layer doesn't contain + // "chunk"s and all digest of "reg" files are recorded, we can use them instead. + if !containsChunk && !regDigestMapIncomplete { + return &verifier{digestMap: regDigestMap}, nil + } + return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON") + } + + return &verifier{digestMap: chunkDigestMap}, nil +} + +// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by +// offset of the chunk. +type verifier struct { + digestMap map[int64]digest.Digest + digestMapMu sync.Mutex +} + +// Verifier returns a content verifier specified by TOCEntry. +func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) { + v.digestMapMu.Lock() + defer v.digestMapMu.Unlock() + d, ok := v.digestMap[ce.Offset] + if !ok { + return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered", + ce.Offset, ce.ChunkSize) + } + return d.Verifier(), nil +} + +// ChunkEntryForOffset returns the TOCEntry containing the byte of the +// named file at the given offset within the file. +// Name must be absolute path or one that is relative to root. 
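+//
+// For example (illustrative):
+//
+//	// locate the chunk holding byte 1<<20 of "foo/bar":
+//	ent, ok := r.ChunkEntryForOffset("foo/bar", 1<<20)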
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + e, ok = r.m[e.LinkName] + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + fr := &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. + compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + } + return io.ReadFull(dr, p) +} + +// A Writer writes stargz files. 
+// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. +func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse footer") + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { + bw := bufio.NewWriter(w) + cw := &countWriter{w: bw} + return &Writer{ + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + } +} + +// Close writes the stargz's table of contents and flushes all the +// buffers, returning any error. +func (w *Writer) Close() (digest.Digest, error) { + if w.closed { + return "", nil + } + defer func() { w.closed = true }() + + if err := w.closeGz(); err != nil { + return "", err + } + + // Write the TOC index and footer. 
+	tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash)
+	if err != nil {
+		return "", err
+	}
+	if err := w.bw.Flush(); err != nil {
+		return "", err
+	}
+
+	return tocDigest, nil
+}
+
+func (w *Writer) closeGz() error {
+	if w.closed {
+		return errors.New("write on closed Writer")
+	}
+	if w.gz != nil {
+		if err := w.gz.Close(); err != nil {
+			return err
+		}
+		w.gz = nil
+	}
+	return nil
+}
+
+// nameIfChanged returns name, unless it was already the value of (*mp)[id],
+// in which case it returns the empty string.
+func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
+	if name == "" {
+		return ""
+	}
+	if *mp == nil {
+		*mp = make(map[int]string)
+	}
+	if (*mp)[id] == name {
+		return ""
+	}
+	(*mp)[id] = name
+	return name
+}
+
+func (w *Writer) condOpenGz() (err error) {
+	if w.gz == nil {
+		w.gz, err = w.compressor.Writer(w.cw)
+	}
+	return
+}
+
+// AppendTar reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+func (w *Writer) AppendTar(r io.Reader) error {
+	return w.appendTar(r, false)
+}
+
+// AppendTarLossLess reads the tar or tar.gz file from r and appends
+// each of its contents to w.
+//
+// The input r can optionally be gzip compressed but the output will
+// always be compressed by the specified compressor.
+//
+// The difference from AppendTar is that this func writes
+// the input tar stream into w without any modification (e.g. to header bytes).
+//
+// Note that if the input tar stream already contains TOC JSON, this returns an
+// error because w cannot overwrite the TOC JSON with the one generated by w
+// without lossy modification. To avoid this error, if the input stream is known
+// to be stargz/estargz, you should decompress it and remove the TOC JSON in
+// advance.
+func (w *Writer) AppendTarLossLess(r io.Reader) error {
+	return w.appendTar(r, true)
+}
+
+func (w *Writer) appendTar(r io.Reader, lossless bool) error {
+	var src io.Reader
+	br := bufio.NewReader(r)
+	if isGzip(br) {
+		zr, _ := gzip.NewReader(br)
+		src = zr
+	} else {
+		src = io.Reader(br)
+	}
+	dst := currentCompressionWriter{w}
+	var tw *tar.Writer
+	if !lossless {
+		tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode.
+	}
+	tr := tar.NewReader(src)
+	if lossless {
+		tr.RawAccounting = true
+	}
+	for {
+		h, err := tr.Next()
+		if err == io.EOF {
+			if lossless {
+				if remain := tr.RawBytes(); len(remain) > 0 {
+					// Collect the remaining null bytes.
+					// https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53
+					if _, err := dst.Write(remain); err != nil {
+						return err
+					}
+				}
+			}
+			break
+		}
+		if err != nil {
+			return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
+		}
+		if cleanEntryName(h.Name) == TOCTarName {
+			// It is possible for a layer to be "stargzified" twice during the
+			// distribution lifecycle. So we reserve "TOCTarName" here to avoid
+			// duplicated entries in the resulting layer.
+			if lossless {
+				// We cannot handle this in a lossless way.
+				return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append")
+			}
+			continue
+		}
+
+		xattrs := make(map[string][]byte)
+		const xattrPAXRecordsPrefix = "SCHILY.xattr."
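+		// PAX records carry extended attributes as "SCHILY.xattr.<name>"
+		// keys; the loop below strips the prefix and keeps the raw value
+		// bytes for the TOC entry.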
+ if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. + var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + if err := w.closeGz(); err != nil { + return err + } + + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + ent.Offset = w.cw.n + ent.ChunkOffset = written + chunkDigest := digest.Canonical.Digester() + + if err := w.condOpenGz(); err != nil { + return err + } + + teeChunk := io.TeeReader(tee, chunkDigest.Hash()) + var out io.Writer + if tw != nil { + out = tw + } else { + out = dst + } + if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil { + return fmt.Errorf("error copying %q: %v", h.Name, err) + } + ent.ChunkDigest = chunkDigest.Digest().String() + w.toc.Entries = append(w.toc.Entries, ent) + written += chunkSize + ent = &TOCEntry{ + Name: h.Name, + Type: "chunk", + } + } + } else { + w.toc.Entries = append(w.toc.Entries, ent) + } + if payloadDigest != nil { + regFileEntry.Digest = payloadDigest.Digest().String() + } + if tw != nil { + if err := tw.Flush(); err != nil { + return err + } + } + } + remainDest := ioutil.Discard + if lossless { + remainDest = dst // Preserve the remaining bytes in lossless mode + } + _, err := io.Copy(remainDest, src) + return err +} + +// DiffID returns the SHA-256 of the uncompressed tar bytes. +// It is only valid to call DiffID after Close. 
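+// (The TOC entry emitted during Close is part of the uncompressed tar stream,
+// so it is included in this digest.)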
+func (w *Writer) DiffID() string { + return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) +} + +func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { + for _, d := range decompressors { + if s := d.FooterSize(); res < s && s <= blobSize { + res = s + } + } + return +} + +func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. +type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. 
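+// It peeks at the gzip magic bytes (0x1f, 0x8b) and the deflate method byte (8).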
+func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod new file mode 100644 index 0000000000..144d022ba7 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.mod @@ -0,0 +1,11 @@ +module github.com/containerd/stargz-snapshotter/estargz + +go 1.16 + +require ( + github.com/klauspost/compress v1.13.6 + github.com/opencontainers/go-digest v1.0.0 + github.com/pkg/errors v0.9.1 + github.com/vbatts/tar-split v0.11.2 + golang.org/x/sync v0.0.0-20201207232520-09787c993a3a +) diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum new file mode 100644 index 0000000000..d3c934ff81 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/go.sum @@ -0,0 +1,22 @@ +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vbatts/tar-split v0.11.2 h1:Via6XqJr0hceW4wff3QRzD5gAk/tatMw/4ZA7cTlIME= +github.com/vbatts/tar-split v0.11.2/go.mod h1:vV3ZuO2yWSVsz+pfFzDG/upWH1JhjOiEaWq6kXyQ3VI= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 0000000000..88e1283d85 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,216 
@@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "encoding/json"
+ "fmt"
+ "hash"
+ "io"
+ "strconv"
+
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type gzipCompression struct {
+ *gzipCompressor
+ *GzipDecompressor
+}
+
+func newGzipCompressionWithLevel(level int) Compression {
+ return &gzipCompression{
+ &gzipCompressor{level},
+ &GzipDecompressor{},
+ }
+}
+
+func NewGzipCompressorWithLevel(level int) Compressor {
+ return &gzipCompressor{level}
+}
+
+type gzipCompressor struct {
+ compressionLevel int
+}
+
+func (gc *gzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) {
+ return gzip.NewWriterLevel(w, gc.compressionLevel)
+}
+
+func (gc *gzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) {
+ tocJSON, err := json.MarshalIndent(toc, "", "\t")
+ if err != nil {
+ return "", err
+ }
+ gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel)
+ gw := io.Writer(gz)
+ if diffHash != nil {
+ gw = io.MultiWriter(gz, diffHash)
+ }
+ tw := tar.NewWriter(gw)
+ if err := tw.WriteHeader(&tar.Header{
+ Typeflag: tar.TypeReg,
+ Name: TOCTarName,
+ Size: int64(len(tocJSON)),
+ }); err != nil {
+ return "", err
+ }
+ if _, err := tw.Write(tocJSON); err != nil {
+ return "", err
+ }
+
+ if err := tw.Close(); err != nil {
+ return "", err
+ }
+ if err := gz.Close(); err != nil {
+ return "", err
+ }
+ if _, err := w.Write(gzipFooterBytes(off)); err != nil {
+ return "", err
+ }
+ return digest.FromBytes(tocJSON), nil
+}
+
+// gzipFooterBytes returns the 51-byte footer.
+func gzipFooterBytes(tocOff int64) []byte {
+ buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
+ gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes
+
+ // Extra field indicating the offset of the TOC JSON
+ // https://tools.ietf.org/html/rfc1952#section-2.3.1.1
+ header := make([]byte, 4)
+ header[0], header[1] = 'S', 'G'
+ subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
+ binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
+ gz.Header.Extra = append(header, []byte(subfield)...)
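+ // Close writes the gzip header (including the extra field above), an empty
+ // deflate stream for the zero-byte payload, and the gzip trailer.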
+ gz.Close()
+ if buf.Len() != FooterSize {
+ panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
+ }
+ return buf.Bytes()
+}
+
+type GzipDecompressor struct{}
+
+func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != FooterSize {
+ return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, err
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
+ if si1 != 'S' || si2 != 'G' {
+ return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2)
+ }
+ if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) {
+ return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ"))
+ }
+ if string(subfield[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield")
+ }
+ tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, errors.Wrapf(err, "failed to parse toc offset")
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *GzipDecompressor) FooterSize() int64 {
+ return FooterSize
+}
+
+type legacyGzipDecompressor struct{}
+
+func (gz *legacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) {
+ return gzip.NewReader(r)
+}
+
+func (gz *legacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ return parseTOCEStargz(r)
+}
+
+func (gz *legacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) {
+ if len(p) != legacyFooterSize {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p))
+ }
+ zr, err := gzip.NewReader(bytes.NewReader(p))
+ if err != nil {
+ return 0, 0, 0, errors.Wrapf(err, "legacy: failed to get footer gzip reader")
+ }
+ defer zr.Close()
+ extra := zr.Header.Extra
+ if len(extra) != 16+len("STARGZ") {
+ return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
+ }
+ if string(extra[16:]) != "STARGZ" {
+ return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found")
+ }
+ tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
+ if err != nil {
+ return 0, 0, 0, errors.Wrapf(err, "legacy: failed to parse toc offset")
+ }
+ return tocOffset, tocOffset, 0, nil
+}
+
+func (gz *legacyGzipDecompressor) FooterSize() int64 {
+ return legacyFooterSize
+}
+
+func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) {
+ zr, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, "", fmt.Errorf("malformed TOC gzip header: %v", err)
+ }
+ defer zr.Close()
+ zr.Multistream(false)
+ tr := tar.NewReader(zr)
+ h, err := tr.Next()
+ if err != nil {
+ return nil, "", fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
+ }
+ if h.Name != TOCTarName {
+ return nil, "", fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
+ }
+ dgstr := digest.Canonical.Digester()
+ toc = new(JTOC)
+ if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil {
+ return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err)
+ }
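+ // The digest is computed over the raw TOC JSON bytes as read from the tar
+ // stream (via the TeeReader above), not over the decoded structure.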
+ return toc, dgstr.Digest(), nil
+}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
new file mode 100644
index 0000000000..9224e456dd
--- /dev/null
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -0,0 +1,2009 @@
+/*
+ Copyright The containerd Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+/*
+ Copyright 2019 The Go Authors. All rights reserved.
+ Use of this source code is governed by a BSD-style
+ license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/containerd/stargz-snapshotter/estargz/errorutil"
+ "github.com/klauspost/compress/zstd"
+ digest "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// TestingController is Compression with some helper methods necessary for testing.
+type TestingController interface {
+ Compression
+ CountStreams(*testing.T, []byte) int
+ DiffIDOf(*testing.T, []byte) string
+ String() string
+}
+
+// CompressionTestSuite tests that this pkg, using the given controllers, can
+// build valid eStargz blobs and parse them.
+func CompressionTestSuite(t *testing.T, controllers ...TestingController) {
+ t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
+ t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
+ t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+}
+
+const (
+ uncompressedType int = iota
+ gzipType
+ zstdType
+)
+
+var srcCompressions = []int{
+ uncompressedType,
+ gzipType,
+ zstdType,
+}
+
+var allowedPrefix = [4]string{"", "./", "/", "../"}
+
+// testBuild tests that the resulting stargz blob built by this pkg has the same
+// contents as a normal stargz blob.
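+// Each input is exercised against every controller, source compression,
+// source tar format, and allowed path prefix.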
+func testBuild(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + chunkSize int + in []tarEntry + }{ + { + name: "regfiles and directories", + chunkSize: 4, + in: tarOf( + file("foo", "test1"), + dir("foo2/"), + file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), + ), + }, + { + name: "empty files", + chunkSize: 4, + in: tarOf( + file("foo", "tttttt"), + file("foo_empty", ""), + file("foo2", "tttttt"), + file("foo_empty2", ""), + file("foo3", "tttttt"), + file("foo_empty3", ""), + file("foo4", "tttttt"), + file("foo_empty4", ""), + file("foo5", "tttttt"), + file("foo_empty5", ""), + file("foo6", "tttttt"), + ), + }, + { + name: "various files", + chunkSize: 4, + in: tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + { + name: "no contents", + chunkSize: 4, + in: tarOf( + file("baz.txt", ""), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + } + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, prefix := range allowedPrefix { + prefix := prefix + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) + } + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ }
+ if !reflect.DeepEqual(entries, merged) {
+ for _, e := range entries {
+ t.Logf("Original: %v", e.header)
+ }
+ for _, e := range merged {
+ t.Logf("Merged: %v", e.header)
+ }
+ t.Errorf("divided entries couldn't be merged")
+ return
+ }
+
+ // Prepare sample data
+ wantBuf := new(bytes.Buffer)
+ sw := NewWriterWithCompressor(wantBuf, cl)
+ sw.ChunkSize = tt.chunkSize
+ if err := sw.AppendTar(tarBlob); err != nil {
+ t.Fatalf("failed to append tar to want stargz: %v", err)
+ }
+ if _, err := sw.Close(); err != nil {
+ t.Fatalf("failed to prepare want stargz: %v", err)
+ }
+ wantData := wantBuf.Bytes()
+ want, err := Open(io.NewSectionReader(
+ bytes.NewReader(wantData), 0, int64(len(wantData))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the want stargz: %v", err)
+ }
+
+ // Prepare testing data
+ rc, err := Build(compressBlob(t, tarBlob, srcCompression),
+ WithChunkSize(tt.chunkSize), WithCompression(cl))
+ if err != nil {
+ t.Fatalf("failed to build stargz: %v", err)
+ }
+ defer rc.Close()
+ gotBuf := new(bytes.Buffer)
+ if _, err := io.Copy(gotBuf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ gotData := gotBuf.Bytes()
+ got, err := Open(io.NewSectionReader(
+ bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))),
+ WithDecompressors(cl),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse the got stargz: %v", err)
+ }
+
+ // Check DiffID is properly calculated
+ rc.Close()
+ diffID := rc.DiffID()
+ wantDiffID := cl.DiffIDOf(t, gotData)
+ if diffID.String() != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ // Compare as stargz
+ if !isSameVersion(t, cl, wantData, gotData) {
+ t.Errorf("built stargz doesn't have the same TOC JSON version")
+ return
+ }
+ if !isSameEntries(t, want, got) {
+ t.Errorf("built stargz isn't the same as the original")
+ return
+ }
+
+ // Compare as tar.gz
+ if !isSameTarGz(t, cl, wantData, gotData) {
+ t.Errorf("built stargz isn't the same as the original tar.gz")
+ return
+ }
+ })
+ }
+ }
+ }
+ }
+}
+
+func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool {
+ aGz, err := controller.Reader(bytes.NewReader(a))
+ if err != nil {
+ t.Fatalf("failed to read A")
+ }
+ defer aGz.Close()
+ bGz, err := controller.Reader(bytes.NewReader(b))
+ if err != nil {
+ t.Fatalf("failed to read B")
+ }
+ defer bGz.Close()
+
+ // Same as tar's Next() method but ignores landmarks and the TOC JSON file
+ next := func(r *tar.Reader) (h *tar.Header, err error) {
+ for {
+ if h, err = r.Next(); err != nil {
+ return
+ }
+ if h.Name != PrefetchLandmark &&
+ h.Name != NoPrefetchLandmark &&
+ h.Name != TOCTarName {
+ return
+ }
+ }
+ }
+
+ aTar := tar.NewReader(aGz)
+ bTar := tar.NewReader(bGz)
+ for {
+ // Fetch and parse the next header.
+ aH, aErr := next(aTar)
+ bH, bErr := next(bTar)
+ if aErr != nil || bErr != nil {
+ if aErr == io.EOF && bErr == io.EOF {
+ break
+ }
+ t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr)
+ }
+ if !reflect.DeepEqual(aH, bH) {
+ t.Logf("different header (A = %v; B = %v)", aH, bH)
+ return false
+
+ }
+ aFile, err := ioutil.ReadAll(aTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of A")
+ }
+ bFile, err := ioutil.ReadAll(bTar)
+ if err != nil {
+ t.Fatal("failed to read tar payload of B")
+ }
+ if !bytes.Equal(aFile, bFile) {
+ t.Logf("different tar payload (A = %q; B = %q)", string(aFile), string(bFile))
+ return false
+ }
+ }
+
+ return true
+}
+
+func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool {
+ aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller)
+ if err != nil {
+ t.Fatalf("failed to parse A: %v", err)
+ }
+ bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller)
+ if err != nil {
+ t.Fatalf("failed to parse B: %v", err)
+ }
+ t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC))
+ t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC))
+ return aJTOC.Version == bJTOC.Version
+}
+
+func isSameEntries(t *testing.T, a, b *Reader) bool {
+ aroot, ok := a.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of A")
+ }
+ broot, ok := b.Lookup("")
+ if !ok {
+ t.Fatalf("failed to get root of B")
+ }
+ aEntry := stargzEntry{aroot, a}
+ bEntry := stargzEntry{broot, b}
+ return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
+}
+
+func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+ buf := new(bytes.Buffer)
+ var w io.WriteCloser
+ var err error
+ if srcCompression == gzipType {
+ w = gzip.NewWriter(buf)
+ } else if srcCompression == zstdType {
+ w, err = zstd.NewWriter(buf)
+ if err != nil {
+ t.Fatalf("failed to init zstd writer: %v", err)
+ }
+ } else {
+ return src
+ }
+ src.Seek(0, io.SeekStart)
+ if _, err := io.Copy(w, src); err != nil {
+ t.Fatalf("failed to compress source")
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("failed to finalize compress source")
+ }
+ data := buf.Bytes()
+ return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data)))
+
+}
+
+type stargzEntry struct {
+ e *TOCEntry
+ r *Reader
+}
+
+// contains checks if all child entries in "b" are also contained in "a".
+// This function also checks if the files/chunks contain the same contents among "a" and "b".
+func contains(t *testing.T, a, b stargzEntry) bool {
+ ae, ar := a.e, a.r
+ be, br := b.e, b.r
+ t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
+ if !equalEntry(ae, be) {
+ t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be)
+ return false
+ }
+ if ae.Type == "dir" {
+ t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name,
+ allChildrenName(ae), allChildrenName(be))
+ iscontain := true
+ ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool {
+ // Walk through all files in this stargz file.
+
+ if aChild.Name == PrefetchLandmark ||
+ aChild.Name == NoPrefetchLandmark {
+ return true // Ignore landmarks
+ }
+
+ // Ignore a TOCEntry of "./" (formatted as "" by stargz lib) on root directory
+ // because this points to the root directory itself.
+ if aChild.Name == "" && ae.Name == "" {
+ return true
+ }
+
+ bChild, ok := be.LookupChild(aBaseName)
+ if !ok {
+ t.Logf("%q (base: %q): not found in b: %v",
+ ae.Name, aBaseName, allChildrenName(be))
+ iscontain = false
+ return false
+ }
+
+ childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r})
+ if !childcontain {
+ t.Logf("%q != %q: non-equal dir", ae.Name, be.Name)
+ iscontain = false
+ return false
+ }
+ return true
+ })
+ return iscontain
+ } else if ae.Type == "reg" {
+ af, err := ar.OpenFile(ae.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on A: %v", ae.Name, err)
+ }
+ bf, err := br.OpenFile(be.Name)
+ if err != nil {
+ t.Fatalf("failed to open file %q on B: %v", be.Name, err)
+ }
+
+ var nr int64
+ for nr < ae.Size {
+ abytes, anext, aok := readOffset(t, af, nr, a)
+ bbytes, bnext, bok := readOffset(t, bf, nr, b)
+ if !aok && !bok {
+ break
+ } else if !(aok && bok) || anext != bnext {
+ t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
+ ae.Name, be.Name, nr, aok, bok, anext, bnext)
+ return false
+ }
+ nr = anext
+ if !bytes.Equal(abytes, bbytes) {
+ t.Logf("%q != %q: different contents %v vs %v",
+ ae.Name, be.Name, string(abytes), string(bbytes))
+ return false
+ }
+ }
+ return true
+ }
+
+ return true
+}
+
+func allChildrenName(e *TOCEntry) (children []string) {
+ e.ForeachChild(func(baseName string, _ *TOCEntry) bool {
+ children = append(children, baseName)
+ return true
+ })
+ return
+}
+
+func equalEntry(a, b *TOCEntry) bool {
+ // Here, we selectively compare fields that we are interested in.
+ return a.Name == b.Name &&
+ a.Type == b.Type &&
+ a.Size == b.Size &&
+ a.ModTime3339 == b.ModTime3339 &&
+ a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time
+ a.LinkName == b.LinkName &&
+ a.Mode == b.Mode &&
+ a.UID == b.UID &&
+ a.GID == b.GID &&
+ a.Uname == b.Uname &&
+ a.Gname == b.Gname &&
+ (a.Offset > 0) == (b.Offset > 0) &&
+ (a.NextOffset() > 0) == (b.NextOffset() > 0) &&
+ a.DevMajor == b.DevMajor &&
+ a.DevMinor == b.DevMinor &&
+ a.NumLink == b.NumLink &&
+ reflect.DeepEqual(a.Xattrs, b.Xattrs) &&
+ // chunk-related information isn't compared in this function.
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", 
"bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + symlink("barlink", "test/bar.txt"), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + dir("test2/"), + link("test2/bazlink", "foo.txt"), // modified + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + } + + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl) + } + }) + } + } + } + } + } +} + +// checkStargzTOC checks the TOC JSON of the passed stargz has the expected +// digest and contains valid chunks. It walks all entries in the stargz and +// checks all chunk digests stored to the TOC JSON match the actual contents. +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + digestMapTOC, err := listDigests(io.NewSectionReader( + bytes.NewReader(sgzData), 0, int64(len(sgzData))), + controller, + ) + if err != nil { + t.Fatalf("failed to list digest: %v", err) + } + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + // Check the digest of TOC JSON based on the actual contents + // It's sure that TOC JSON exists in this archive because + // Open succeeded. 
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+ t.Fatalf("failed to calculate digest of TOC JSON: %v",
+ err)
+ }
+ if dgstr.Digest() != tocDigest {
+ t.Errorf("invalid TOC JSON %q; want %q", dgstr.Digest(), tocDigest)
+ }
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ // Get the original digest to make sure the file contents are kept unchanged
+ // from the original tar, during the whole conversion steps.
+ id := chunkID(h.Name, n, ce.ChunkSize)
+ want, ok := dgstMap[id]
+ if !ok {
+ t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+ h.Name, n, ce.ChunkSize, dgstMap)
+ return
+ }
+ found[id] = true
+
+ // Check the file contents
+ dgstr := digest.Canonical.Digester()
+ if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if want != dgstr.Digest() {
+ t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+ h.Name, dgstr.Digest(), want)
+ return
+ }
+
+ // Check the digest stored in TOC JSON
+ dgstTOC, ok := digestMapTOC[ce.Offset]
+ if !ok {
+ t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+ h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+ }
+ if want != dgstTOC {
+ t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+ h.Name, dgstTOC, want)
+ return
+ }
+
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyTOC checks that the verification works for the TOC JSON of the
+// passed stargz. It walks all entries in the stargz and checks that the
+// verifications for all chunks work.
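+// Each chunk is fed to the verifier returned by VerifyTOC and must verify
+// successfully against the digest recorded in the TOC.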
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Errorf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Errorf("failed to verify stargz: %v", err)
+ return
+ }
+
+ found := make(map[string]bool)
+ for id := range dgstMap {
+ found[id] = false
+ }
+ zr, err := controller.Reader(bytes.NewReader(sgzData))
+ if err != nil {
+ t.Fatalf("failed to decompress converted stargz: %v", err)
+ }
+ defer zr.Close()
+ tr := tar.NewReader(zr)
+ for {
+ h, err := tr.Next()
+ if err != nil {
+ if err != io.EOF {
+ t.Errorf("failed to read tar entry: %v", err)
+ return
+ }
+ break
+ }
+ if h.Name == TOCTarName {
+ continue
+ }
+ if _, ok := sgz.Lookup(h.Name); !ok {
+ t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+ return
+ }
+ var n int64
+ for n < h.Size {
+ ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+ if !ok {
+ t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+ h.Name, n)
+ return
+ }
+
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Errorf("failed to get verifier for %q(offset=%d)", h.Name, n)
+ }
+
+ found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+ // Check the file contents
+ if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ h.Name, n, ce.ChunkSize)
+ }
+ if !v.Verified() {
+ t.Errorf("Invalid contents in converted stargz %q (verification should have succeeded)",
+ h.Name)
+ return
+ }
+ n += ce.ChunkSize
+ }
+ }
+
+ for id, ok := range found {
+ if !ok {
+ t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+ }
+ }
+}
+
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during the verification and that the verification returns an error.
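+// Two corruptions are exercised: clearing a chunk digest in an entry and
+// duplicating an entry offset.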
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ funcs := map[string]rewriteFunc{
+ "lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var found bool
+ for _, e := range toc.Entries {
+ if cleanEntryName(e.Name) == filename {
+ if e.Type != "reg" && e.Type != "chunk" {
+ t.Fatalf("entry %q to break must be a regfile or chunk", filename)
+ }
+ if e.ChunkDigest == "" {
+ t.Fatalf("entry %q is already invalid", filename)
+ }
+ e.ChunkDigest = ""
+ found = true
+ }
+ }
+ if !found {
+ t.Fatalf("rewrite target not found")
+ }
+ },
+ "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+ var (
+ sampleEntry *TOCEntry
+ targetEntry *TOCEntry
+ )
+ for _, e := range toc.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if cleanEntryName(e.Name) == filename {
+ targetEntry = e
+ } else {
+ sampleEntry = e
+ }
+ }
+ }
+ if sampleEntry == nil {
+ t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+ }
+ if targetEntry == nil {
+ t.Fatalf("rewrite target not found")
+ }
+ targetEntry.Offset = sampleEntry.Offset
+ },
+ }
+
+ for name, rFunc := range funcs {
+ t.Run(name, func(t *testing.T) {
+ newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, newSgz); err != nil {
+ t.Fatalf("failed to get converted stargz")
+ }
+ isgz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(newTocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ })
+ }
+ }
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects that the
+// given stargz file doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller))
+ if err != nil {
+ t.Fatalf("failed to convert stargz: %v", err)
+ }
+ defer rc.Close()
+ buf := new(bytes.Buffer)
+ if _, err := io.Copy(buf, rc); err != nil {
+ t.Fatalf("failed to copy built stargz blob: %v", err)
+ }
+ mStargz := buf.Bytes()
+
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ _, err = sgz.VerifyTOC(tocDigest)
+ if err == nil {
+ t.Errorf("must fail for invalid TOC")
+ return
+ }
+ }
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken contents
+// that don't match the expected digest and returns an error.
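+// It flips the first byte of the file's first chunk and expects the verifier
+// to reject the modified contents.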
+func checkVerifyBrokenContentFail(filename string) check {
+ return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+ // Parse stargz file
+ sgz, err := Open(
+ io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+ WithDecompressors(controller),
+ )
+ if err != nil {
+ t.Fatalf("failed to parse converted stargz: %v", err)
+ return
+ }
+ ev, err := sgz.VerifyTOC(tocDigest)
+ if err != nil {
+ t.Fatalf("failed to verify stargz: %v", err)
+ return
+ }
+
+ // Open the target file
+ sr, err := sgz.OpenFile(filename)
+ if err != nil {
+ t.Fatalf("failed to open file %q", filename)
+ }
+ ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+ if !ok {
+ t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+ return
+ }
+ if ce.ChunkSize == 0 {
+ t.Fatalf("file mustn't be empty")
+ return
+ }
+ data := make([]byte, ce.ChunkSize)
+ if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+ t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+ filename, ce.ChunkOffset)
+ }
+
+ // Check the broken chunk (must fail)
+ v, err := ev.Verifier(ce)
+ if err != nil {
+ t.Fatalf("failed to get verifier for %q", filename)
+ }
+ broken := append([]byte{^data[0]}, data[1:]...)
+ if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+ t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+ filename, ce.ChunkOffset, ce.ChunkSize)
+ }
+ if v.Verified() {
+ t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+ filename, data, broken)
+ }
+ }
+}
+
+func chunkID(name string, offset, size int64) string {
+ return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+ decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+ if err != nil {
+ t.Fatalf("failed to extract TOC JSON: %v", err)
+ }
+
+ rewrite(t, decodedJTOC, sgz)
+
+ tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+ if err != nil {
+ t.Fatalf("failed to create toc and footer: %v", err)
+ }
+
+ // Reconstruct stargz file with the modified TOC JSON
+ if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+ t.Fatalf("failed to reset the seek position of stargz: %v", err)
+ }
+ return io.MultiReader(
+ io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+ tocFooter, // Rewritten TOC and footer
+ ), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+ decodedJTOC, _, err := parseStargz(sgz, controller)
+ if err != nil {
+ return nil, err
+ }
+ digestMap := make(map[int64]digest.Digest)
+ for _, e := range decodedJTOC.Entries {
+ if e.Type == "reg" || e.Type == "chunk" {
+ if e.Type == "reg" && e.Size == 0 {
+ continue // ignore empty files
+ }
+ if e.ChunkDigest == "" {
+ return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+ e.Name, e.Offset)
+ }
+ d, err := digest.Parse(e.ChunkDigest)
+ if err != nil {
+ return nil, err
+ }
+ digestMap[e.Offset] = d
+ }
+ }
+ return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+ fSize := controller.FooterSize()
+ footer := make([]byte, fSize)
+ if _, err := sgz.ReadAt(footer,
sgz.Size()-fSize); err != nil { + return nil, 0, errors.Wrap(err, "error reading footer") + } + _, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):]) + if err != nil { + return nil, 0, errors.Wrapf(err, "failed to parse footer") + } + + // Decode the TOC JSON + tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize) + decodedJTOC, _, err = controller.ParseTOC(tocReader) + if err != nil { + return nil, 0, errors.Wrap(err, "failed to parse TOC") + } + return decodedJTOC, tocOffset, nil +} + +func testWriteAndOpen(t *testing.T, controllers ...TestingController) { + const content = "Some contents" + invalidUtf8 := "\xff\xfe\xfd" + + xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8} + sampleOwner := owner{uid: 50, gid: 100} + + tests := []struct { + name string + chunkSize int + in []tarEntry + want []stargzCheck + wantNumGz int // expected number of streams + + wantNumGzLossLess int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz + wantFailOnLossLess bool + }{ + { + name: "empty", + in: tarOf(), + wantNumGz: 2, // empty tar + TOC + footer + wantNumGzLossLess: 3, // empty tar + TOC + footer + want: checks( + numTOCEntries(0), + ), + }, + { + name: "1dir_1empty_file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", ""), + ), + wantNumGz: 3, // dir, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", 0), + entryHasChildren("foo", "bar.txt"), + hasFileDigest("foo/bar.txt", digestFor("")), + ), + }, + { + name: "1dir_1file", + in: tarOf( + dir("foo/"), + file("foo/bar.txt", content, xAttrFile), + ), + wantNumGz: 4, // var dir, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(2), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + hasFileDigest("foo/bar.txt", digestFor(content)), + hasFileContentsRange("foo/bar.txt", 0, content), + hasFileContentsRange("foo/bar.txt", 1, content[1:]), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar.txt"), + hasFileXattrs("foo/bar.txt", "foo", "bar"), + hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8), + ), + }, + { + name: "2meta_2file", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer + want: checks( + numTOCEntries(3), + hasDir("bar/"), + hasDir("foo/"), + hasFileLen("foo/bar.txt", len(content)), + entryHasChildren("", "bar", "foo"), + entryHasChildren("foo", "bar.txt"), + hasChunkEntries("foo/bar.txt", 1), + hasEntryOwner("bar/", sampleOwner), + hasEntryOwner("foo/", sampleOwner), + hasEntryOwner("foo/bar.txt", sampleOwner), + ), + }, + { + name: "3dir", + in: tarOf( + dir("bar/"), + dir("foo/"), + dir("foo/bar/"), + ), + wantNumGz: 3, // 3 dirs, TOC, footer + want: checks( + hasDirLinkCount("bar/", 2), + hasDirLinkCount("foo/", 3), + hasDirLinkCount("foo/bar/", 2), + ), + }, + { + name: "symlink", + in: tarOf( + dir("foo/"), + symlink("foo/bar", "../../x"), + ), + wantNumGz: 3, // metas + TOC + footer + want: checks( + numTOCEntries(2), + hasSymlink("foo/bar", "../../x"), + entryHasChildren("", "foo"), + entryHasChildren("foo", "bar"), + ), + }, + { + name: "chunked_file", + chunkSize: 4, + in: tarOf( + dir("foo/"), + file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"), + ), + wantNumGz: 9, + want: checks( + numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file + hasDir("foo/"), + hasFileLen("foo/big.txt", len("This is such a 
big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + file(TOCTarName, "dummy"), // ignored by the writer. 
(lossless write returns an error)
+ wantNumGz: 4, // both dirs, foo.txt alone, TOC, footer
+ want: checks(
+ numTOCEntries(3),
+ hasDir("bar/"),
+ hasDir("foo/"),
+ hasFileLen("foo/bar.txt", len(content)),
+ entryHasChildren("", "bar", "foo"),
+ entryHasChildren("foo", "bar.txt"),
+ hasChunkEntries("foo/bar.txt", 1),
+ hasEntryOwner("bar/", sampleOwner),
+ hasEntryOwner("foo/", sampleOwner),
+ hasEntryOwner("foo/bar.txt", sampleOwner),
+ ),
+ wantFailOnLossLess: true,
+ },
+ }
+
+ for _, tt := range tests {
+ for _, cl := range controllers {
+ cl := cl
+ for _, prefix := range allowedPrefix {
+ prefix := prefix
+ for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+ srcTarFormat := srcTarFormat
+ for _, lossless := range []bool{true, false} {
+ t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
+ var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+ origTarDgstr := digest.Canonical.Digester()
+ tr = io.TeeReader(tr, origTarDgstr.Hash())
+ var stargzBuf bytes.Buffer
+ w := NewWriterWithCompressor(&stargzBuf, cl)
+ w.ChunkSize = tt.chunkSize
+ if lossless {
+ err := w.AppendTarLossLess(tr)
+ if tt.wantFailOnLossLess {
+ if err != nil {
+ return // expected to fail
+ }
+ t.Fatalf("AppendTarLossLess was expected to fail")
+ }
+ if err != nil {
+ t.Fatalf("Append(lossless): %v", err)
+ }
+ } else {
+ if err := w.AppendTar(tr); err != nil {
+ t.Fatalf("Append: %v", err)
+ }
+ }
+ if _, err := w.Close(); err != nil {
+ t.Fatalf("Writer.Close: %v", err)
+ }
+ b := stargzBuf.Bytes()
+
+ if lossless {
+ // Check if the result blob preserves the original tar metadata
+ rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
+ if err != nil {
+ t.Errorf("failed to decompress blob: %v", err)
+ return
+ }
+ defer rc.Close()
+ resultDgstr := digest.Canonical.Digester()
+ if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+ t.Errorf("failed to read result decompressed blob: %v", err)
+ return
+ }
+ if resultDgstr.Digest() != origTarDgstr.Digest() {
+ t.Errorf("lossy compression occurred: digest=%v; want %v",
+ resultDgstr.Digest(), origTarDgstr.Digest())
+ return
+ }
+ }
+
+ diffID := w.DiffID()
+ wantDiffID := cl.DiffIDOf(t, b)
+ if diffID != wantDiffID {
+ t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+ }
+
+ got := cl.CountStreams(t, b)
+ wantNumGz := tt.wantNumGz
+ if lossless && tt.wantNumGzLossLess > 0 {
+ wantNumGz = tt.wantNumGzLossLess
+ }
+ if got != wantNumGz {
+ t.Errorf("number of streams = %d; want %d", got, wantNumGz)
+ }
+
+ telemetry, checkCalled := newCalledTelemetry()
+ r, err := Open(
+ io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
+ WithDecompressors(cl),
+ WithTelemetry(telemetry),
+ )
+ if err != nil {
+ t.Fatalf("stargz.Open: %v", err)
+ }
+ if err := checkCalled(); err != nil {
+ t.Errorf("telemetry failure: %v", err)
+ }
+ for _, want := range tt.want {
+ want.check(t, r)
+ }
+ })
+ }
+ }
+ }
+ }
+ }
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
+ var getFooterLatencyCalled bool
+ var getTocLatencyCalled bool
+ var deserializeTocLatencyCalled bool
+ return &Telemetry{
+ func(time.Time) { getFooterLatencyCalled = true },
+ func(time.Time) { getTocLatencyCalled = true },
+ func(time.Time) { deserializeTocLatencyCalled = true },
+ }, func() error {
+ var allErr []error
+ if !getFooterLatencyCalled {
+ allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
+ } + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) 
{ + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) + } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is 
%q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", entry) + return + } + if ent.UID != owner.uid || ent.GID != owner.gid { + t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid) + return + } + }) +} + +func tarOf(s ...tarEntry) []tarEntry { return s } + +type tarEntry interface { + appendTar(tw *tar.Writer, prefix string, format tar.Format) error +} + +type tarEntryFunc func(*tar.Writer, string, tar.Format) error + +func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error { + return f(tw, prefix, format) +} + +func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { + format := tar.FormatUnknown + for _, opt := range opts { + switch v := opt.(type) { + case tar.Format: + format = v + default: + panic(fmt.Errorf("unsupported opt for buildTar: %v", opt)) + } + } + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, ent := range ents { + if err := ent.appendTar(tw, prefix, format); err != nil { + t.Fatalf("building input tar: %v", err) + } + } + if err := tw.Close(); err != nil { + t.Errorf("closing write of input tar: %v", err) + } + data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) +} + +func dir(name string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var o owner + mode := os.FileMode(0755) + for _, opt := range opts { + switch v := opt.(type) { + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if !strings.HasSuffix(name, "/") { + panic(fmt.Sprintf("missing trailing slash in dir %q ", name)) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: prefix + name, + Mode: tm, + Uid: o.uid, + Gid: o.gid, + Format: format, + }) + }) +} + +// xAttr are extended attributes to set on test files created with the file func. +type xAttr map[string]string + +// owner is owner ot set on test files and directories with the file and dir functions. 
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 0000000000..384ff7fd7f --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,316 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. 
+ // + // 51 comes from: + // + // 10 bytes gzip header + // 2 bytes XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ")) + // 2 bytes Extra: SI1 = 'S', SI2 = 'G' + // 2 bytes Extra: LEN = 22 (16 hex digits + len("STARGZ")) + // 22 bytes Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC) + // 5 bytes flate header + // 8 bytes gzip footer + // (End of the eStargz blob) + // + // NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' is used for eStargz. + FooterSize = 51 + + // legacyFooterSize is the number of bytes in the legacy stargz footer. + // + // 47 comes from: + // + // 10 byte gzip header + + // 2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" + + // 22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset)) + // 5 byte flate header + // 8 byte gzip footer (two little endian uint32s: digest, size) + legacyFooterSize = 47 + + // TOCJSONDigestAnnotation is an annotation for an image layer. This stores the + // digest of the TOC JSON. + // This annotation is valid only when it is specified in `.[]layers.annotations` + // of an image manifest. + TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest" + + // StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy + // pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size + // to the runtime but current OCI image doesn't ship this information by default. So we store this + // to the special annotation. + StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size" + + // PrefetchLandmark is a file entry which indicates the end position of + // prefetch in the stargz file. + PrefetchLandmark = ".prefetch.landmark" + + // NoPrefetchLandmark is a file entry which indicates that no prefetch should + // occur in the stargz file. + NoPrefetchLandmark = ".no.prefetch.landmark" + + landmarkContents = 0xf +) + +// JTOC is the JSON-serialized table of contents index of the files in the stargz file. +type JTOC struct { + Version int `json:"version"` + Entries []*TOCEntry `json:"entries"` +} + +// TOCEntry is an entry in the stargz file's TOC (Table of Contents). +type TOCEntry struct { + // Name is the tar entry's name. It is the complete path + // stored in the tar file, not just the base name. + Name string `json:"name"` + + // Type is one of "dir", "reg", "symlink", "hardlink", "char", + // "block", "fifo", or "chunk". + // The "chunk" type is used for regular file data chunks past the first + // TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset, + // ChunkOffset, and ChunkSize populated. + Type string `json:"type"` + + // Size, for regular files, is the logical size of the file. + Size int64 `json:"size,omitempty"` + + // ModTime3339 is the modification time of the tar entry. Empty + // means zero or unknown. Otherwise it's in UTC RFC3339 + // format. Use the ModTime method to access the time.Time value. + ModTime3339 string `json:"modtime,omitempty"` + modTime time.Time + + // LinkName, for symlinks and hardlinks, is the link target. + LinkName string `json:"linkName,omitempty"` + + // Mode is the permission and mode bits. + Mode int64 `json:"mode,omitempty"` + + // UID is the user ID of the owner. + UID int `json:"uid,omitempty"` + + // GID is the group ID of the owner. + GID int `json:"gid,omitempty"` + + // Uname is the username of the owner. 
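The footer layout documented above maps almost one-to-one onto compress/gzip primitives. A rough sketch of emitting the 51-byte footer for a TOC at tocOff; this only illustrates the documented byte layout, it is not the writer this package actually uses:

```go
import (
	"bytes"
	"compress/gzip"
	"fmt"
)

// footerBytes sketches the documented footer: an empty, uncompressed gzip
// stream whose Extra field carries the subfield header SI1='S', SI2='G',
// LEN=22 (little endian) and the 22-byte payload "%016xSTARGZ".
func footerBytes(tocOff int64) ([]byte, error) {
	buf := new(bytes.Buffer)
	w, err := gzip.NewWriterLevel(buf, gzip.NoCompression)
	if err != nil {
		return nil, err
	}
	subfield := fmt.Sprintf("%016xSTARGZ", tocOff) // 16 hex digits + "STARGZ" = 22 bytes
	w.Header.Extra = append([]byte{'S', 'G', 22, 0}, subfield...)
	if err := w.Close(); err != nil { // no payload: header + empty stored block + footer
		return nil, err
	}
	return buf.Bytes(), nil // 51 bytes, matching the breakdown above
}
```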
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same UID.
+	Uname string `json:"userName,omitempty"`
+
+	// Gname is the group name of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same GID.
+	Gname string `json:"groupName,omitempty"`
+
+	// Offset, for regular files, provides the offset in the
+	// stargz file to the file's data bytes. See ChunkOffset and
+	// ChunkSize.
+	Offset int64 `json:"offset,omitempty"`
+
+	nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+	// DevMajor is the major device number for "char" and "block" types.
+	DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+	DevMinor int `json:"devMinor,omitempty"`
+
+	// NumLink is the number of entry names pointing to this entry.
+	// Zero means one name references this entry.
+	NumLink int
+
+	// Xattrs are the extended attributes for the entry.
+	Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for the regular file's payload.
+	// It has the form "sha256:abcdef01234....".
+	Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value when Type is either "reg" or
+	// "chunk".
+	ChunkOffset int64 `json:"chunkOffset,omitempty"`
+	ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+	// ChunkDigest stores an OCI digest of the chunk. This must be formed
+	// as "sha256:0123abcd...".
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+
+	children map[string]*TOCEntry
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+	if e.children == nil {
+		e.children = make(map[string]*TOCEntry)
+	}
+	if child.Type == "dir" {
+		e.NumLink++ // Entry ".." in the subdirectory links to this directory
+	}
+	e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+	for name, ent := range e.children {
+		if !f(name, ent) {
+			return
+		}
+	}
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+	child, ok = e.children[baseName]
+	return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
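ForeachChild and LookupChild make TOC traversal straightforward. A small sketch; walkTOC is an invented helper and it assumes a *Reader r opened as in the tests earlier in this patch, with fmt imported:

```go
// Sketch: print a TOC subtree using the accessors above.
func walkTOC(e *TOCEntry, indent string) {
	e.ForeachChild(func(baseName string, ent *TOCEntry) bool {
		fmt.Printf("%s%s (type=%s, mode=%v)\n", indent, baseName, ent.Type, ent.Stat().Mode())
		if ent.Type == "dir" {
			walkTOC(ent, indent+"  ") // recurse into subdirectories
		}
		return true // keep iterating over siblings
	})
}

// Usage: if root, ok := r.Lookup(""); ok { walkTOC(root, "") }
```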
+type fileInfo struct{ e *TOCEntry }
+
+var _ os.FileInfo = fileInfo{}
+
+func (fi fileInfo) Name() string       { return path.Base(fi.e.Name) }
+func (fi fileInfo) IsDir() bool        { return fi.e.Type == "dir" }
+func (fi fileInfo) Size() int64        { return fi.e.Size }
+func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
+func (fi fileInfo) Sys() interface{}   { return fi.e }
+func (fi fileInfo) Mode() (m os.FileMode) {
+	// TOCEntry.Mode is tar.Header.Mode so we can interpret these bits using the `tar` pkg.
+	m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() &
+		(os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky)
+	switch fi.e.Type {
+	case "dir":
+		m |= os.ModeDir
+	case "symlink":
+		m |= os.ModeSymlink
+	case "char":
+		m |= os.ModeDevice | os.ModeCharDevice
+	case "block":
+		m |= os.ModeDevice
+	case "fifo":
+		m |= os.ModeNamedPipe
+	}
+	return m
+}
+
+// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
+// in an eStargz blob.
+type TOCEntryVerifier interface {
+
+	// Verifier provides a content verifier that can be used for verifying the
+	// contents of the specified TOCEntry.
+	Verifier(ce *TOCEntry) (digest.Verifier, error)
+}
+
+// Compression provides the compression helper to be used for creating and parsing eStargz.
+// This package provides gzip-based Compression by default, but any compression
+// algorithm (e.g. zstd) can be used as long as it implements Compression.
+type Compression interface {
+	Compressor
+	Decompressor
+}
+
+// Compressor represents the helper methods to be used for creating eStargz.
+type Compressor interface {
+	// Writer returns WriteCloser to be used for writing a chunk to eStargz.
+	// Every time a chunk is written, the WriteCloser is closed and Writer is
+	// called again for writing the next chunk.
+	Writer(w io.Writer) (io.WriteCloser, error)
+
+	// WriteTOCAndFooter is called to write JTOC to the passed Writer.
+	// diffHash calculates the DiffID (uncompressed sha256 hash) of the blob.
+	// WriteTOCAndFooter can optionally write anything that affects DiffID calculation
+	// (e.g. uncompressed TOC JSON).
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob when it's parsed.
+	WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error)
+}
+
+// Decompressor represents the helper methods to be used for parsing eStargz.
+type Decompressor interface {
+	// Reader returns ReadCloser to be used for decompressing file payload.
+	Reader(r io.Reader) (io.ReadCloser, error)
+
+	// FooterSize returns the size of the footer of this blob.
+	FooterSize() int64
+
+	// ParseFooter parses the footer and returns the offset and (compressed) size of TOC.
+	// payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between
+	// the top of the blob and the TOC JSON).
+	//
+	// Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range
+	// from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize).
+	ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error)
+
+	// ParseTOC parses TOC from the passed reader. The reader provides the partial contents
+	// of the underlying blob that has the range specified by the ParseFooter method.
+	//
+	// This function returns tocDgst that represents the digest of TOC that will be used
+	// to verify this blob. This must match the value returned from
+	// Compressor.WriteTOCAndFooter that is used when creating this blob.
+	ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error)
+}
diff --git a/vendor/github.com/containers/image/v5/LICENSE b/vendor/github.com/containers/image/v5/LICENSE
new file mode 100644
index 0000000000..9535635306
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/LICENSE
@@ -0,0 +1,189 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/image/v5/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md new file mode 100644 index 0000000000..3c4d74eb4d --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/README.md @@ -0,0 +1,2 @@ +This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, +except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go new file mode 100644 index 0000000000..978df7eabb --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go new file mode 100644 index 0000000000..6a86ec64fd --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/normalize.go @@ -0,0 +1,181 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier +// use ParseAnyReference. 
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// ParseDockerRef normalizes the image reference following the docker convention. This is added
+// mainly for backward compatibility.
+// The reference returned can only be either tagged or digested. If the reference contains both
+// a tag and a digest, the function returns the digested reference, e.g. docker.io/library/busybox:latest@
+// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as
+// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa.
+func ParseDockerRef(ref string) (Named, error) {
+	named, err := ParseNormalizedNamed(ref)
+	if err != nil {
+		return nil, err
+	}
+	if _, ok := named.(NamedTagged); ok {
+		if canonical, ok := named.(Canonical); ok {
+			// The reference is both tagged and digested, only
+			// return digested.
+			newNamed, err := WithName(canonical.Name())
+			if err != nil {
+				return nil, err
+			}
+			newCanonical, err := WithDigest(newNamed, canonical.Digest())
+			if err != nil {
+				return nil, err
+			}
+			return newCanonical, nil
+		}
+	}
+	return TagNameOnly(named), nil
+}
+
+// splitDockerDomain splits a repository name into domain and remote-name strings.
+// If no valid domain is found, the default domain is used. The repository name
+// must already be validated.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
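A quick sketch of what these normalization rules produce. The example function name is invented, and the tagged-and-digested input reuses the digest from the ParseDockerRef comment above:

```go
func ExampleNormalization() {
	named, _ := ParseNormalizedNamed("ubuntu")
	fmt.Println(named.String()) // docker.io/library/ubuntu

	// A reference carrying both a tag and a digest collapses to the digested form.
	ref, _ := ParseDockerRef("docker.io/library/busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	fmt.Println(ref.String()) // docker.io/library/busybox@sha256:7cc4b5aefd...
}
```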
+func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name. +func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go new file mode 100644 index 0000000000..8c0c23b2fe --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. 
+	ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+	ErrTagInvalidFormat = errors.New("invalid tag format")
+
+	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
+	ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+	ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+	// ErrNameEmpty is returned for empty, invalid repository names.
+	ErrNameEmpty = errors.New("repository name must have at least one component")
+
+	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+	// ErrNameNotCanonical is returned when a name is not canonical.
+	ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+	// String returns the full reference
+	String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+	reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+	return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+	return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+	return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+	r, err := Parse(string(p))
+	if err != nil {
+		return err
+	}
+
+	f.reference = r
+	return nil
+}
+
+// Named is an object with a full name.
+type Named interface {
+	Reference
+	Name() string
+}
+
+// Tagged is an object which has a tag.
+type Tagged interface {
+	Reference
+	Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+	Named
+	Tag() string
+}
+
+// Digested is an object which has a digest
+// by which it can be referenced.
+type Digested interface {
+	Reference
+	Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name, including a name with domain and digest.
+type Canonical interface {
+	Named
+	Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. 
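A short sketch of the accessor and combinator functions above; the repository name is illustrative and the snippet assumes fmt is imported:

```go
func ExampleDomainPath() {
	named, _ := ParseNormalizedNamed("quay.io/team/app")
	fmt.Println(Domain(named)) // quay.io
	fmt.Println(Path(named))   // team/app

	tagged, _ := WithTag(named, "v1")
	fmt.Println(tagged.String()) // quay.io/team/app:v1
}
```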
+func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go new file mode 100644 index 0000000000..7860349320 --- /dev/null +++ b/vendor/github.com/containers/image/v5/docker/reference/regexp.go @@ -0,0 +1,143 @@ +package 
reference
+
+import "regexp"
+
+var (
+	// alphaNumericRegexp defines the alpha numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscores, and multiple
+	// dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscores, and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// domainComponentRegexp restricts the registry domain component of a
+	// repository name to start with a component as defined by DomainRegexp
+	// and followed by an optional port.
+	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// DomainRegexp defines the structure of potential domain components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+	DomainRegexp = expression(
+		domainComponentRegexp,
+		optional(repeated(literal(`.`), domainComponentRegexp)),
+		optional(literal(`:`), match(`[0-9]+`)))
+
+	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+	TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+	// anchoredTagRegexp matches valid tag names, anchored at the start and
+	// end of the matched string.
+	anchoredTagRegexp = anchored(TagRegexp)
+
+	// DigestRegexp matches valid digests.
+	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+	// anchoredDigestRegexp matches valid digests, anchored at the start and
+	// end of the matched string.
+	anchoredDigestRegexp = anchored(DigestRegexp)
+
+	// NameRegexp is the format for the name component of references. The
+	// regexp has capturing groups for the domain and name part omitting
+	// the separating forward slash from either.
+	NameRegexp = expression(
+		optional(DomainRegexp, literal(`/`)),
+		nameComponentRegexp,
+		optional(repeated(literal(`/`), nameComponentRegexp)))
+
+	// anchoredNameRegexp is used to parse a name value, capturing the
+	// domain and trailing components.
+	anchoredNameRegexp = anchored(
+		optional(capture(DomainRegexp), literal(`/`)),
+		capture(nameComponentRegexp,
+			optional(repeated(literal(`/`), nameComponentRegexp))))
+
+	// ReferenceRegexp is the full supported format of a reference. The regexp
+	// is anchored and has capturing groups for name, tag, and digest
+	// components.
+	ReferenceRegexp = anchored(capture(NameRegexp),
+		optional(literal(":"), capture(TagRegexp)),
+		optional(literal("@"), capture(DigestRegexp)))
+
+	// IdentifierRegexp is the format for a string identifier used as a
+	// content addressable identifier using sha256. These identifiers
+	// are like digests without the algorithm, since sha256 is used.
+	IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+	// ShortIdentifierRegexp is the format used to represent a prefix
+	// of an identifier. A prefix may be used to match a sha256 identifier
+	// within a list of trusted identifiers.
+	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+	// anchoredIdentifierRegexp is used to check or match an
+	// identifier value, anchored at start and end of string.
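The capture groups these combinators assemble can be inspected directly. A small sketch; the input string is invented and fmt is assumed imported:

```go
func ExampleReferenceRegexp() {
	// ReferenceRegexp captures name, tag, and digest, in that order.
	m := ReferenceRegexp.FindStringSubmatch("example.com/app:v1")
	fmt.Printf("name=%q tag=%q digest=%q\n", m[1], m[2], m[3])
	// name="example.com/app" tag="v1" digest="" (no digest present)
}
```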
+ anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go new file mode 100644 index 0000000000..88e123cdd1 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/key.go @@ -0,0 +1,73 @@ +// Copyright 2015 Jesse Sipprell. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package keyctl + +import ( + "golang.org/x/sys/unix" +) + +// Key represents a single key linked to one or more kernel keyrings. +type Key struct { + Name string + + id, ring keyID + size int +} + +// ID returns the 32-bit kernel identifier for a specific key +func (k *Key) ID() int32 { + return int32(k.id) +} + +// Get the key's value as a byte slice +func (k *Key) Get() ([]byte, error) { + var ( + b []byte + err error + sizeRead int + ) + + if k.size == 0 { + k.size = 512 + } + + size := k.size + + b = make([]byte, int(size)) + sizeRead = size + 1 + for sizeRead > size { + r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, int(k.id), b, size) + if err != nil { + return nil, err + } + + if sizeRead = int(r1); sizeRead > size { + b = make([]byte, sizeRead) + size = sizeRead + sizeRead = size + 1 + } else { + k.size = sizeRead + } + } + return b[:k.size], err +} + +// Unlink a key from the keyring it was loaded from (or added to). If the key +// is not linked to any other keyrings, it is destroyed. 
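+//
+// A sketch of a typical key lifecycle (kr is a placeholder Keyring obtained
+// from SessionKeyring or UserKeyring; error handling elided):
+//
+//	key, _ := kr.Add("example-name", []byte("secret"))
+//	data, _ := key.Get() // grows its buffer until the payload fits
+//	_ = data
+//	_ = key.Unlink() // destroys the key if no other keyring links remain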
+func (k *Key) Unlink() error {
+	_, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(k.id), int(k.ring), 0, 0)
+	return err
+}
+
+// Describe returns a string describing the attributes of a specified key
+func (k *Key) Describe() (string, error) {
+	keyAttr, err := unix.KeyctlString(unix.KEYCTL_DESCRIBE, int(k.id))
+	if err != nil {
+		return "", err
+	}
+	return keyAttr, nil
+}
diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
new file mode 100644
index 0000000000..91c64a1b8b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/keyring.go
@@ -0,0 +1,117 @@
+// Copyright 2015 Jesse Sipprell. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux
+
+// Package keyctl is a Go interface to Linux kernel keyrings (keyctl interface)
+package keyctl
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Keyring is the basic interface to a Linux keyctl keyring.
+type Keyring interface {
+	ID
+	Add(string, []byte) (*Key, error)
+	Search(string) (*Key, error)
+}
+
+type keyring struct {
+	id keyID
+}
+
+// ID is the unique 32-bit serial number identifier that all Keys and Keyrings have.
+type ID interface {
+	ID() int32
+}
+
+// Add a new key to a keyring. The key can be searched for later by name.
+func (kr *keyring) Add(name string, key []byte) (*Key, error) {
+	r, err := unix.AddKey("user", name, key, int(kr.id))
+	if err == nil {
+		key := &Key{Name: name, id: keyID(r), ring: kr.id}
+		return key, nil
+	}
+	return nil, err
+}
+
+// Search for a key by name; this also searches child keyrings linked to this
+// one. The key, if found, is linked to the top keyring that Search() was called
+// from.
+func (kr *keyring) Search(name string) (*Key, error) {
+	id, err := unix.KeyctlSearch(int(kr.id), "user", name, 0)
+	if err == nil {
+		return &Key{Name: name, id: keyID(id), ring: kr.id}, nil
+	}
+	return nil, err
+}
+
+// ID returns the 32-bit kernel identifier of a keyring
+func (kr *keyring) ID() int32 {
+	return int32(kr.id)
+}
+
+// SessionKeyring returns the current login session keyring
+func SessionKeyring() (Keyring, error) {
+	return newKeyring(unix.KEY_SPEC_SESSION_KEYRING)
+}
+
+// UserKeyring returns the keyring specific to the current user.
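+//
+// A minimal lookup sketch ("example-name" is an arbitrary description;
+// error handling elided):
+//
+//	kr, _ := keyctl.UserKeyring()
+//	key, _ := kr.Search("example-name")
+//	data, _ := key.Get()
+//	_ = data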
+func UserKeyring() (Keyring, error) { + return newKeyring(unix.KEY_SPEC_USER_KEYRING) +} + +// Unlink an object from a keyring +func Unlink(parent Keyring, child ID) error { + _, err := unix.KeyctlInt(unix.KEYCTL_UNLINK, int(child.ID()), int(parent.ID()), 0, 0) + return err +} + +// Link a key into a keyring +func Link(parent Keyring, child ID) error { + _, err := unix.KeyctlInt(unix.KEYCTL_LINK, int(child.ID()), int(parent.ID()), 0, 0) + return err +} + +// ReadUserKeyring reads user keyring and returns slice of key with id(key_serial_t) representing the IDs of all the keys that are linked to it +func ReadUserKeyring() ([]*Key, error) { + var ( + b []byte + err error + sizeRead int + ) + krSize := 4 + size := krSize + b = make([]byte, size) + sizeRead = size + 1 + for sizeRead > size { + r1, err := unix.KeyctlBuffer(unix.KEYCTL_READ, unix.KEY_SPEC_USER_KEYRING, b, size) + if err != nil { + return nil, err + } + + if sizeRead = int(r1); sizeRead > size { + b = make([]byte, sizeRead) + size = sizeRead + sizeRead = size + 1 + } else { + krSize = sizeRead + } + } + keyIDs := getKeyIDsFromByte(b[:krSize]) + return keyIDs, err +} + +func getKeyIDsFromByte(byteKeyIDs []byte) []*Key { + idSize := 4 + var keys []*Key + for idx := 0; idx+idSize <= len(byteKeyIDs); idx = idx + idSize { + tempID := *(*int32)(unsafe.Pointer(&byteKeyIDs[idx])) + keys = append(keys, &Key{id: keyID(tempID)}) + } + return keys +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go new file mode 100644 index 0000000000..ae9697149d --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/perm.go @@ -0,0 +1,33 @@ +// Copyright 2015 Jesse Sipprell. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux + +package keyctl + +import ( + "golang.org/x/sys/unix" +) + +// KeyPerm represents in-kernel access control permission to keys and keyrings +// as a 32-bit integer broken up into four permission sets, one per byte. +// In MSB order, the perms are: Processor, User, Group, Other. +type KeyPerm uint32 + +const ( + // PermOtherAll sets all permission for Other + PermOtherAll KeyPerm = 0x3f << (8 * iota) + // PermGroupAll sets all permission for Group + PermGroupAll + // PermUserAll sets all permission for User + PermUserAll + // PermProcessAll sets all permission for Processor + PermProcessAll +) + +// SetPerm sets the permissions on a key or keyring. +func SetPerm(k ID, p KeyPerm) error { + err := unix.KeyctlSetperm(int(k.ID()), uint32(p)) + return err +} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go new file mode 100644 index 0000000000..196c827607 --- /dev/null +++ b/vendor/github.com/containers/image/v5/internal/pkg/keyctl/sys_linux.go @@ -0,0 +1,25 @@ +// Copyright 2015 Jesse Sipprell. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+// +build linux
+
+package keyctl
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+type keyID int32
+
+func newKeyring(id keyID) (*keyring, error) {
+	r1, err := unix.KeyctlGetKeyringID(int(id), true)
+	if err != nil {
+		return nil, err
+	}
+
+	if id < 0 {
+		r1 = int(id)
+	}
+	return &keyring{id: keyID(r1)}, nil
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
new file mode 100644
index 0000000000..6092a9517b
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
@@ -0,0 +1,57 @@
+package internal
+
+import "io"
+
+// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
+// The caller must call Close() on the stream (even if the input stream does not need closing!).
+type CompressorFunc func(io.Writer, *int) (io.WriteCloser, error)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
+type DecompressorFunc func(io.Reader) (io.ReadCloser, error)
+
+// Algorithm is a compression algorithm that can be used for CompressStream.
+type Algorithm struct {
+	name         string
+	prefix       []byte
+	decompressor DecompressorFunc
+	compressor   CompressorFunc
+}
+
+// NewAlgorithm creates an Algorithm instance.
+// This function exists so that Algorithm instances can only be created by code that
+// is allowed to import this internal subpackage.
+func NewAlgorithm(name string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm {
+	return Algorithm{
+		name:         name,
+		prefix:       prefix,
+		decompressor: decompressor,
+		compressor:   compressor,
+	}
+}
+
+// Name returns the name for the compression algorithm.
+func (c Algorithm) Name() string {
+	return c.name
+}
+
+// AlgorithmCompressor returns the compressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmCompressor(algo Algorithm) CompressorFunc {
+	return algo.compressor
+}
+
+// AlgorithmDecompressor returns the decompressor field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmDecompressor(algo Algorithm) DecompressorFunc {
+	return algo.decompressor
+}
+
+// AlgorithmPrefix returns the prefix field of algo.
+// This is a function instead of a public method so that it is only callable by code
+// that is allowed to import this internal subpackage.
+func AlgorithmPrefix(algo Algorithm) []byte {
+	return algo.prefix
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
new file mode 100644
index 0000000000..f96eff2e3c
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go
@@ -0,0 +1,13 @@
+package types
+
+import (
+	"github.com/containers/image/v5/pkg/compression/internal"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!).
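+//
+// A minimal conforming decompressor, sketched with the standard library's
+// gzip package for illustration (*gzip.Reader already implements
+// io.ReadCloser):
+//
+//	func gzipDecompressor(r io.Reader) (io.ReadCloser, error) {
+//		return gzip.NewReader(r)
+//	}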
+type DecompressorFunc = internal.DecompressorFunc
+
+// Algorithm is a compression algorithm provided and supported by pkg/compression.
+// It can’t be supplied from the outside.
+type Algorithm = internal.Algorithm
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
new file mode 100644
index 0000000000..983df41d82
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go
@@ -0,0 +1,502 @@
+package config
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/containers/image/v5/types"
+	"github.com/containers/storage/pkg/homedir"
+	helperclient "github.com/docker/docker-credential-helpers/client"
+	"github.com/docker/docker-credential-helpers/credentials"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type dockerAuthConfig struct {
+	Auth          string `json:"auth,omitempty"`
+	IdentityToken string `json:"identitytoken,omitempty"`
+}
+
+type dockerConfigFile struct {
+	AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+	CredHelpers map[string]string           `json:"credHelpers,omitempty"`
+}
+
+type authPath struct {
+	path         string
+	legacyFormat bool
+}
+
+var (
+	defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json")
+	xdgConfigHomePath       = filepath.FromSlash("containers/auth.json")
+	xdgRuntimeDirPath       = filepath.FromSlash("containers/auth.json")
+	dockerHomePath          = filepath.FromSlash(".docker/config.json")
+	dockerLegacyHomePath    = ".dockercfg"
+	nonLinuxAuthFilePath    = filepath.FromSlash(".config/containers/auth.json")
+
+	// Note that the keyring support has been disabled as it was causing
+	// regressions. Before enabling, please revisit TODO(keyring) comments
+	// which need to be addressed if the need to support the kernel
+	// keyring re-emerges.
+	enableKeyring = false
+
+	// ErrNotLoggedIn is returned for users not logged into a registry
+	// that they are trying to logout of
+	ErrNotLoggedIn = errors.New("not logged in")
+	// ErrNotSupported is returned for unsupported methods
+	ErrNotSupported = errors.New("not supported")
+)
+
+// SetAuthentication stores the username and password in the auth.json file
+func SetAuthentication(sys *types.SystemContext, registry, username, password string) error {
+	return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) {
+		if ch, exists := auths.CredHelpers[registry]; exists {
+			return false, setAuthToCredHelper(ch, registry, username, password)
+		}
+
+		// Set the credentials in the kernel keyring if enableKeyring is true.
+		// The keyring might not work in all environments (e.g., missing capability) and isn't supported on all platforms.
+		// Hence, we want to fall back to using the authfile in case the keyring failed.
+		// However, if enableKeyring is false, we want to adhere to the user specification and not use the keyring.
+		if enableKeyring {
+			err := setAuthToKernelKeyring(registry, username, password)
+			if err == nil {
+				logrus.Debugf("credentials for (%s, %s) were stored in the kernel keyring\n", registry, username)
+				return false, nil
+			}
+			logrus.Debugf("failed to authenticate with the kernel keyring, falling back to authfiles.
%v", err) + } + creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) + newCreds := dockerAuthConfig{Auth: creds} + auths.AuthConfigs[registry] = newCreds + return true, nil + }) +} + +// GetAllCredentials returns the registry credentials for all registries stored +// in either the auth.json file or the docker/config.json. +func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) { + // Note: we need to read the auth files in the inverse order to prevent + // a priority inversion when writing to the map. + authConfigs := make(map[string]types.DockerAuthConfig) + paths := getAuthFilePaths(sys, homedir.Get()) + for i := len(paths) - 1; i >= 0; i-- { + path := paths[i] + // readJSONFile returns an empty map in case the path doesn't exist. + auths, err := readJSONFile(path.path, path.legacyFormat) + if err != nil { + return nil, errors.Wrapf(err, "error reading JSON file %q", path.path) + } + + for registry, data := range auths.AuthConfigs { + conf, err := decodeDockerAuth(data) + if err != nil { + return nil, err + } + authConfigs[normalizeRegistry(registry)] = conf + } + + // Credential helpers may override credentials from the auth file. + for registry, credHelper := range auths.CredHelpers { + username, password, err := getAuthFromCredHelper(credHelper, registry) + if err != nil { + if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { + continue + } + return nil, err + } + + conf := types.DockerAuthConfig{Username: username, Password: password} + authConfigs[normalizeRegistry(registry)] = conf + } + } + + // TODO(keyring): if we ever re-enable the keyring support, we had to + // query all credentials from the keyring here. + + return authConfigs, nil +} + +// getAuthFilePaths returns a slice of authPaths based on the system context +// in the order they should be searched. Note that some paths may not exist. +// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden +// by tests. +func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath { + paths := []authPath{} + pathToAuth, lf, err := getPathToAuth(sys) + if err == nil { + paths = append(paths, authPath{path: pathToAuth, legacyFormat: lf}) + } else { + // Error means that the path set for XDG_RUNTIME_DIR does not exist + // but we don't want to completely fail in the case that the user is pulling a public image + // Logging the error as a warning instead and moving on to pulling the image + logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err) + } + xdgCfgHome := os.Getenv("XDG_CONFIG_HOME") + if xdgCfgHome == "" { + xdgCfgHome = filepath.Join(homeDir, ".config") + } + paths = append(paths, authPath{path: filepath.Join(xdgCfgHome, xdgConfigHomePath), legacyFormat: false}) + if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" { + paths = append(paths, + authPath{path: filepath.Join(dockerConfig, "config.json"), legacyFormat: false}, + ) + } else { + paths = append(paths, + authPath{path: filepath.Join(homeDir, dockerHomePath), legacyFormat: false}, + ) + } + paths = append(paths, + authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true}, + ) + return paths +} + +// GetCredentials returns the registry credentials stored in either auth.json +// file or .docker/config.json, including support for OAuth2 and IdentityToken. +// If an entry is not found, an empty struct is returned. 
+func GetCredentials(sys *types.SystemContext, registry string) (types.DockerAuthConfig, error) { + return getCredentialsWithHomeDir(sys, registry, homedir.Get()) +} + +// getCredentialsWithHomeDir is an internal implementation detail of GetCredentials, +// it exists only to allow testing it with an artificial home directory. +func getCredentialsWithHomeDir(sys *types.SystemContext, registry, homeDir string) (types.DockerAuthConfig, error) { + if sys != nil && sys.DockerAuthConfig != nil { + logrus.Debug("Returning credentials from DockerAuthConfig") + return *sys.DockerAuthConfig, nil + } + + if enableKeyring { + username, password, err := getAuthFromKernelKeyring(registry) + if err == nil { + logrus.Debug("returning credentials from kernel keyring") + return types.DockerAuthConfig{ + Username: username, + Password: password, + }, nil + } + } + + for _, path := range getAuthFilePaths(sys, homeDir) { + authConfig, err := findAuthentication(registry, path.path, path.legacyFormat) + if err != nil { + logrus.Debugf("Credentials not found") + return types.DockerAuthConfig{}, err + } + + if (authConfig.Username != "" && authConfig.Password != "") || authConfig.IdentityToken != "" { + logrus.Debugf("Returning credentials from %s", path.path) + return authConfig, nil + } + } + + logrus.Debugf("Credentials not found") + return types.DockerAuthConfig{}, nil +} + +// GetAuthentication returns the registry credentials stored in +// either auth.json file or .docker/config.json +// If an entry is not found empty strings are returned for the username and password +// +// Deprecated: This API only has support for username and password. To get the +// support for oauth2 in docker registry authentication, we added the new +// GetCredentials API. The new API should be used and this API is kept to +// maintain backward compatibility. +func GetAuthentication(sys *types.SystemContext, registry string) (string, string, error) { + return getAuthenticationWithHomeDir(sys, registry, homedir.Get()) +} + +// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, +// it exists only to allow testing it with an artificial home directory. +func getAuthenticationWithHomeDir(sys *types.SystemContext, registry, homeDir string) (string, string, error) { + auth, err := getCredentialsWithHomeDir(sys, registry, homeDir) + if err != nil { + return "", "", err + } + if auth.IdentityToken != "" { + return "", "", errors.Wrap(ErrNotSupported, "non-empty identity token found and this API doesn't support it") + } + return auth.Username, auth.Password, nil +} + +// RemoveAuthentication deletes the credentials stored in auth.json +func RemoveAuthentication(sys *types.SystemContext, registry string) error { + return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + // First try cred helpers. 
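+		// (A cred helper name maps to an external binary called
+		// docker-credential-<name>, as constructed in deleteAuthFromCredHelper
+		// below; e.g. a hypothetical credHelpers entry {"registry.example.com": "foo"}
+		// would delegate to a docker-credential-foo program.)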
+ if ch, exists := auths.CredHelpers[registry]; exists { + return false, deleteAuthFromCredHelper(ch, registry) + } + + // Next if keyring is enabled try kernel keyring + if enableKeyring { + err := deleteAuthFromKernelKeyring(registry) + if err == nil { + logrus.Debugf("credentials for %s were deleted from the kernel keyring", registry) + return false, nil + } + logrus.Debugf("failed to delete credentials from the kernel keyring, falling back to authfiles") + } + + if _, ok := auths.AuthConfigs[registry]; ok { + delete(auths.AuthConfigs, registry) + } else if _, ok := auths.AuthConfigs[normalizeRegistry(registry)]; ok { + delete(auths.AuthConfigs, normalizeRegistry(registry)) + } else { + return false, ErrNotLoggedIn + } + return true, nil + }) +} + +// RemoveAllAuthentication deletes all the credentials stored in auth.json and kernel keyring +func RemoveAllAuthentication(sys *types.SystemContext) error { + return modifyJSON(sys, func(auths *dockerConfigFile) (bool, error) { + if enableKeyring { + err := removeAllAuthFromKernelKeyring() + if err == nil { + logrus.Debugf("removing all credentials from kernel keyring") + return false, nil + } + logrus.Debugf("error removing credentials from kernel keyring") + } + auths.CredHelpers = make(map[string]string) + auths.AuthConfigs = make(map[string]dockerAuthConfig) + return true, nil + }) +} + +// getPathToAuth gets the path of the auth.json file used for reading and writing credentials +// returns the path, and a bool specifies whether the file is in legacy format +func getPathToAuth(sys *types.SystemContext) (string, bool, error) { + return getPathToAuthWithOS(sys, runtime.GOOS) +} + +// getPathToAuthWithOS is an internal implementation detail of getPathToAuth, +// it exists only to allow testing it with an artificial runtime.GOOS. +func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (string, bool, error) { + if sys != nil { + if sys.AuthFilePath != "" { + return sys.AuthFilePath, false, nil + } + if sys.LegacyFormatAuthFilePath != "" { + return sys.LegacyFormatAuthFilePath, true, nil + } + if sys.RootForImplicitAbsolutePaths != "" { + return filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil + } + } + if goOS == "windows" || goOS == "darwin" { + return filepath.Join(homedir.Get(), nonLinuxAuthFilePath), false, nil + } + + runtimeDir := os.Getenv("XDG_RUNTIME_DIR") + if runtimeDir != "" { + // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. + // We are checking for os.IsNotExist here only to give the user better guidance what to do in this special case. + _, err := os.Stat(runtimeDir) + if os.IsNotExist(err) { + // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory + // or made a typo while setting the environment variable, + // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. + return "", false, errors.Wrapf(err, "%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.", runtimeDir) + } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. 
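+		// Illustrative example: with XDG_RUNTIME_DIR=/run/user/1000, the path
+		// returned below is /run/user/1000/containers/auth.json.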
+ return filepath.Join(runtimeDir, xdgRuntimeDirPath), false, nil + } + return fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()), false, nil +} + +// readJSONFile unmarshals the authentications stored in the auth.json file and returns it +// or returns an empty dockerConfigFile data structure if auth.json does not exist +// if the file exists and is empty, readJSONFile returns an error +func readJSONFile(path string, legacyFormat bool) (dockerConfigFile, error) { + var auths dockerConfigFile + + raw, err := ioutil.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + auths.AuthConfigs = map[string]dockerAuthConfig{} + return auths, nil + } + return dockerConfigFile{}, err + } + + if legacyFormat { + if err = json.Unmarshal(raw, &auths.AuthConfigs); err != nil { + return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) + } + return auths, nil + } + + if err = json.Unmarshal(raw, &auths); err != nil { + return dockerConfigFile{}, errors.Wrapf(err, "error unmarshaling JSON at %q", path) + } + + if auths.AuthConfigs == nil { + auths.AuthConfigs = map[string]dockerAuthConfig{} + } + if auths.CredHelpers == nil { + auths.CredHelpers = make(map[string]string) + } + + return auths, nil +} + +// modifyJSON writes to auth.json if the dockerConfigFile has been updated +func modifyJSON(sys *types.SystemContext, editor func(auths *dockerConfigFile) (bool, error)) error { + path, legacyFormat, err := getPathToAuth(sys) + if err != nil { + return err + } + if legacyFormat { + return fmt.Errorf("writes to %s using legacy format are not supported", path) + } + + dir := filepath.Dir(path) + if err = os.MkdirAll(dir, 0700); err != nil { + return err + } + + auths, err := readJSONFile(path, false) + if err != nil { + return errors.Wrapf(err, "error reading JSON file %q", path) + } + + updated, err := editor(&auths) + if err != nil { + return errors.Wrapf(err, "error updating %q", path) + } + if updated { + newData, err := json.MarshalIndent(auths, "", "\t") + if err != nil { + return errors.Wrapf(err, "error marshaling JSON %q", path) + } + + if err = ioutil.WriteFile(path, newData, 0600); err != nil { + return errors.Wrapf(err, "error writing to file %q", path) + } + } + + return nil +} + +func getAuthFromCredHelper(credHelper, registry string) (string, string, error) { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds, err := helperclient.Get(p, registry) + if err != nil { + return "", "", err + } + return creds.Username, creds.Secret, nil +} + +func setAuthToCredHelper(credHelper, registry, username, password string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + creds := &credentials.Credentials{ + ServerURL: registry, + Username: username, + Secret: password, + } + return helperclient.Store(p, creds) +} + +func deleteAuthFromCredHelper(credHelper, registry string) error { + helperName := fmt.Sprintf("docker-credential-%s", credHelper) + p := helperclient.NewShellProgramFunc(helperName) + return helperclient.Erase(p, registry) +} + +// findAuthentication looks for auth of registry in path +func findAuthentication(registry, path string, legacyFormat bool) (types.DockerAuthConfig, error) { + auths, err := readJSONFile(path, legacyFormat) + if err != nil { + return types.DockerAuthConfig{}, errors.Wrapf(err, "error reading JSON file %q", path) + } + + // First try cred helpers. They should always be normalized. 
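+	// (As at the other helper call sites, an entry such as the hypothetical
+	// {"quay.io": "bar"} delegates the lookup to a docker-credential-bar
+	// binary via getAuthFromCredHelper.)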
+ if ch, exists := auths.CredHelpers[registry]; exists { + username, password, err := getAuthFromCredHelper(ch, registry) + if err != nil { + return types.DockerAuthConfig{}, err + } + + return types.DockerAuthConfig{ + Username: username, + Password: password, + }, nil + } + + // I'm feeling lucky + if val, exists := auths.AuthConfigs[registry]; exists { + return decodeDockerAuth(val) + } + + // bad luck; let's normalize the entries first + registry = normalizeRegistry(registry) + normalizedAuths := map[string]dockerAuthConfig{} + for k, v := range auths.AuthConfigs { + normalizedAuths[normalizeRegistry(k)] = v + } + + if val, exists := normalizedAuths[registry]; exists { + return decodeDockerAuth(val) + } + + return types.DockerAuthConfig{}, nil +} + +// decodeDockerAuth decodes the username and password, which is +// encoded in base64. +func decodeDockerAuth(conf dockerAuthConfig) (types.DockerAuthConfig, error) { + decoded, err := base64.StdEncoding.DecodeString(conf.Auth) + if err != nil { + return types.DockerAuthConfig{}, err + } + + parts := strings.SplitN(string(decoded), ":", 2) + if len(parts) != 2 { + // if it's invalid just skip, as docker does + return types.DockerAuthConfig{}, nil + } + + user := parts[0] + password := strings.Trim(parts[1], "\x00") + return types.DockerAuthConfig{ + Username: user, + Password: password, + IdentityToken: conf.IdentityToken, + }, nil +} + +// convertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry/auth.go +func convertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} + +func normalizeRegistry(registry string) string { + normalized := convertToHostname(registry) + switch normalized { + case "registry-1.docker.io", "docker.io": + return "index.docker.io" + } + return normalized +} diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go new file mode 100644 index 0000000000..1531d69431 --- /dev/null +++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_linux.go @@ -0,0 +1,115 @@ +package config + +import ( + "fmt" + "strings" + + "github.com/containers/image/v5/internal/pkg/keyctl" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const keyDescribePrefix = "container-registry-login:" + +func getAuthFromKernelKeyring(registry string) (string, string, error) { + userkeyring, err := keyctl.UserKeyring() + if err != nil { + return "", "", err + } + key, err := userkeyring.Search(genDescription(registry)) + if err != nil { + return "", "", err + } + authData, err := key.Get() + if err != nil { + return "", "", err + } + parts := strings.SplitN(string(authData), "\x00", 2) + if len(parts) != 2 { + return "", "", nil + } + return parts[0], parts[1], nil +} + +func deleteAuthFromKernelKeyring(registry string) error { + userkeyring, err := keyctl.UserKeyring() + + if err != nil { + return err + } + key, err := userkeyring.Search(genDescription(registry)) + if err != nil { + return err + } + return key.Unlink() +} + +func removeAllAuthFromKernelKeyring() error { + keys, err := keyctl.ReadUserKeyring() + if err != nil { + return err + } + + userkeyring, err := 
keyctl.UserKeyring()
+	if err != nil {
+		return err
+	}
+
+	for _, k := range keys {
+		keyAttr, err := k.Describe()
+		if err != nil {
+			return err
+		}
+		// split string "type;uid;gid;perm;description"
+		keyAttrs := strings.SplitN(keyAttr, ";", 5)
+		if len(keyAttrs) < 5 {
+			return errors.Errorf("Key attributes of %d are not available", k.ID())
+		}
+		keyDescribe := keyAttrs[4]
+		if strings.HasPrefix(keyDescribe, keyDescribePrefix) {
+			err := keyctl.Unlink(userkeyring, k)
+			if err != nil {
+				return errors.Wrapf(err, "error unlinking key %d", k.ID())
+			}
+			logrus.Debugf("unlinked key %d:%s", k.ID(), keyAttr)
+		}
+	}
+	return nil
+}
+
+func setAuthToKernelKeyring(registry, username, password string) error {
+	keyring, err := keyctl.SessionKeyring()
+	if err != nil {
+		return err
+	}
+	id, err := keyring.Add(genDescription(registry), []byte(fmt.Sprintf("%s\x00%s", username, password)))
+	if err != nil {
+		return err
+	}
+
+	// Set all permissions (view, read, write, search, link, set attribute) for the current user;
+	// this enables the user to search for the key after it is linked to the user keyring and
+	// unlinked from the session keyring.
+	err = keyctl.SetPerm(id, keyctl.PermUserAll)
+	if err != nil {
+		return err
+	}
+	// link the key to userKeyring
+	userKeyring, err := keyctl.UserKeyring()
+	if err != nil {
+		return errors.Wrapf(err, "error getting user keyring")
+	}
+	err = keyctl.Link(userKeyring, id)
+	if err != nil {
+		return errors.Wrapf(err, "error linking the key to user keyring")
+	}
+	// unlink the key from session keyring
+	err = keyctl.Unlink(keyring, id)
+	if err != nil {
+		return errors.Wrapf(err, "error unlinking the key from session keyring")
+	}
+	return nil
+}
+
+func genDescription(registry string) string {
+	return fmt.Sprintf("%s%s", keyDescribePrefix, registry)
+}
diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
new file mode 100644
index 0000000000..9b0e8bee25
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/pkg/docker/config/config_unsupported.go
@@ -0,0 +1,20 @@
+// +build !linux
+// +build !386 !amd64
+
+package config
+
+func getAuthFromKernelKeyring(registry string) (string, string, error) {
+	return "", "", ErrNotSupported
+}
+
+func deleteAuthFromKernelKeyring(registry string) error {
+	return ErrNotSupported
+}
+
+func setAuthToKernelKeyring(registry, username, password string) error {
+	return ErrNotSupported
+}
+
+func removeAllAuthFromKernelKeyring() error {
+	return ErrNotSupported
+}
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
new file mode 100644
index 0000000000..8655ca4437
--- /dev/null
+++ b/vendor/github.com/containers/image/v5/types/types.go
@@ -0,0 +1,680 @@
+package types
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/containers/image/v5/docker/reference"
+	compression "github.com/containers/image/v5/pkg/compression/types"
+	digest "github.com/opencontainers/go-digest"
+	v1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS +// (or, even, IPv4 or IPv6). +// +// OTOH all images using the same transport should (apart from versions of the image format), be interoperable. +// For example, several different ImageTransport implementations may be based on local filesystem paths, +// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...) +// +// See also transports.KnownTransports. +type ImageTransport interface { + // Name returns the name of the transport, which must be unique among other transports. + Name() string + // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. + ParseReference(reference string) (ImageReference, error) + // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys + // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). + // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. + // scope passed to this function will not be "", that value is always allowed. + ValidatePolicyConfigurationScope(scope string) error +} + +// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. +// +// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening +// within an ImageTransport.ParseReference() or equivalent API creating the reference object. +// That's also why the various identification/formatting methods of this type do not support returning errors. +// +// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside +// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. +type ImageReference interface { + Transport() ImageTransport + // StringWithinTransport returns a string representation of the reference, which MUST be such that + // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. + // NOTE: The returned string is not promised to be equal to the original input to ParseReference; + // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa. + // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; + // instead, see transports.ImageName(). + StringWithinTransport() string + + // DockerReference returns a Docker reference associated with this reference + // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, + // not e.g. after redirect or alias processing), or nil if unknown/not applicable. + DockerReference() reference.Named + + // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. + // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; + // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical + // (i.e. 
various references with exactly the same semantics should return the same configuration identity) + // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but + // not required/guaranteed that it will be a valid input to Transport().ParseReference(). + // Returns "" if configuration identities for these references are not supported. + PolicyConfigurationIdentity() string + + // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search + // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed + // in order, terminating on first match, and an implicit "" is always checked at the end. + // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), + // and each following element to be a prefix of the element preceding it. + PolicyConfigurationNamespaces() []string + + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. + // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, + // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error) + // NewImageSource returns a types.ImageSource for this reference. + // The caller must call .Close() on the returned ImageSource. + NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error) + // NewImageDestination returns a types.ImageDestination for this reference. + // The caller must call .Close() on the returned ImageDestination. + NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error) + + // DeleteImage deletes the named image from the registry, if supported. + DeleteImage(ctx context.Context, sys *SystemContext) error +} + +// LayerCompression indicates if layers must be compressed, decompressed or preserved +type LayerCompression int + +const ( + // PreserveOriginal indicates the layer must be preserved, ie + // no compression or decompression. + PreserveOriginal LayerCompression = iota + // Decompress indicates the layer must be decompressed + Decompress + // Compress indicates the layer must be compressed + Compress +) + +// LayerCrypto indicates if layers have been encrypted or decrypted or none +type LayerCrypto int + +const ( + // PreserveOriginalCrypto indicates the layer must be preserved, ie + // no encryption/decryption + PreserveOriginalCrypto LayerCrypto = iota + // Encrypt indicates the layer is encrypted + Encrypt + // Decrypt indicates the layer is decrypted + Decrypt +) + +// BlobInfo collects known information about a blob (layer/config). +// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that. +type BlobInfo struct { + Digest digest.Digest // "" if unknown. + Size int64 // -1 if unknown + URLs []string + Annotations map[string]string + MediaType string + // CompressionOperation is used in Image.UpdateLayerInfos to instruct + // whether the original layer's "compressed or not" should be preserved, + // possibly while changing the compression algorithm from one to another, + // or if it should be compressed or decompressed. 
The field defaults to
+	// preserve the original layer's compressedness.
+	// TODO: To remove together with CryptoOperation in re-design to remove
+	// field out of BlobInfo.
+	CompressionOperation LayerCompression
+	// CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct
+	// MIME type for compressed layers (e.g., gzip or zstd). This field MUST be
+	// set when `CompressionOperation == Compress` and MAY be set when
+	// `CompressionOperation == PreserveOriginal` and the compression type is
+	// being changed for an already-compressed layer.
+	CompressionAlgorithm *compression.Algorithm
+	// CryptoOperation is used in Image.UpdateLayerInfos to instruct
+	// whether the original layer was encrypted/decrypted
+	// TODO: To remove together with CompressionOperation in re-design to
+	// remove field out of BlobInfo.
+	CryptoOperation LayerCrypto
+}
+
+// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
+// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
+// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICTransportScope struct {
+	Opaque string
+}
+
+// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
+// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
+// can look it up using BlobInfoCache.CandidateLocations.
+//
+// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
+// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
+// at least by not failing hard when encountering unknown data.
+type BICLocationReference struct {
+	Opaque string
+}
+
+// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
+type BICReplacementCandidate struct {
+	Digest   digest.Digest
+	Location BICLocationReference
+}
+
+// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
+//
+// It records two kinds of data:
+// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
+//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
+//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
+//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
+//
+//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
+//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
+//
+//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
+//   compress/decompress blobs for their own purposes.
+//
+// - Known blob locations, managed by individual transports:
+//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
+//   recording transport-specific information that allows the transport to reuse the blob in the future;
+//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
+//
+//   Each transport defines its own “scopes” within which blob reuse is possible (e.g. in the docker/distribution case, blobs
+//   can be directly reused within a registry, or mounted across registries within a registry server.)
+//
+// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
+// users of the cache should just fall back to copying the blobs the usual way.
+//
+// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
+// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
+type BlobInfoCache interface {
+	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
+	// May return anyDigest if it is known to be uncompressed.
+	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
+	UncompressedDigest(anyDigest digest.Digest) digest.Digest
+	// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
+	// It’s allowed for anyDigest == uncompressed.
+	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
+	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
+	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
+	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
+
+	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
+	// and can be reused given the opaque location data.
+	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
+	// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
+	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
+	//
+	// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
+	// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
+	// uncompressed digest.
+	CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
+}
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
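+//
+// A typical read sequence, sketched (src, blobInfo, and cache are
+// placeholder names; error handling elided):
+//
+//	m, mimeType, _ := src.GetManifest(ctx, nil)
+//	rc, size, _ := src.GetBlob(ctx, blobInfo, cache) // once per layer
+//	// ... consume rc, then rc.Close() ...
+//	_ = src.Close()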
+// +// WARNING: Various methods which return an object identified by digest generally do not +// validate that the returned data actually matches that digest; this is the caller’s responsibility. +type ImageSource interface { + // Reference returns the reference used to set up this source, _as specified by the user_ + // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. + Reference() ImageReference + // Close removes resources associated with an initialized ImageSource, if any. + Close() error + // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). + // It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) + // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. + GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) + // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. + HasThreadSafeGetBlob() bool + // GetSignatures returns the image's signatures. It may use a remote (= slow) service. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer + // blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob() + // to read the image's layers. + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error) +} + +// ImageDestination is a service, possibly remote (= slow), to store components of a single image. +// +// There is a specific required order for some of the calls: +// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time) +// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents) +// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist. 
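+//
+// Sketched as a sequence (placeholder names; error handling elided):
+//
+//	_, _, _ = dest.TryReusingBlob(ctx, blobInfo, cache, true) // or PutBlob, per blob
+//	_ = dest.PutManifest(ctx, manifestBytes, nil)
+//	_ = dest.PutSignatures(ctx, sigs, nil) // optional
+//	_ = dest.Commit(ctx, unparsedToplevel)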
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+	// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+	// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+	Reference() ImageReference
+	// Close removes resources associated with an initialized ImageDestination, if any.
+	Close() error
+
+	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
+	// If an empty slice or nil is returned, then any MIME type can be tried for upload.
+	SupportedManifestMIMETypes() []string
+	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+	SupportsSignatures(ctx context.Context) error
+	// DesiredLayerCompression indicates the kind of compression to apply on layers
+	DesiredLayerCompression() LayerCompression
+	// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+	// uploaded to the image destination, true otherwise.
+	AcceptsForeignLayerURLs() bool
+	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
+	MustMatchRuntimeOS() bool
+	// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
+	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
+	// Does not make a difference if Reference().DockerReference() is nil.
+	IgnoresEmbeddedDockerReference() bool
+
+	// PutBlob writes contents of stream and returns data representing the result.
+	// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+	// inputInfo.Size is the expected length of stream, if known.
+	// inputInfo.MediaType describes the blob format, if known.
+	// May update cache.
+	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+	// to any other readers for download using the supplied digest.
+	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
+	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
+	HasThreadSafePutBlob() bool
+	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
+	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
+	// info.Digest must not be empty.
+	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
+	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
+	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
+	// reflected in the manifest that will be written.
+	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
+	// May use and/or update cache.
+	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
+	// PutManifest writes manifest to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
+	// by `manifest.Digest()`.
+	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+	// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+	// but may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
+	PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
+	// PutSignatures writes a set of signatures to the destination.
+	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
+	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
+	// MUST be called after PutManifest (signatures may reference manifest contents).
+	PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
+	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+	// WARNING: This does not have any transactional semantics:
+	// - Uploaded data MAY be visible to others before Commit() is called
+	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+	Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+	Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+	return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+//
+// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
+//
+// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
+type UnparsedImage interface {
+	// Reference returns the reference used to set up this source, _as specified by the user_
+	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
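+	//
+	// A verification-first flow, sketched (assumes the image subpackage
+	// helpers named here; error handling elided):
+	//
+	//	src, _ := ref.NewImageSource(ctx, sys)
+	//	unparsed := image.UnparsedInstance(src, nil)
+	//	sigs, _ := unparsed.Signatures(ctx) // verify these before parsing further
+	//	img, _ := image.FromUnparsedImage(ctx, sys, unparsed)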
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs. +// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, +// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. +// This also makes the UnparsedImage→Image conversion an explicitly visible step. +// +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +type UnparsedImage interface { + // Reference returns the reference used to set up this source, _as specified by the user_ + // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. + Reference() ImageReference + // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. + Manifest(ctx context.Context) ([]byte, string, error) + // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. + Signatures(ctx context.Context) ([][]byte, error) +} + +// Image is the primary API for inspecting properties of images. +// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The Image must not be used after the underlying ImageSource is Close()d. +type Image interface { + // Note that Reference may return nil in the return value of UpdatedImage! + UnparsedImage + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. + ConfigInfo() BlobInfo + // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. + // The result is cached; it is OK to call this however often you need. + ConfigBlob(context.Context) ([]byte, error) + // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about + // layers in the resulting configuration isn't guaranteed to be returned due to how + // old image manifests work (docker v2s1 especially). + OCIConfig(context.Context) (*v1.Image, error) + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []BlobInfo + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy(context.Context) ([]BlobInfo, error) + // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with the destination ref. + // It returns false if the manifest does not embed a Docker reference. + // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.) + EmbeddedDockerReferenceConflicts(ref reference.Named) bool + // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. + Inspect(context.Context) (*ImageInspectInfo, error) + // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. + // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive + // (most importantly it forces us to download the full layers even if they are already present at the destination). + UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool + // UpdatedImage returns a types.Image modified according to options. + // Everything in options.InformationOnly should be provided; other fields should be set only if a modification is desired.
+ // This does not change the state of the original Image object. + // The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if + // manifests of type options.ManifestMIMEType can not include layers that are compressed + // in accordance with the CompressionOperation and CompressionAlgorithm specified in one + // or more options.LayerInfos items, though retrying with a different + // options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm + // values might succeed. + UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error) + // SupportsEncryption returns an indicator that the image supports encryption + // + // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since + // the process of updating a manifest between different manifest types was to update then convert. + // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836 + SupportsEncryption(ctx context.Context) bool + // Size returns an approximation of the amount of disk space which is consumed by the image in its current + // location. If the size is not known, -1 will be returned. + Size() (int64, error) +} + +// ImageCloser is an Image with a Close() method which must be called by the user. +// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, +// to ensure that the ImageSource is closed. +type ImageCloser interface { + Image + // Close removes resources associated with an initialized ImageCloser. + Close() error +} + +// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage +type ManifestUpdateOptions struct { + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. + EmbeddedDockerReference reference.Named + ManifestMIMEType string + // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. + InformationOnly ManifestUpdateInformation +} + +// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here +// only to make writing struct literals possible. +type ManifestUpdateInformation struct { + Destination ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go) + LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers) + LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order. +} + +// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration. +// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported +// for other manifest types. +type ImageInspectInfo struct { + Tag string + Created *time.Time + DockerVersion string + Labels map[string]string + Architecture string + Variant string + Os string + Layers []string + Env []string +}
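A consumer of the Image interface above typically pairs Inspect with LayerInfos. The following is a hedged sketch, assuming an already-constructed types.Image (obtained elsewhere through the library's usual entry points, which this excerpt does not show) and the vendored import path github.com/containers/image/v5/types:

```go
package copyutil

import (
	"context"
	"fmt"

	"github.com/containers/image/v5/types" // assumed vendored import path
)

// summarize prints inspection data and layer digests for an image. img is
// assumed to be constructed elsewhere via the library's usual entry points.
func summarize(ctx context.Context, img types.Image) error {
	info, err := img.Inspect(ctx) // parsed from the manifest and configuration
	if err != nil {
		return err
	}
	fmt.Printf("os=%s arch=%s labels=%d\n", info.Os, info.Architecture, len(info.Labels))

	// Per the LayerInfos contract above, duplicates may occur and are meaningful.
	for i, layer := range img.LayerInfos() {
		fmt.Printf("layer %d: digest=%s size=%d\n", i, layer.Digest, layer.Size)
	}
	return nil
}
```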
+// DockerAuthConfig contains authorization information for connecting to a registry. +// The value of Username and Password can be empty for accessing the registry anonymously. +type DockerAuthConfig struct { + Username string + Password string + // IdentityToken can be used as a refresh_token in place of username and + // password to obtain the bearer/access token in oauth2 flow. If identity + // token is set, password should not be set. + // Ref: https://docs.docker.com/registry/spec/auth/oauth/ + IdentityToken string +} + +// OptionalBool is a boolean with an additional undefined value, which is meant +// to be used in the context of user input to distinguish between a +// user-specified value and a default value. +type OptionalBool byte + +const ( + // OptionalBoolUndefined indicates that the OptionalBool hasn't been written. + OptionalBoolUndefined OptionalBool = iota + // OptionalBoolTrue represents the boolean true. + OptionalBoolTrue + // OptionalBoolFalse represents the boolean false. + OptionalBoolFalse +) + +// NewOptionalBool converts the input bool into either OptionalBoolTrue or +// OptionalBoolFalse. The function is meant to save users boilerplate code. +func NewOptionalBool(b bool) OptionalBool { + o := OptionalBoolFalse + if b { + o = OptionalBoolTrue + } + return o +} + +// ShortNameMode defines the mode of short-name resolution. +// +// The use of unqualified-search registries entails an ambiguity as it's +// unclear from which registry a given image, referenced by a short name, may +// be pulled. +// +// The ShortNameMode type defines how short names should resolve. +type ShortNameMode int + +const ( + ShortNameModeInvalid ShortNameMode = iota + // Use all configured unqualified-search registries without prompting + // the user. + ShortNameModeDisabled + // If stdout and stdin are a TTY, prompt the user to select a configured + // unqualified-search registry. Otherwise, use all configured + // unqualified-search registries. + // + // Note that if only one unqualified-search registry is set, it will be + // used without prompting. + ShortNameModePermissive + // Always prompt the user to select a configured unqualified-search + // registry. Throw an error if stdout or stdin is not a TTY as + // prompting isn't possible. + // + // Note that if only one unqualified-search registry is set, it will be + // used without prompting. + ShortNameModeEnforcing +)
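OptionalBool makes tri-state user input explicit. A minimal sketch: the flag plumbing and function are hypothetical, while OptionalBool, NewOptionalBool, and the DockerInsecureSkipTLSVerify field of SystemContext (defined further below) come from this file:

```go
package copyutil

import (
	"github.com/containers/image/v5/types" // assumed vendored import path
)

// tlsVerifyToSkip maps a hypothetical user-facing --tls-verify flag onto the
// DockerInsecureSkipTLSVerify field of SystemContext. changed reports whether
// the user set the flag at all.
func tlsVerifyToSkip(changed, verify bool) types.OptionalBool {
	if !changed {
		// Undefined: leave the decision to per-registry configuration.
		return types.OptionalBoolUndefined
	}
	// The flag says "verify" while the field says "skip", so invert.
	return types.NewOptionalBool(!verify)
}
```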
+// SystemContext allows parameterizing access to implicitly-accessed resources, +// like configuration files in /etc and users' login state in their home directory. +// Various components can share the same field only if their semantics are exactly +// the same; if in doubt, add a new field. +// It is always OK to pass nil instead of a SystemContext. +type SystemContext struct { + // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/). + // Not used for any of the more specific path overrides available in this struct. + // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it). + // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths, + // and there is no need to worry about the environment.) + // NOTE: This does NOT affect paths starting by $HOME. + RootForImplicitAbsolutePaths string + + // === Global configuration overrides === + // If not "", overrides the system's default path for signature.Policy configuration. + SignaturePolicyPath string + // If not "", overrides the system's default path for registries.d (Docker signature storage configuration) + RegistriesDirPath string + // Path to the system-wide registries configuration file + SystemRegistriesConfPath string + // Path to the system-wide registries configuration directory + SystemRegistriesConfDirPath string + // Path to the user-specific short-names configuration file + UserShortNameAliasConfPath string + // If set, short-name resolution in pkg/shortnames must follow the specified mode + ShortNameMode *ShortNameMode + // If not "", overrides the default path for the authentication file, but only new format files + AuthFilePath string + // if not "", overrides the default path for the authentication file, but with the legacy format; + // the code currently will by default look for legacy format files like .dockercfg in the $HOME dir; + // but in addition to the home dir, openshift may mount .dockercfg files (via secret mount) + // in locations other than the home dir; openshift components should then set this field in those cases; + // this field is ignored if `AuthFilePath` is set (we favor the newer format); + // only reading of this data is supported; + LegacyFormatAuthFilePath string + // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. + ArchitectureChoice string + // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. + OSChoice string + // If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match. + VariantChoice string + // If not "", overrides the system's default directory containing a blob info cache. + BlobInfoCacheDir string + // Additional tags when creating or copying a docker-archive. + DockerArchiveAdditionalTags []reference.NamedTagged + // If not "", overrides the temporary directory to use for storing big files + BigFilesTemporaryDir string + + // === OCI.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when downloading OCI image layers. + OCICertPath string + // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + OCIInsecureSkipTLSVerify bool + // If not "", use a shared directory for storing blobs rather than within OCI layouts + OCISharedBlobDirPath string + // Allow uncompressed image layers for OCI image layers + OCIAcceptUncompressedLayers bool + + // === docker.Transport overrides === + // If not "", a directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker Registry. + DockerCertPath string + // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. + // Ignored if DockerCertPath is non-empty. + DockerPerHostCertDirPath string + // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerInsecureSkipTLSVerify OptionalBool + // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials + // Ignored if DockerBearerRegistryToken is non-empty.
+ DockerAuthConfig *DockerAuthConfig + // if not "", the library uses this registry token to authenticate to the registry + DockerBearerRegistryToken string + // if not "", a User-Agent header is added to each request when contacting a registry. + DockerRegistryUserAgent string + // if true, a V1 ping attempt isn't done to give users a better error. Default is false. + // Note that this field is used mainly to integrate containers/image into projectatomic/docker + // in order to not break any existing docker's integration tests. + DockerDisableV1Ping bool + // If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list + DockerDisableDestSchema1MIMETypes bool + // If true, the physical pull source of docker transport images is logged at info level + DockerLogMirrorChoice bool + // Directory to use for OSTree temporary files + OSTreeTmpDirPath string + + // === docker/daemon.Transport overrides === + // A directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker daemon. + DockerDaemonCertPath string + // The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. + DockerDaemonHost string + // Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well. + DockerDaemonInsecureSkipTLSVerify bool + + // === dir.Transport overrides === + // DirForceCompress compresses the image layers if set to true + DirForceCompress bool + + // CompressionFormat is the format to use for the compression of the blobs + CompressionFormat *compression.Algorithm + // CompressionLevel specifies what compression level is used + CompressionLevel *int +} + +// ProgressEvent is the type of events a progress reader can produce +// Warning: new event types may be added any time. +type ProgressEvent uint + +const ( + // ProgressEventNewArtifact will be fired on progress reader setup + ProgressEventNewArtifact ProgressEvent = iota + + // ProgressEventRead indicates that the artifact download is currently in + // progress + ProgressEventRead + + // ProgressEventDone is fired when the data transfer has been finished for + // the specific artifact + ProgressEventDone + + // ProgressEventSkipped is fired when the artifact has been skipped because + // it's already available at the destination + ProgressEventSkipped +) + +// ProgressProperties is used to pass information from the copy code to a monitor which +// can use the real-time information to produce output or react to changes. +type ProgressProperties struct { + // The event indicating what happened to the artifact + Event ProgressEvent + + // The artifact which has been updated in this interval + Artifact BlobInfo + + // The currently downloaded size in bytes + // Increases from 0 to the final Artifact size + Offset uint64 + + // The additional offset which has been downloaded inside the last update + // interval. Will be reset after each ProgressEventRead event. + OffsetUpdate uint64 +} diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS new file mode 100644 index 0000000000..11cd83d14e --- /dev/null +++ b/vendor/github.com/containers/storage/AUTHORS @@ -0,0 +1,1522 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`.
+ +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Huslage +Aaron Lehmann +Aaron Welch +Abel Muiño +Abhijeet Kasurde +Abhinav Ajgaonkar +Abhishek Chanda +Abin Shahab +Adam Miller +Adam Singer +Aditi Rajagopal +Aditya +Adria Casas +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akihiro Suda +Al Tobey +alambike +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre González +Alexandru Sfirlogea +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Ali Dehghani +Allen Madsen +Allen Sun +almoehi +Alvin Richards +amangoel +Amen Belayneh +Amit Bakshi +Amit Krishnan +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anchal Agrawal +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Luzzardi +Andrea Turli +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew Munsell +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Ankush Agarwal +Anonmily +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +ArikaChen +Arnaud Porterie +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asbjørn Enge +averagehuman +Avi Das +Avi Miller +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Firshman +Ben Golub +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benoit Chesneau +Bernerd Schaefer +Bert Goethals +Bharath Thiruveedula +Bhiraj Butala +Bill W +bin liu +Blake Geno +Boaz Shuster +bobby abbott +boucher +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +buddhamagnet +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Carl Henrik Lunde +Carl X. Su +Carlos Alexandro Becker +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander G +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Hanxiao +cheney90 +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dituri +Chris Fordham +Chris Khoo +Chris McKinnel +Chris Seto +Chris Snow +Chris St. 
Pierre +Chris Stivers +Chris Swan +Chris Wahl +Chris Weyl +chrismckinnel +Christian Berendt +Christian Böhme +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +ChristoperBiscardi +Christophe Mehay +Christophe Troestler +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Perez +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Coenraad Loubser +Colin Dunklau +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Cory Forsyth +cressie176 +Cristian Staretu +cristiano balducci +Cruceru Calin-Cristian +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Walsh +Dan Williams +Daniel Antlinger +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Hiltgen +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Von Fange +Daniel YC Lin +Daniel Zhang +Daniel, Dao Quang Minh +Danny Berger +Danny Yates +Darren Coxall +Darren Shepherd +Darren Stahl +Dave Barboza +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Corking +David Cramer +David Currie +David Davis +David Gageot +David Gebler +David Lawrence +David Mackey +David Mat +David Mcanulty +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Xia +David Young +Davide Ceretti +Dawn Chen +dcylabs +decadent +deed02392 +Deng Guangxing +Deni Bertovic +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devvyn Murphy +Dharmit Shah +Dieter Reuter +Dima Stopel +Dimitri John Ledkov +Dimitry Andric +Dinesh Subhraveti +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitry Demeshchuk +Dmitry Gusev +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donovan Jones +Doug Davis +Doug MacEachern +Doug Tangren +Dr Nic Williams +dragon788 +Dražen Lučanin +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivind Uggedal +Elan Ruusamäe +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Windisch +Eric Yang +Eric-Olivier Lamey +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Euan +Eugene Yakubovich +eugenkrizo +evalle +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Fabiano Rosas +Fabio Falci +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangyuan Gao <21551127@zju.edu.cn> +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Schindler +Ferenc Szabo +Fernando +Fero Volar +Filipe Brandenburger +Filipe Oliveira +fl0yd +Flavio Castelli +FLGMwt +Florian +Florian Klein +Florian Maier +Florian Weingarten +Florin Asavoaie +Francesc Campoy +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +Fred Lifton +Frederick F. 
Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +fy2462 +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Monroy +GabrielNicolasAvellaneda +Galen Sampson +Gareth Rushgrove +Garrett Barboza +Gaurav +gautam, prasanna +GennadySpb +Geoffrey Bachelet +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gianluca Borello +Gildas Cuisinier +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Thornton +grossws +grunny +gs11 +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Gurjeet Singh +Guruprasad +gwx296173 +Günter Zöchbauer +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harley Laue +Harold Cooper +Harry Zhang +He Simei +heartlock <21521209@zju.edu.cn> +Hector Castro +Henning Sprang +Hobofan +Hollie Teal +Hong Xu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +hyp3rdino +Hyzhou <1187766782@qq.com> +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Lee +Ian Main +Ian Truslove +Iavael +Icaro Seara +Igor Dolzhikov +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +ILYA Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Isaac Dupree +Isabel Jimenez +Isao Jonas +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +J Bruni +J. Nunn +Jack Danger Canty +Jacob Atzen +Jacob Edelman +Jake Champlin +Jake Moshenko +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nugent +James Turnbull +Jamie Hannaford +Jamshid Afshar +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Paul Calderone +Jean-Tiare Le Bigot +Jeff Anderson +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +jgeiger +Jhon Honce +Jian Zhang +jianbosun +Jilles Oldenbeuving +Jim Alateras +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +jimmyxian +Jinsoo Park +Jiri Popelka +Jiří Župka +jjy +jmzwcn +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Howard (VM) +John OBrien III +John Starks +John Tims +John Warwick +John Willis +Jon Wedaman +Jonas Pfenniger +Jonathan A. 
Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Dowland +Jonathan Lebon +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Joost Cassee +Jordan +Jordan Arentsen +Jordan Sissel +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Josh +Josh Hawn +Josh Poimboeuf +Josiah Kiehl +José Tomás Albornoz +JP +jrabbit +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Pervillé +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justin Cormack +Justin Force +Justin Plock +Justin Simonelis +Justin Terry +Jyrki Puttonen +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu(Kennan) +Kamil Domański +kamjar gerami +Kanstantsin Shautsou +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +kayrus +Ke Xu +Keli Hu +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickael Laventure +Kenjiro Nakayama +Kent Johnson +Kevin "qwazerty" Houdebert +Kevin Clark +Kevin J. Lynagh +Kevin Menard +Kevin P. Kucharczyk +Kevin Shi +Kevin Wallace +Kevin Yap +kevinmeredith +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Pelykh +Krasimir Georgiev +Kristian Haugene +Kristina Zabunova +krrg +Kun Zhang +Kunal Kushwaha +Kyle Conroy +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +lalyos +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Laszlo Meszaros +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee, Meng-Han +leeplay +Lei Jitang +Len Weincier +Lennie +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Marshall +Lewis Peckover +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +liaoqingwei +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +LIZAO LI +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Louis Opter +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Luis Martínez de Bartolomé Izquierdo +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +lukemarsden +Lynda O'Leary +Lénaïc Huard +Ma Shimiao +Mabin +Madhav Puri +Madhu Venugopal +Mageee <21521230.zju.edu.cn> +Mahesh Tiyyagura +malnick +Malte Janduda +manchoz +Manfred Touron +Manfred Zabarauskas +mansinahar +Manuel Meurer +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcelo Salazar +Marco Hennings +Marcus Farkas +Marcus Linke +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark McGranaghan +Mark McKinstry +Mark West +Marko Mikulicic +Marko Tibold +Markus Fix +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Redmond +Mary Anthony +Masahito Zembutsu +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Le Marec - Pasquet +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt McCormick +Matt Moore +Matt Robenolt +Matthew Heon +Matthew Mayer +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +mattymo +mattyw +Mauricio Garavaglia +mauriyouth +Max Shytikov +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mengdi Gao +Mert Yazıcıoğlu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Käufl +Michael Neale +Michael Prokop +Michael Scharf +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minar +Michaël Pailloncy +Michał Czeraszkiewicz +Michiel@unhosted +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Gaffney +Mike Goelzer +Mike Leone +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miloslav Trmač +mingqing +Mingzhen Feng +Mitch Capper +mlarcher +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohit Soni +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mqliang +Mrunal Patel +msabansal +mschurenko +muge +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Neal McBurnett +Nelson Chen +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Irvine +Nick Parker +Nick Payne +Nick Stenning +Nick Stinemates +Nicolas Borboën +Nicolas De loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolás Hock Isaza +Nigel Poulton +NikolaMandic +nikolas +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +noducks +Nolan Darilek +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +OddBloke +odk- +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +Ole Reifschneider +Oliver Neal +Olivier Gambier +Olle Jonsson +Oriol Francès +Otto Kekäläinen +oyld +ozlerhakan +paetling +pandrew +panticz +Paolo G. Giarrusso +Pascal Borreli +Pascal Hartig +Patrick Devine +Patrick Hemmer +Patrick Stapleton +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Hammond +Paul Jimenez +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Pavel Lobashov +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Peeyush Gupta +Peggy Li +Pei Su +Penghan Wang +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Phil +Phil Estes +Phil Spitler +Philip Monroe +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Prasanna Gautam +Prayag Verma +Przemek Hejman +pysqz +qg <1373319223@qq.com> +qhuang +Qiang Huang +qq690388648 <690388648@qq.com> +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Rajat Pandit +Rajdeep Dua +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon van Alteren +Ray Tsang +ReadmeCritic +Recursive Madman +Regan McCooey +Remi Rampin +Renato Riccieri Santos Zannon +resouer +rgstephens +Rhys Hiltner +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Stern +Robert Wallis +Roberto G. 
Hashioka +Robin Naundorf +Robin Schneider +Robin Speekenbrink +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Strashkin +Ron Smits +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Rozhnov Alexandr +rsmoorthy +Rudolph Gottesheim +Rui Lopes +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +RyanDeng +Rémy Greinhofer +s. rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Satnam Singh +satoru +Satoshi Amemiya +scaleoutsean +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean OMeara +Sean P. Kane +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sevki Hasirci +Shane Canon +Shane da Silva +shaunol +Shawn Landden +Shawn Siefkas +Shekhar Gulati +Sheng Yang +Shengbo Song +Shih-Yuan Lee +Shijiang Wei +Shishir Mahajan +shuai-z +Shuwei Hao +Sian Lerk Lau +sidharthamani +Silas Sewell +Simei He +Simon Eskildsen +Simon Leinen +Simon Taranto +Sindhu S +Sjoerd Langkemper +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +srinsriv +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephen Crosby +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Subhajit Ghosh +Sujith Haridasan +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien Luttringer +Sébastien Stormacq +TAGOMORI Satoshi +tang0th +Tangi COLIN +Tatsuki Sugiura +Tatsushi Inagaki +Taylor Jones +tbonza +Ted M. Young +Tehmasp Chaudhri +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas LEVEIL +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Tianon Gravi +Tibor Vass +Tiffany Low +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonis Tiigi +Tonny Xu +Tony Daws +Tony Miller +toogley +Torstein Husebø +tpng +tracylihui <793912329@qq.com> +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +trishnaguha +Tristan Carel +Troy Denton +Tyler Brock +Tzu-Jung Lee +Tõnis Tiigi +Ulysse Carion +unknown +vagrant +Vaidas Jablonskis +Veres Lajos +vgeta +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Bernat +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +WANG Chao +Wang Xing +Ward Vandewege +WarheadsSE +Wayne Chang +Wei-Ting Kuo +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wes Morgan +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Riancho +William Thurston +WiseTrem +wlan0 +Wolfgang Powisch +wonderflow +xamyzhao +XiaoBing Jiang +Xiaoxu Chen +xiekeyang +Xinzi Zhou +Xiuming Chen +xlgao-zju +xuzhaokui +Yahya +YAMADA Tsuyoshi +Yan Feng +Yang Bai +yangshukui +Yasunori Mahata +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongzhi Pan +yorkie +Youcef YEKHLEF +Yuan Sun +yuchangchun +yuchengxia +Yurii Rashkovskii +yuzou +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! +Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +Zhenan Ye <21551168@zju.edu.cn> +Zhu Guihua +Zhuoyun Wei +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +搏通 diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE new file mode 100644 index 0000000000..8f3fee627a --- /dev/null +++ b/vendor/github.com/containers/storage/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE new file mode 100644 index 0000000000..8a37c1c7bc --- /dev/null +++ b/vendor/github.com/containers/storage/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2016 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/kr/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go new file mode 100644 index 0000000000..361563ab6f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_others.go @@ -0,0 +1,35 @@ +// +build !linux,!darwin + +package homedir + +// Copyright 2013-2018 Docker, Inc. +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "errors" +) + +// GetRuntimeDir is unsupported on non-linux system. 
+func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} + +// GetCacheHome is unsupported on non-linux system. +func GetCacheHome() (string, error) { + return "", errors.New("homedir.GetCacheHome() is not supported on this system") +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go new file mode 100644 index 0000000000..2475e351bb --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go @@ -0,0 +1,140 @@ +// +build !windows + +package homedir + +// Copyright 2013-2018 Docker, Inc. +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "errors" + "os" + "path/filepath" + "strings" + + "github.com/containers/storage/pkg/unshare" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + homedir, _ := unshare.HomeDir() + return homedir +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "~" +} + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns a non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +}
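A minimal usage sketch for GetRuntimeDir above: prefer XDG_RUNTIME_DIR for ephemeral per-user state and fall back when it is unset. The fallback location is an assumption for illustration, not a policy of the homedir package:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/containers/storage/pkg/homedir"
)

// runtimeStateDir returns XDG_RUNTIME_DIR when available, otherwise a
// per-user temp-dir path (an illustrative choice, not package behavior).
func runtimeStateDir() string {
	if dir, err := homedir.GetRuntimeDir(); err == nil {
		return dir // typically /run/user/$UID, configured via pam_systemd
	}
	return filepath.Join(os.TempDir(), fmt.Sprintf("run-%d", os.Getuid()))
}

func main() {
	fmt.Println(runtimeStateDir())
}
```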
+ +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickRuntimeDirContents returns a slice of the files whose sticky bit was set. +// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore the error when XDG_RUNTIME_DIR is not set + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} + +// GetCacheHome returns XDG_CACHE_HOME. +// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetCacheHome() (string, error) { + if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { + return xdgCacheHome, nil + } + home := Get() + if home == "" { + return "", errors.New("could not get either XDG_CACHE_HOME or HOME") + } + return filepath.Join(home, ".cache"), nil +} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go new file mode 100644 index 0000000000..4f2615ed32 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go @@ -0,0 +1,27 @@ +package homedir + +// Copyright 2013-2018 Docker, Inc. +// NOTE: this package has originally been copied from github.com/docker/docker. + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is the shortcut to the user's home directory +// in the native shell of the platform running on.
+func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go new file mode 100644 index 0000000000..0cd386929a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/idtools.go @@ -0,0 +1,325 @@ +package idtools + +import ( + "bufio" + "fmt" + "os" + "os/user" + "sort" + "strconv" + "strings" + "syscall" + + "github.com/containers/storage/pkg/system" + "github.com/pkg/errors" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName string = "/etc/subuid" + subgidFileName string = "/etc/subgid" +) + +// MkdirAllAs creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +// Deprecated: Use MkdirAllAndChown +func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +} + +// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +// Deprecated: Use MkdirAndChown with a IDPair +func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { + return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +} + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership to the requested uid/gid pair. +func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership +func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { + return mkdirAs(path, mode, ids.UID, ids.GID, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
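+// For example, given uidMap = []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
+// the root uid is 100000 (the host ID that container ID 0 maps to).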
+// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + var uid, gid int + var err error + if len(uidMap) == 1 && uidMap[0].Size == 1 { + uid = uidMap[0].HostID + } else { + uid, err = toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + } + if len(gidMap) == 1 && gidMap[0].Size == 1 { + gid = gidMap[0].HostID + } else { + gid, err = toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id # +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// IDPair is a UID and GID pair +type IDPair struct { + UID int + GID int +} + +// IDMappings contains a mappings of UIDs and GIDs +type IDMappings struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappings takes a requested user and group name and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIDMappings(username, groupname string) (*IDMappings, error) { + subuidRanges, err := parseSubuid(username) + if err != nil { + return nil, err + } + subgidRanges, err := parseSubgid(groupname) + if err != nil { + return nil, err + } + if len(subuidRanges) == 0 { + return nil, fmt.Errorf("No subuid ranges found for user %q in %s", username, subuidFileName) + } + if len(subgidRanges) == 0 { + return nil, fmt.Errorf("No subgid ranges found for group %q in %s", groupname, subgidFileName) + } + + return &IDMappings{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { + return &IDMappings{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IDMappings) RootPair() IDPair { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return IDPair{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. 
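+// For example, with uids = []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
+// (and the same for gids), ToHost(IDPair{UID: 0, GID: 0}) yields
+// IDPair{UID: 100000, GID: 100000}.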
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
+	var err error
+	target := i.RootPair()
+
+	if pair.UID != target.UID {
+		target.UID, err = toHost(pair.UID, i.uids)
+		if err != nil {
+			return target, err
+		}
+	}
+
+	if pair.GID != target.GID {
+		target.GID, err = toHost(pair.GID, i.gids)
+	}
+	return target, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
+	uid, err := toContainer(pair.UID, i.uids)
+	if err != nil {
+		return -1, -1, err
+	}
+	gid, err := toContainer(pair.GID, i.gids)
+	return uid, gid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i *IDMappings) Empty() bool {
+	return len(i.uids) == 0 && len(i.gids) == 0
+}
+
+// UIDs returns the UID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) UIDs() []IDMap {
+	return i.uids
+}
+
+// GIDs returns the GID mapping
+// TODO: remove this once everything has been refactored to use pairs
+func (i *IDMappings) GIDs() []IDMap {
+	return i.gids
+}
+
+func createIDMap(subidRanges ranges) []IDMap {
+	idMap := []IDMap{}
+
+	// sort the ranges by lowest ID first
+	sort.Sort(subidRanges)
+	containerID := 0
+	for _, idrange := range subidRanges {
+		idMap = append(idMap, IDMap{
+			ContainerID: containerID,
+			HostID:      idrange.Start,
+			Size:        idrange.Length,
+		})
+		containerID = containerID + idrange.Length
+	}
+	return idMap
+}
+
+func parseSubuid(username string) (ranges, error) {
+	return parseSubidFile(subuidFileName, username)
+}
+
+func parseSubgid(username string) (ranges, error) {
+	return parseSubidFile(subgidFileName, username)
+}
+
+// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
+// and return all found ranges for a specified username. If the special value
+// "ALL" is supplied for username, then all ranges in the file will be returned
+func parseSubidFile(path, username string) (ranges, error) {
+	var (
+		rangeList ranges
+		uidstr    string
+	)
+	if u, err := user.Lookup(username); err == nil {
+		uidstr = u.Uid
+	}
+
+	subidFile, err := os.Open(path)
+	if err != nil {
+		return rangeList, err
+	}
+	defer subidFile.Close()
+
+	s := bufio.NewScanner(subidFile)
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return rangeList, err
+		}
+
+		text := strings.TrimSpace(s.Text())
+		if text == "" || strings.HasPrefix(text, "#") {
+			continue
+		}
+		parts := strings.Split(text, ":")
+		if len(parts) != 3 {
+			return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
+		}
+		if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") {
+			startid, err := strconv.Atoi(parts[1])
+			if err != nil {
+				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+			}
+			length, err := strconv.Atoi(parts[2])
+			if err != nil {
+				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
+			}
+			rangeList = append(rangeList, subIDRange{startid, length})
+		}
+	}
+	return rangeList, nil
+}
+
+func checkChownErr(err error, name string, uid, gid int) error {
+	if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL {
+		return errors.Wrapf(err, "potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid", uid, gid, name)
+	}
+	return err
+}
+
+func SafeChown(name string, uid, gid int) error {
+	if stat, statErr := system.Stat(name); statErr == nil {
+		if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+			return nil
+		}
+	}
+	return checkChownErr(os.Chown(name, uid, gid), name, uid, gid)
+}
+
+func SafeLchown(name string, uid, gid int) error {
+	if stat, statErr := system.Lstat(name); statErr == nil {
+		if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+			return nil
+		}
+	}
+	return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid)
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
new file mode 100644
index 0000000000..9776b2a128
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go
@@ -0,0 +1,210 @@
+// +build !windows
+
+package idtools
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/containers/storage/pkg/system"
+	"github.com/opencontainers/runc/libcontainer/user"
+)
+
+var (
+	entOnce   sync.Once
+	getentCmd string
+)
+
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
+	// make an array containing the original path asked for, plus (for mkAll == true)
+	// all path components leading up to the complete path that don't exist before we MkdirAll
+	// so that we can chown all of them properly at the end. If chownExisting is false, we won't
+	// chown the full directory path if it exists
+	var paths []string
+	st, err := os.Stat(path)
+	if err != nil && os.IsNotExist(err) {
+		paths = []string{path}
+	} else if err == nil {
+		if !st.IsDir() {
+			return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+		}
+		if chownExisting {
+			// short-circuit--we were called with an existing directory and chown was requested
+			return SafeChown(path, ownerUID, ownerGID)
+		}
+		// nothing to do; directory exists and chown was NOT requested
+		return nil
+	}
+
+	if mkAll {
+		// walk back to "/" looking for directories which do not exist
+		// and add them to the paths array for chown after creation
+		dirPath := path
+		for {
+			dirPath = filepath.Dir(dirPath)
+			if dirPath == "/" {
+				break
+			}
+			if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
+				paths = append(paths, dirPath)
+			}
+		}
+		if err := os.MkdirAll(path, mode); err != nil {
+			return err
+		}
+	} else {
+		if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
+			return err
+		}
+	}
+	// even if it existed, we will chown the requested path + any subpaths that
+	// didn't exist when we called MkdirAll
+	for _, pathComponent := range paths {
+		if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
+// if that uid, gid pair has access (execute bit) to the directory
+func CanAccess(path string, pair IDPair) bool {
+	statInfo, err := system.Stat(path)
+	if err != nil {
+		return false
+	}
+	fileMode := os.FileMode(statInfo.Mode())
+	permBits := fileMode.Perm()
+	return accessible(statInfo.UID() == uint32(pair.UID),
+		statInfo.GID() == uint32(pair.GID), permBits)
+}
+
+func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
+	if isOwner && (perms&0100 == 0100) {
+		return true
+	}
+	if isGroup && (perms&0010 == 0010) {
+		return true
+	}
+	if perms&0001 == 0001 {
+		return true
+	}
+	return false
+}
+
+// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUser(username string) (user.User, error) {
+	// first try a local system files lookup using existing capabilities
+	usr, err := user.LookupUser(username)
+	if err == nil {
+		return usr, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
+	usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
+	if err != nil {
+		return user.User{}, err
+	}
+	return usr, nil
+}
+
+// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupUID(uid int) (user.User, error) {
+	// first try a local system files lookup using existing capabilities
+	usr, err := user.LookupUid(uid)
+	if err == nil {
+		return usr, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured passwd dbs
+	return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
+}
+
+func getentUser(args string) (user.User, error) {
+	reader, err := callGetent(args)
+	if err != nil {
+		return user.User{}, err
+	}
+	users, err := user.ParsePasswd(reader)
+	if err != nil {
+		return user.User{}, err
+	}
+	if len(users) == 0 {
+		return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
+	}
+	return users[0], nil
+}
+
+// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGroup(groupname string) (user.Group, error) {
+	// first try a local system files lookup using existing capabilities
+	group, err := user.LookupGroup(groupname)
+	if err == nil {
+		return group, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured group dbs
+	return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
+}
+
+// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
+// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
+func LookupGID(gid int) (user.Group, error) {
+	// first try a local system files lookup using existing capabilities
+	group, err := user.LookupGid(gid)
+	if err == nil {
+		return group, nil
+	}
+	// local files lookup failed; attempt to call `getent` to query configured group dbs
+	return getentGroup(fmt.Sprintf("%s %d", "group", gid))
+}
+
+func getentGroup(args string) (user.Group, error) {
+	reader, err := callGetent(args)
+	if err != nil {
+		return user.Group{}, err
+	}
+	groups, err := user.ParseGroup(reader)
+	if err != nil {
+		return user.Group{}, err
+	}
+	if len(groups) == 0 {
+		return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
+	}
+	return groups[0], nil
+}
+
+func callGetent(args string) (io.Reader, error) {
+	entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
+	// if no `getent` command on host, can't do anything else
+	if getentCmd == "" {
+		return nil, fmt.Errorf("unable to find getent command")
+	}
+	out, err := execCmd(getentCmd, args)
+	if err != nil {
+		exitCode, errC := system.GetExitCode(err)
+		if errC != nil {
+			return nil, err
+		}
+		switch exitCode {
+		case 1:
+			return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
+		case 2:
+			terms := strings.Split(args, " ")
+			return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
+		case 3:
+			return nil, fmt.Errorf("getent database doesn't support enumeration")
+		default:
+			return nil, err
+		}
+
+	}
+	return bytes.NewReader(out), nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
new file mode 100644
index 0000000000..16be94f446
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go
@@ -0,0 +1,23 @@
+// +build windows
+
+package idtools
+
+import (
+	"os"
+)
+
+// Platforms such as Windows do not support the UID/GID concept. So make this
+// just a wrapper around system.MkdirAll.
+func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { + if err := os.MkdirAll(path, mode); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, pair IDPair) bool { + return true +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go new file mode 100644 index 0000000000..1c819a1f97 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/parser.go @@ -0,0 +1,59 @@ +package idtools + +import ( + "fmt" + "math" + "math/bits" + "strconv" + "strings" +) + +func parseTriple(spec []string) (container, host, size uint32, err error) { + cid, err := strconv.ParseUint(spec[0], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[0], err) + } + hid, err := strconv.ParseUint(spec[1], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[1], err) + } + sz, err := strconv.ParseUint(spec[2], 10, 32) + if err != nil { + return 0, 0, 0, fmt.Errorf("error parsing id map value %q: %v", spec[2], err) + } + return uint32(cid), uint32(hid), uint32(sz), nil +} + +// ParseIDMap parses idmap triples from string. +func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { + stdErr := fmt.Errorf("error initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) + for _, idMapSpec := range mapSpec { + if idMapSpec == "" { + continue + } + idSpec := strings.Split(idMapSpec, ":") + if len(idSpec)%3 != 0 { + return nil, stdErr + } + for i := range idSpec { + if i%3 != 0 { + continue + } + cid, hid, size, err := parseTriple(idSpec[i : i+3]) + if err != nil { + return nil, stdErr + } + // Avoid possible integer overflow on 32bit builds + if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) { + return nil, stdErr + } + mapping := IDMap{ + ContainerID: int(cid), + HostID: int(hid), + Size: int(size), + } + idmap = append(idmap, mapping) + } + } + return idmap, nil +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000000..9da7975e2c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + + cmdTemplates = map[string]string{ + "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", + "useradd": "-r -s /bin/false %s", + "usermod": "-%s %d-%d %s", + } + + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 + userMod = "usermod" +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// 
utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(userName string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + if userCommand == "" { + return fmt.Errorf("Cannot add user; no useradd/adduser binary found") + } + args := fmt.Sprintf(cmdTemplates[userCommand], userName) + out, err := execCmd(userCommand, args) + if err != nil { + return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func 
findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000000..d98b354cbd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. +func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go new file mode 100644 index 0000000000..9703ecbd9d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package idtools + +import ( + "fmt" + "os/exec" + "path/filepath" + "strings" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + //only return no error if the final resolved binary basename + //matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd, args string) ([]byte, error) { + execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
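+	// note: args is split on single spaces, so individual arguments must not
+	// themselves contain spaces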
+ return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go new file mode 100644 index 0000000000..07a0f4847c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags.go @@ -0,0 +1,149 @@ +package mount + +import ( + "fmt" + "strings" +) + +var flags = map[string]struct { + clear bool + flag int +}{ + "defaults": {false, 0}, + "ro": {false, RDONLY}, + "rw": {true, RDONLY}, + "suid": {true, NOSUID}, + "nosuid": {false, NOSUID}, + "dev": {true, NODEV}, + "nodev": {false, NODEV}, + "exec": {true, NOEXEC}, + "noexec": {false, NOEXEC}, + "sync": {false, SYNCHRONOUS}, + "async": {true, SYNCHRONOUS}, + "dirsync": {false, DIRSYNC}, + "remount": {false, REMOUNT}, + "mand": {false, MANDLOCK}, + "nomand": {true, MANDLOCK}, + "atime": {true, NOATIME}, + "noatime": {false, NOATIME}, + "diratime": {true, NODIRATIME}, + "nodiratime": {false, NODIRATIME}, + "bind": {false, BIND}, + "rbind": {false, RBIND}, + "unbindable": {false, UNBINDABLE}, + "runbindable": {false, RUNBINDABLE}, + "private": {false, PRIVATE}, + "rprivate": {false, RPRIVATE}, + "shared": {false, SHARED}, + "rshared": {false, RSHARED}, + "slave": {false, SLAVE}, + "rslave": {false, RSLAVE}, + "relatime": {false, RELATIME}, + "norelatime": {true, RELATIME}, + "strictatime": {false, STRICTATIME}, + "nostrictatime": {true, STRICTATIME}, +} + +var validFlags = map[string]bool{ + "": true, + "size": true, + "mode": true, + "uid": true, + "gid": true, + "nr_inodes": true, + "nr_blocks": true, + "mpol": true, +} + +var propagationFlags = map[string]bool{ + "bind": true, + "rbind": true, + "unbindable": true, + "runbindable": true, + "private": true, + "rprivate": true, + "shared": true, + "rshared": true, + "slave": true, + "rslave": true, +} + +// MergeTmpfsOptions merge mount options to make sure there is no duplicate. +func MergeTmpfsOptions(options []string) ([]string, error) { + // We use collisions maps to remove duplicates. + // For flag, the key is the flag value (the key for propagation flag is -1) + // For data=value, the key is the data + flagCollisions := map[int]bool{} + dataCollisions := map[string]bool{} + + var newOptions []string + // We process in reverse order + for i := len(options) - 1; i >= 0; i-- { + option := options[i] + if option == "defaults" { + continue + } + if f, ok := flags[option]; ok && f.flag != 0 { + // There is only one propagation mode + key := f.flag + if propagationFlags[option] { + key = -1 + } + // Check to see if there is collision for flag + if !flagCollisions[key] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) + flagCollisions[key] = true + } + continue + } + opt := strings.SplitN(option, "=", 2) + if len(opt) != 2 || !validFlags[opt[0]] { + return nil, fmt.Errorf("Invalid tmpfs option %q", opt) + } + if !dataCollisions[opt[0]] { + // We prepend the option and add to collision map + newOptions = append([]string{option}, newOptions...) 
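+			// mark this data key as seen: we walk the options in reverse, so the
+			// last occurrence of each key in the original slice is the one kept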
+ dataCollisions[opt[0]] = true + } + } + + return newOptions, nil +} + +// ParseOptions parses fstab type mount options into mount() flags +// and device specific data +func ParseOptions(options string) (int, string) { + var ( + flag int + data []string + ) + + for _, o := range strings.Split(options, ",") { + // If the option does not exist in the flags table or the flag + // is not supported on the platform, + // then it is a data value for a specific fs type + if f, exists := flags[o]; exists && f.flag != 0 { + if f.clear { + flag &= ^f.flag + } else { + flag |= f.flag + } + } else { + data = append(data, o) + } + } + return flag, strings.Join(data, ",") +} + +// ParseTmpfsOptions parse fstab type mount options into flags and data +func ParseTmpfsOptions(options string) (int, string, error) { + flags, data := ParseOptions(options) + for _, o := range strings.Split(data, ",") { + opt := strings.SplitN(o, "=", 2) + if !validFlags[opt[0]] { + return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) + } + } + return flags, data, nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go new file mode 100644 index 0000000000..0425d0dd63 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go @@ -0,0 +1,87 @@ +package mount + +import ( + "golang.org/x/sys/unix" +) + +const ( + // RDONLY will mount the file system read-only. + RDONLY = unix.MS_RDONLY + + // NOSUID will not allow set-user-identifier or set-group-identifier bits to + // take effect. + NOSUID = unix.MS_NOSUID + + // NODEV will not interpret character or block special devices on the file + // system. + NODEV = unix.MS_NODEV + + // NOEXEC will not allow execution of any binaries on the mounted file system. + NOEXEC = unix.MS_NOEXEC + + // SYNCHRONOUS will allow I/O to the file system to be done synchronously. + SYNCHRONOUS = unix.MS_SYNCHRONOUS + + // DIRSYNC will force all directory updates within the file system to be done + // synchronously. This affects the following system calls: create, link, + // unlink, symlink, mkdir, rmdir, mknod and rename. + DIRSYNC = unix.MS_DIRSYNC + + // REMOUNT will attempt to remount an already-mounted file system. This is + // commonly used to change the mount flags for a file system, especially to + // make a readonly file system writeable. It does not change device or mount + // point. + REMOUNT = unix.MS_REMOUNT + + // MANDLOCK will force mandatory locks on a filesystem. + MANDLOCK = unix.MS_MANDLOCK + + // NOATIME will not update the file access time when reading from a file. + NOATIME = unix.MS_NOATIME + + // NODIRATIME will not update the directory access time. + NODIRATIME = unix.MS_NODIRATIME + + // BIND remounts a subtree somewhere else. + BIND = unix.MS_BIND + + // RBIND remounts a subtree and all possible submounts somewhere else. + RBIND = unix.MS_BIND | unix.MS_REC + + // UNBINDABLE creates a mount which cannot be cloned through a bind operation. + UNBINDABLE = unix.MS_UNBINDABLE + + // RUNBINDABLE marks the entire mount tree as UNBINDABLE. + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC + + // PRIVATE creates a mount which carries no propagation abilities. + PRIVATE = unix.MS_PRIVATE + + // RPRIVATE marks the entire mount tree as PRIVATE. + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC + + // SLAVE creates a mount which receives propagation from its master, but not + // vice versa. + SLAVE = unix.MS_SLAVE + + // RSLAVE marks the entire mount tree as SLAVE. 
+ RSLAVE = unix.MS_SLAVE | unix.MS_REC + + // SHARED creates a mount which provides the ability to create mirrors of + // that mount such that mounts and unmounts within any of the mirrors + // propagate to the other mirrors. + SHARED = unix.MS_SHARED + + // RSHARED marks the entire mount tree as SHARED. + RSHARED = unix.MS_SHARED | unix.MS_REC + + // RELATIME updates inode access times relative to modify or change time. + RELATIME = unix.MS_RELATIME + + // STRICTATIME allows to explicitly request full atime updates. This makes + // it possible for the kernel to default to relatime or noatime but still + // allow userspace to override it. + STRICTATIME = unix.MS_STRICTATIME + + mntDetach = unix.MNT_DETACH +) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go new file mode 100644 index 0000000000..9afd26d4c0 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go @@ -0,0 +1,31 @@ +// +build !linux + +package mount + +// These flags are unsupported. +const ( + BIND = 0 + DIRSYNC = 0 + MANDLOCK = 0 + NOATIME = 0 + NODEV = 0 + NODIRATIME = 0 + NOEXEC = 0 + NOSUID = 0 + UNBINDABLE = 0 + RUNBINDABLE = 0 + PRIVATE = 0 + RPRIVATE = 0 + SHARED = 0 + RSHARED = 0 + SLAVE = 0 + RSLAVE = 0 + RBIND = 0 + RELATIME = 0 + RELATIVE = 0 + REMOUNT = 0 + STRICTATIME = 0 + SYNCHRONOUS = 0 + RDONLY = 0 + mntDetach = 0 +) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go new file mode 100644 index 0000000000..cd4bacd663 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mount.go @@ -0,0 +1,105 @@ +package mount + +import ( + "sort" + "strconv" + "strings" +) + +// mountError holds an error from a mount or unmount operation +type mountError struct { + op string + source, target string + flags uintptr + data string + err error +} + +// Error returns a string representation of mountError +func (e *mountError) Error() string { + out := e.op + " " + + if e.source != "" { + out += e.source + ":" + e.target + } else { + out += e.target + } + + if e.flags != uintptr(0) { + out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) + } + if e.data != "" { + out += ", data: " + e.data + } + + out += ": " + e.err.Error() + return out +} + +// Cause returns the underlying cause of the error +func (e *mountError) Cause() error { + return e.err +} + +// Mount will mount filesystem according to the specified configuration, on the +// condition that the target path is *not* already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. +func Mount(device, target, mType, options string) error { + flag, data := ParseOptions(options) + if flag&REMOUNT != REMOUNT { + if mounted, err := Mounted(target); err != nil || mounted { + return err + } + } + return mount(device, target, mType, uintptr(flag), data) +} + +// ForceMount will mount a filesystem according to the specified configuration, +// *regardless* if the target path is not already mounted. Options must be +// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See +// flags.go for supported option flags. 
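+//
+// An illustrative call (mount source, target and options here are hypothetical):
+//
+//	ForceMount("tmpfs", "/mnt/scratch", "tmpfs", "size=64m")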
+func ForceMount(device, target, mType, options string) error {
+	flag, data := ParseOptions(options)
+	return mount(device, target, mType, uintptr(flag), data)
+}
+
+// Unmount lazily unmounts a filesystem on supported platforms, otherwise
+// does a normal unmount.
+func Unmount(target string) error {
+	return unmount(target, mntDetach)
+}
+
+// RecursiveUnmount unmounts the target and all mounts underneath, starting with
+// the deepest mount first.
+func RecursiveUnmount(target string) error {
+	mounts, err := GetMounts()
+	if err != nil {
+		return err
+	}
+
+	// Make the deepest mount be first
+	sort.Slice(mounts, func(i, j int) bool {
+		return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint)
+	})
+
+	for i, m := range mounts {
+		if !strings.HasPrefix(m.Mountpoint, target) {
+			continue
+		}
+		// Ignore errors for submounts and continue trying to unmount others.
+		// The final unmount should fail if there are any submounts remaining.
+		if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+			return err
+		}
+	}
+	return nil
+}
+
+// ForceUnmount lazily unmounts a filesystem on supported platforms,
+// otherwise does a normal unmount.
+//
+// Deprecated: please use Unmount instead, it is identical.
+func ForceUnmount(target string) error {
+	return unmount(target, mntDetach)
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
new file mode 100644
index 0000000000..b31cf99d0f
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go
@@ -0,0 +1,54 @@
+package mount
+
+/*
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/_iovec.h>
+#include <sys/mount.h>
+#include <sys/param.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"strings"
+	"unsafe"
+)
+
+func allocateIOVecs(options []string) []C.struct_iovec {
+	out := make([]C.struct_iovec, len(options))
+	for i, option := range options {
+		out[i].iov_base = unsafe.Pointer(C.CString(option))
+		out[i].iov_len = C.size_t(len(option) + 1)
+	}
+	return out
+}
+
+func mount(device, target, mType string, flag uintptr, data string) error {
+	isNullFS := false
+
+	xs := strings.Split(data, ",")
+	for _, x := range xs {
+		if x == "bind" {
+			isNullFS = true
+		}
+	}
+
+	options := []string{"fspath", target}
+	if isNullFS {
+		options = append(options, "fstype", "nullfs", "target", device)
+	} else {
+		options = append(options, "fstype", mType, "from", device)
+	}
+	rawOptions := allocateIOVecs(options)
+	for _, rawOption := range rawOptions {
+		defer C.free(rawOption.iov_base)
+	}
+
+	if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
+		reason := C.GoString(C.strerror(*C.__error()))
+		return fmt.Errorf("Failed to call nmount: %s", reason)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
new file mode 100644
index 0000000000..594cd0881a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go
@@ -0,0 +1,74 @@
+package mount
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+const (
+	// ptypes is the set of propagation types.
+	ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+	// pflags is the full set of valid flags for a change propagation call.
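+	// (MS_SILENT suppresses kernel log messages for the call.)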
+ pflags = ptypes | unix.MS_REC | unix.MS_SILENT + + // broflags is the combination of bind and read only + broflags = unix.MS_BIND | unix.MS_RDONLY + + none = "none" +) + +// isremount returns true if either device name or flags identify a remount request, false otherwise. +func isremount(device string, flags uintptr) bool { + switch { + // We treat device "" and "none" as a remount request to provide compatibility with + // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. + case flags&unix.MS_REMOUNT != 0, device == "", device == none: + return true + default: + return false + } +} + +func mount(device, target, mType string, flags uintptr, data string) error { + oflags := flags &^ ptypes + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return &mountError{ + op: "mount", + source: device, + target: target, + flags: oflags, + data: data, + err: err, + } + } + } + + if flags&ptypes != 0 { + // Change the propagation type. + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return &mountError{ + op: "remount", + target: target, + flags: flags & pflags, + err: err, + } + } + } + + if oflags&broflags == broflags { + // Remount the bind to apply read only. + if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { + return &mountError{ + op: "remount-ro", + target: target, + flags: oflags | unix.MS_REMOUNT, + err: err, + } + } + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go new file mode 100644 index 0000000000..42d1d422c5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go @@ -0,0 +1,7 @@ +// +build !linux + +package mount + +func mount(device, target, mType string, flag uintptr, data string) error { + panic("Not implemented") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go new file mode 100644 index 0000000000..bb2da474f4 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go @@ -0,0 +1,13 @@ +package mount + +import ( + "github.com/moby/sys/mountinfo" +) + +type Info = mountinfo.Info + +var Mounted = mountinfo.Mounted + +func GetMounts() ([]*Info, error) { + return mountinfo.GetMounts(nil) +} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go new file mode 100644 index 0000000000..cbc0299fb5 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go @@ -0,0 +1,5 @@ +package mount + +import "github.com/moby/sys/mountinfo" + +var PidMountInfo = mountinfo.PidMountInfo diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go new file mode 100644 index 0000000000..80922ad5ca --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go @@ -0,0 +1,64 @@ +package mount + +// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. +// See the supported options in flags.go for further reference. 
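+// For an already-mounted path this is the programmatic equivalent of
+// `mount --make-shared <mountpoint>`.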
+func MakeShared(mountPoint string) error { + return ensureMountedAs(mountPoint, SHARED) +} + +// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRShared(mountPoint string) error { + return ensureMountedAs(mountPoint, RSHARED) +} + +// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. +// See the supported options in flags.go for further reference. +func MakePrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, PRIVATE) +} + +// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeRPrivate(mountPoint string) error { + return ensureMountedAs(mountPoint, RPRIVATE) +} + +// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, SLAVE) +} + +// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. +// See the supported options in flags.go for further reference. +func MakeRSlave(mountPoint string) error { + return ensureMountedAs(mountPoint, RSLAVE) +} + +// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option +// enabled. See the supported options in flags.go for further reference. +func MakeUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, UNBINDABLE) +} + +// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount +// option enabled. See the supported options in flags.go for further reference. +func MakeRUnbindable(mountPoint string) error { + return ensureMountedAs(mountPoint, RUNBINDABLE) +} + +func ensureMountedAs(mnt string, flags int) error { + mounted, err := Mounted(mnt) + if err != nil { + return err + } + + if !mounted { + if err := mount(mnt, mnt, "none", uintptr(BIND), ""); err != nil { + return err + } + } + + return mount("", mnt, "none", uintptr(flags), "") +} diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go new file mode 100644 index 0000000000..1d1afeee2e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +package mount + +import "golang.org/x/sys/unix" + +func unmount(target string, flags int) error { + err := unix.Unmount(target, flags) + if err == nil || err == unix.EINVAL { + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. 
+		return nil
+	}
+
+	return &mountError{
+		op:     "umount",
+		target: target,
+		flags:  uintptr(flags),
+		err:    err,
+	}
+}
diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
new file mode 100644
index 0000000000..eebc4ab84e
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go
@@ -0,0 +1,7 @@
+// +build windows
+
+package mount
+
+func unmount(target string, flag int) error {
+	panic("Not implemented")
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md
new file mode 100644
index 0000000000..6658f69b69
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/README.md
@@ -0,0 +1,5 @@
+# reexec
+
+The `reexec` package facilitates the busybox-style re-execution of the docker binary that is
+required because of the forking limitations of Go. Handlers are registered under a name, and
+argv[0] of the re-executed binary is used to find and run the matching custom init path.
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
new file mode 100644
index 0000000000..372bee7321
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go
@@ -0,0 +1,32 @@
+// +build linux
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Returns "/proc/self/exe".
+func Self() string {
+	return "/proc/self/exe"
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func Command(args ...string) *exec.Cmd {
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+// This will use the in-memory version (/proc/self/exe) of the current binary,
+// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
new file mode 100644
index 0000000000..1ecaa906fe
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go
@@ -0,0 +1,30 @@
+// +build freebsd solaris darwin
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
+// be set to "/usr/bin/docker".
+func Command(args ...string) *exec.Cmd {
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
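+// The returned command's process is killed when ctx becomes done, as with
+// exec.CommandContext.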
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
new file mode 100644
index 0000000000..9d93742685
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go
@@ -0,0 +1,18 @@
+// +build !linux,!windows,!freebsd,!solaris,!darwin
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func Command(args ...string) *exec.Cmd {
+	return nil
+}
+
+// CommandContext is unsupported on operating systems apart from Linux, Windows, FreeBSD, Solaris and Darwin.
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
new file mode 100644
index 0000000000..673ab476ab
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go
@@ -0,0 +1,32 @@
+// +build windows
+
+package reexec
+
+import (
+	"context"
+	"os/exec"
+)
+
+// Self returns the path to the current process's binary.
+// Uses os.Args[0].
+func Self() string {
+	return naiveSelf()
+}
+
+// Command returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func Command(args ...string) *exec.Cmd {
+	cmd := exec.Command(Self())
+	cmd.Args = args
+	return cmd
+}
+
+// CommandContext returns *exec.Cmd which has Path as current binary.
+// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
+// be set to "C:\docker.exe".
+func CommandContext(ctx context.Context, args ...string) *exec.Cmd {
+	cmd := exec.CommandContext(ctx, Self())
+	cmd.Args = args
+	return cmd
+}
diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
new file mode 100644
index 0000000000..c56671d919
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/reexec/reexec.go
@@ -0,0 +1,47 @@
+package reexec
+
+import (
+	"fmt"
+	"os"
+	"os/exec"
+	"path/filepath"
+)
+
+var registeredInitializers = make(map[string]func())
+
+// Register adds an initialization func under the specified name
+func Register(name string, initializer func()) {
+	if _, exists := registeredInitializers[name]; exists {
+		panic(fmt.Sprintf("reexec func already registered under name %q", name))
+	}
+
+	registeredInitializers[name] = initializer
+}
+
+// Init is called as the first part of the exec process and returns true if an
+// initialization function was called.
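+//
+// A typical (hypothetical) caller registers an initializer at startup and
+// short-circuits main when re-executed under that name:
+//
+//	func init() {
+//		reexec.Register("my-init", func() { /* child-side setup */ })
+//	}
+//
+//	func main() {
+//		if reexec.Init() {
+//			return // we were re-executed as "my-init"
+//		}
+//		// normal startup
+//	}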
+func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/github.com/containers/storage/pkg/system/chmod.go b/vendor/github.com/containers/storage/pkg/system/chmod.go new file mode 100644 index 0000000000..a01d8abfbd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chmod.go @@ -0,0 +1,17 @@ +package system + +import ( + "errors" + "os" + "syscall" +) + +func Chmod(name string, mode os.FileMode) error { + err := os.Chmod(name, mode) + + for err != nil && errors.Is(err, syscall.EINTR) { + err = os.Chmod(name, mode) + } + + return err +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go new file mode 100644 index 0000000000..056d19954d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes.go @@ -0,0 +1,35 @@ +package system + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + if err := setCTime(name, mtime); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go new file mode 100644 index 0000000000..09d58bcbfd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go @@ -0,0 +1,14 @@ +// +build !windows + +package system + +import ( + "time" +) + +//setCTime will set the create time on a file. On Unix, the create +//time is updated as a side effect of setting the modified time, so +//no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go new file mode 100644 index 0000000000..45428c141c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go @@ -0,0 +1,28 @@ +// +build windows + +package system + +import ( + "time" + + "golang.org/x/sys/windows" +) + +//setCTime will set the create time on a file. On Windows, this requires +//calling SetFileTime and explicitly including the create time. 
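+//The handle is opened with FILE_FLAG_BACKUP_SEMANTICS so that directories
+//can be opened as well as regular files.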
+func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go new file mode 100644 index 0000000000..288318985e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/errors.go @@ -0,0 +1,10 @@ +package system + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") +) diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go new file mode 100644 index 0000000000..60f0514b1d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/exitcode.go @@ -0,0 +1,33 @@ +package system + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} + +// ProcessExitCode process the specified error and returns the exit status code +// if the error was of type exec.ExitError, returns nothing otherwise. +func ProcessExitCode(err error) (exitCode int) { + if err != nil { + var exiterr error + if exitCode, exiterr = GetExitCode(err); exiterr != nil { + // TODO: Fix this so we check the error's text. + // we've failed to retrieve exit code, so we set it to 127 + exitCode = 127 + } + } + return +} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go new file mode 100644 index 0000000000..17935088de --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init.go @@ -0,0 +1,22 @@ +package system + +import ( + "syscall" + "time" + "unsafe" +) + +// Used by chtimes +var maxTime time.Time + +func init() { + // chtimes initialization + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go new file mode 100644 index 0000000000..019c66441c --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/init_windows.go @@ -0,0 +1,17 @@ +package system + +import "os" + +// LCOWSupported determines if Linux Containers on Windows are supported. +// Note: This feature is in development (06/17) and enabled through an +// environment variable. At a future time, it will be enabled based +// on build number. 
+var lcowSupported = false
+
+func init() {
+	// LCOW initialization
+	if os.Getenv("LCOW_SUPPORTED") != "" {
+		lcowSupported = true
+	}
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lchown.go b/vendor/github.com/containers/storage/pkg/system/lchown.go
new file mode 100644
index 0000000000..eb2d8b464c
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lchown.go
@@ -0,0 +1,20 @@
+package system
+
+import (
+	"os"
+	"syscall"
+)
+
+func Lchown(name string, uid, gid int) error {
+	err := syscall.Lchown(name, uid, gid)
+
+	for err == syscall.EINTR {
+		err = syscall.Lchown(name, uid, gid)
+	}
+
+	if err != nil {
+		return &os.PathError{Op: "lchown", Path: name, Err: err}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
new file mode 100644
index 0000000000..cff33bb408
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+	return false
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go
new file mode 100644
index 0000000000..e54d01e696
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go
@@ -0,0 +1,6 @@
+package system
+
+// LCOWSupported returns true if Linux containers on Windows are supported.
+func LCOWSupported() bool {
+	return lcowSupported
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
new file mode 100644
index 0000000000..e9d301f090
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go
@@ -0,0 +1,20 @@
+// +build !windows
+
+package system
+
+import (
+	"os"
+	"syscall"
+)
+
+// Lstat takes a path to a file and returns
+// a system.StatT type pertaining to that file.
+//
+// Returns an error if the file does not exist.
+func Lstat(path string) (*StatT, error) {
+	s := &syscall.Stat_t{}
+	if err := syscall.Lstat(path, s); err != nil {
+		return nil, &os.PathError{Op: "Lstat", Path: path, Err: err}
+	}
+	return fromStatT(s)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go
new file mode 100644
index 0000000000..e51df0dafe
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go
@@ -0,0 +1,14 @@
+package system
+
+import "os"
+
+// Lstat calls os.Lstat to get a fileinfo interface back.
+// This is then copied into our own locally defined structure.
+func Lstat(path string) (*StatT, error) {
+	fi, err := os.Lstat(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return fromStatT(&fi)
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go
new file mode 100644
index 0000000000..3b6e947e67
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo.go
@@ -0,0 +1,17 @@
+package system
+
+// MemInfo contains memory statistics of the host system.
+type MemInfo struct {
+	// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
+	// kernel binary code).
+	MemTotal int64
+
+	// Amount of free memory.
+	MemFree int64
+
+	// Total amount of swap space available.
+	SwapTotal int64
+
+	// Amount of swap space that is currently unused.
+	SwapFree int64
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
new file mode 100644
index 0000000000..385f1d5e73
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go
@@ -0,0 +1,65 @@
+package system
+
+import (
+	"bufio"
+	"io"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/docker/go-units"
+)
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	file, err := os.Open("/proc/meminfo")
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	return parseMemInfo(file)
+}
+
+// parseMemInfo parses the /proc/meminfo file into
+// a MemInfo object given an io.Reader to the file.
+// Returns an error if there are problems reading from the file.
+func parseMemInfo(reader io.Reader) (*MemInfo, error) {
+	meminfo := &MemInfo{}
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		// Expected format: ["MemTotal:", "1234", "kB"]
+		parts := strings.Fields(scanner.Text())
+
+		// Sanity checks: skip malformed entries.
+		if len(parts) < 3 || parts[2] != "kB" {
+			continue
+		}
+
+		// Convert to bytes.
+		size, err := strconv.Atoi(parts[1])
+		if err != nil {
+			continue
+		}
+		bytes := int64(size) * units.KiB
+
+		switch parts[0] {
+		case "MemTotal:":
+			meminfo.MemTotal = bytes
+		case "MemFree:":
+			meminfo.MemFree = bytes
+		case "SwapTotal:":
+			meminfo.SwapTotal = bytes
+		case "SwapFree:":
+			meminfo.SwapFree = bytes
+		}
+	}
+
+	// Handle errors that may have occurred during the reading of the file.
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	return meminfo, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
new file mode 100644
index 0000000000..925776e789
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go
@@ -0,0 +1,129 @@
+// +build solaris,cgo
+
+package system
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// #cgo CFLAGS: -std=c99
+// #cgo LDFLAGS: -lkstat
+// #include <unistd.h>
+// #include <stdlib.h>
+// #include <sys/types.h>
+// #include <sys/param.h>
+// #include <sys/swap.h>
+// #include <kstat.h>
+// struct swaptable *allocSwaptable(int num) {
+//	struct swaptable *st;
+//	struct swapent *swapent;
+//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
+//	swapent = st->swt_ent;
+//	for (int i = 0; i < num; i++,swapent++) {
+//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
+//	}
+//	st->swt_n = num;
+//	return st;
+//}
+// void freeSwaptable (struct swaptable *st) {
+//	struct swapent *swapent = st->swt_ent;
+//	for (int i = 0; i < st->swt_n; i++,swapent++) {
+//		free(swapent->ste_path);
+//	}
+//	free(st);
+// }
+// swapent_t getSwapEnt(swapent_t *ent, int i) {
+//	return ent[i];
+// }
+// int64_t getPpKernel() {
+//	int64_t pp_kernel = 0;
+//	kstat_ctl_t *ksc;
+//	kstat_t *ks;
+//	kstat_named_t *knp;
+//	kid_t kid;
+//
+//	if ((ksc = kstat_open()) == NULL) {
+//		return -1;
+//	}
+//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
+//		return -1;
+//	}
+//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
+//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
+//		return -1;
+//	}
+//	switch (knp->data_type) {
+//	case KSTAT_DATA_UINT64:
+//		pp_kernel = knp->value.ui64;
+//		break;
+//	case KSTAT_DATA_UINT32:
+//		pp_kernel = knp->value.ui32;
+//		break;
+//	}
+//
+//	pp_kernel *= sysconf(_SC_PAGESIZE);
+//	return (pp_kernel > 0 ? pp_kernel : -1);
+// }
+import "C"
+
+// getTotalMem gets the total system memory using sysconf, the same way
+// prtconf does.
+func getTotalMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_PHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+func getFreeMem() int64 {
+	pagesize := C.sysconf(C._SC_PAGESIZE)
+	npages := C.sysconf(C._SC_AVPHYS_PAGES)
+	return int64(pagesize * npages)
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
+func ReadMemInfo() (*MemInfo, error) {
+	ppKernel := C.getPpKernel()
+	MemTotal := getTotalMem()
+	MemFree := getFreeMem()
+	SwapTotal, SwapFree, err := getSysSwap()
+
+	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
+		SwapFree < 0 {
+		return nil, fmt.Errorf("error getting system memory info: %v", err)
+	}
+
+	meminfo := &MemInfo{}
+	// Total memory is total physical memory less the memory locked by the kernel.
+	meminfo.MemTotal = MemTotal - int64(ppKernel)
+	meminfo.MemFree = MemFree
+	meminfo.SwapTotal = SwapTotal
+	meminfo.SwapFree = SwapFree
+
+	return meminfo, nil
+}
+
+func getSysSwap() (int64, int64, error) {
+	var tSwap int64
+	var fSwap int64
+	var diskblksPerPage int64
+	num, err := C.swapctl(C.SC_GETNSWP, nil)
+	if err != nil {
+		return -1, -1, err
+	}
+	st := C.allocSwaptable(num)
+	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
+	if err != nil {
+		C.freeSwaptable(st)
+		return -1, -1, err
+	}
+
+	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
+	for i := 0; i < int(num); i++ {
+		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
+		tSwap += int64(swapent.ste_pages) * diskblksPerPage
+		fSwap += int64(swapent.ste_free) * diskblksPerPage
+	}
+	C.freeSwaptable(st)
+	return tSwap, fSwap, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
new file mode 100644
index 0000000000..3ce019dffd
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go
@@ -0,0 +1,8 @@
+// +build !linux,!windows,!solaris
+
+package system
+
+// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
+func ReadMemInfo() (*MemInfo, error) {
+	return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
new file mode 100644
index 0000000000..883944a4c5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go
@@ -0,0 +1,45 @@
+package system
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/windows"
+)
+
+var (
+	modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+
+	procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
+)
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
+// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
+type memorystatusex struct {
+	dwLength                uint32
+	dwMemoryLoad            uint32
+	ullTotalPhys            uint64
+	ullAvailPhys            uint64
+	ullTotalPageFile        uint64
+	ullAvailPageFile        uint64
+	ullTotalVirtual         uint64
+	ullAvailVirtual         uint64
+	ullAvailExtendedVirtual uint64
+}
+
+// ReadMemInfo retrieves memory statistics of the host system and returns a
+// MemInfo type.
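The per-platform ReadMemInfo implementations (the Linux /proc/meminfo parser and Solaris kstat variant above, the Windows GlobalMemoryStatusEx variant below) all return the same MemInfo struct, so callers stay platform-agnostic. A usage sketch:

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        mi, err := system.ReadMemInfo()
        if err != nil {
            fmt.Println("meminfo unsupported here:", err)
            return
        }
        // All values are plain bytes, already converted from /proc's kB units.
        fmt.Printf("RAM %d/%d free, swap %d/%d free\n",
            mi.MemFree, mi.MemTotal, mi.SwapFree, mi.SwapTotal)
    }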
+func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go new file mode 100644 index 0000000000..af79a65383 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) +} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go new file mode 100644 index 0000000000..2e863c0215 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go @@ -0,0 +1,13 @@ +// +build windows + +package system + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. +func Mkdev(major int64, minor int64) uint32 { + panic("Mkdev not implemented on Windows.") +} diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go new file mode 100644 index 0000000000..f634a6be67 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path.go @@ -0,0 +1,21 @@ +package system + +import "runtime" + +const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + +// DefaultPathEnv is unix style list of directories to search for +// executables. Each directory is separated from the next by a colon +// ':' character . +func DefaultPathEnv(platform string) string { + if runtime.GOOS == "windows" { + if platform != runtime.GOOS && LCOWSupported() { + return defaultUnixPathEnv + } + // Deliberately empty on Windows containers on Windows as the default path will be set by + // the container. Docker has no context of what the default path should be. + return "" + } + return defaultUnixPathEnv + +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go new file mode 100644 index 0000000000..f3762e69d3 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_unix.go @@ -0,0 +1,9 @@ +// +build !windows + +package system + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. This is a no-op on Linux. 
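The Mkdev comment above describes the split encoding Linux uses for device numbers. As a worked example, the conventional /dev/null numbering of major 1, minor 3 encodes as ((3 & 0xfff00) << 12) | ((1 & 0xfff) << 8) | (3 & 0xff) = 0x103; a standalone sketch mirroring the formula:

    package main

    import "fmt"

    // mkdev mirrors the encoding used by system.Mkdev above.
    func mkdev(major, minor int64) uint32 {
        return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
    }

    func main() {
        fmt.Printf("%#x\n", mkdev(1, 3)) // 0x103: major 1 in bits 8-19, minor 3 in bits 0-7
    }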
+func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go new file mode 100644 index 0000000000..aab891522d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/path_windows.go @@ -0,0 +1,33 @@ +// +build windows + +package system + +import ( + "fmt" + "path/filepath" + "strings" +) + +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters. Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !filepath.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go new file mode 100644 index 0000000000..a9a0dd7517 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/process_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd solaris darwin + +package system + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + _ = unix.Kill(pid, unix.SIGKILL) +} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go new file mode 100644 index 0000000000..510e714283 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/rm.go @@ -0,0 +1,79 @@ +package system + +import ( + "os" + "syscall" + "time" + + "github.com/containers/storage/pkg/mount" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can +// often be remedied. +// Only use `EnsureRemoveAll` if you really want to make every effort to remove +// a directory. +// +// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there +// can be a race between reading directory entries and then actually attempting +// to remove everything in the directory. +// These types of errors do not need to be returned since it's ok for the dir to +// be gone we can just retry the remove operation. 
+//
+// This should not return an os.ErrNotExist kind of error under any circumstances.
+func EnsureRemoveAll(dir string) error {
+	notExistErr := make(map[string]bool)
+
+	// track retries
+	exitOnErr := make(map[string]int)
+	maxRetry := 100
+
+	// Attempt to unmount anything beneath this dir first.
+	if err := mount.RecursiveUnmount(dir); err != nil {
+		logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err)
+	}
+
+	for {
+		err := os.RemoveAll(dir)
+		if err == nil {
+			return nil
+		}
+
+		pe, ok := err.(*os.PathError)
+		if !ok {
+			return err
+		}
+
+		if os.IsNotExist(err) {
+			if notExistErr[pe.Path] {
+				return err
+			}
+			notExistErr[pe.Path] = true
+
+			// There is a race where a subdir can be removed after the parent
+			// dir entries have been read, so the path could come from
+			// os.Remove(subdir).
+			// If the reported non-existent path is not the passed-in dir we
+			// should just retry, but otherwise return with no error.
+			if pe.Path == dir {
+				return nil
+			}
+			continue
+		}
+
+		if pe.Err != syscall.EBUSY {
+			return err
+		}
+
+		if e := mount.Unmount(pe.Path); e != nil {
+			return errors.Wrapf(e, "error while removing %s", dir)
+		}
+
+		if exitOnErr[pe.Path] == maxRetry {
+			return err
+		}
+		exitOnErr[pe.Path]++
+		time.Sleep(100 * time.Millisecond)
+	}
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
new file mode 100644
index 0000000000..715f05b938
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type.
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
new file mode 100644
index 0000000000..715f05b938
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go
@@ -0,0 +1,13 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type.
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: uint32(s.Mode),
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtimespec}, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
new file mode 100644
index 0000000000..af7af20fa4
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/system/stat_linux.go
@@ -0,0 +1,19 @@
+package system
+
+import "syscall"
+
+// fromStatT converts a syscall.Stat_t type to a system.StatT type.
+func fromStatT(s *syscall.Stat_t) (*StatT, error) {
+	return &StatT{size: s.Size,
+		mode: s.Mode,
+		uid:  s.Uid,
+		gid:  s.Gid,
+		rdev: uint64(s.Rdev),
+		mtim: s.Mtim}, nil
+}
+
+// FromStatT converts a syscall.Stat_t type to a system.StatT type.
+// This is exposed on Linux as pkg/archive/changes uses it.
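Each platform file above supplies its own fromStatT so that the exported Stat/Lstat helpers can hand back one portable StatT; the only per-OS difference is which syscall field (Mtim vs Mtimespec) feeds the modification time. A usage sketch for a Unix-like system (the path is just an example):

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        st, err := system.Stat("/etc/hosts")
        if err != nil {
            fmt.Println(err)
            return
        }
        // Accessors hide the raw syscall.Stat_t layout behind one portable type.
        fmt.Printf("mode=%o uid=%d gid=%d size=%d\n", st.Mode(), st.UID(), st.GID(), st.Size())
    }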
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go new file mode 100644 index 0000000000..b607dea946 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go new file mode 100644 index 0000000000..b607dea946 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go new file mode 100644 index 0000000000..2fac918bfc --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_unix.go @@ -0,0 +1,74 @@ +// +build !windows + +package system + +import ( + "os" + "strconv" + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + } + return fromStatT(s) +} + +// Fstat takes an open file descriptor and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file descriptor is invalid +func Fstat(fd int) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Fstat(fd, s); err != nil { + return nil, &os.PathError{Op: "Fstat", Path: strconv.Itoa(fd), Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go new file mode 100644 index 0000000000..d306360520 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/stat_windows.go @@ -0,0 +1,63 @@ +package system + +import ( + "os" + "time" +) + +// StatT type contains status of a file. 
It contains metadata +// like permission, size, etc about a file. +type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// UID returns file's user id of owner. +// +// on windows this is always 0 because there is no concept of UID +func (s StatT) UID() uint32 { + return 0 +} + +// GID returns file's group id of owner. +// +// on windows this is always 0 because there is no concept of GID +func (s StatT) GID() uint32 { + return 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go new file mode 100644 index 0000000000..49dbdd3781 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go @@ -0,0 +1,17 @@ +// +build linux freebsd + +package system + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} + +// CommandLineToArgv should not be used on Unix. +// It simply returns commandLine in the only element in the returned array. +func CommandLineToArgv(commandLine string) ([]string, error) { + return []string{commandLine}, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go new file mode 100644 index 0000000000..23e9b207c7 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go @@ -0,0 +1,122 @@ +package system + +import ( + "unsafe" + + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = modkernel32.NewProc("GetProductInfo") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() OSVersion { + var err error + osv := OSVersion{} + osv.Version, err = windows.GetVersion() + if err != nil { + // GetVersion never fails. 
+ panic(err) + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv +} + +// IsWindowsClient returns true if the SKU is client +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// IsIoTCore returns true if the currently running image is based off of +// Windows 10 IoT Core. +// @engine maintainers - this function should not be removed or modified as it +// is used to enforce licensing restrictions on Windows. +func IsIoTCore() bool { + var returnedProductType uint32 + r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) + if r1 == 0 { + logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) + return false + } + const productIoTUAP = 0x0000007B + const productIoTUAPCommercial = 0x00000083 + return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(dest string) error { + return nil +} + +// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. +func CommandLineToArgv(commandLine string) ([]string, error) { + var argc int32 + + argsPtr, err := windows.UTF16PtrFromString(commandLine) + if err != nil { + return nil, err + } + + argv, err := windows.CommandLineToArgv(argsPtr, &argc) + if err != nil { + return nil, err + } + defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) + + newArgs := make([]string, argc) + for i, v := range (*argv)[:argc] { + newArgs[i] = string(windows.UTF16ToString((*v)[:])) + } + + return newArgs, nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go new file mode 100644 index 0000000000..5a10eda5af --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go new file mode 100644 index 0000000000..13f1de1769 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/umask_windows.go @@ -0,0 +1,9 @@ +// +build windows + +package system + +// Umask is not supported on the windows platform. 
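GetOSVersion above unpacks the uint32 returned by windows.GetVersion: the major version lives in the low byte, the minor version in the next byte, and the build number in the high 16 bits. A worked example using the Windows 10 build 19041 value (the sample constant is illustrative):

    package main

    import "fmt"

    func main() {
        // 10 | 0<<8 | 19041<<16, i.e. major 10, minor 0, build 19041.
        const version uint32 = 10 | 0<<8 | 19041<<16
        major := uint8(version & 0xFF)
        minor := uint8(version >> 8 & 0xFF)
        build := uint16(version >> 16)
        fmt.Println(major, minor, build) // 10 0 19041
    }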
+func Umask(newmask int) (oldmask int, err error) { + // should not be called on cli code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go new file mode 100644 index 0000000000..6a77524376 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go @@ -0,0 +1,24 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + + if _, _, err := unix.Syscall(unix.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go new file mode 100644 index 0000000000..edc588a63f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go @@ -0,0 +1,25 @@ +package system + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. +func LUtimesNano(path string, ts []syscall.Timespec) error { + atFdCwd := unix.AT_FDCWD + + var _path *byte + _path, err := unix.BytePtrFromString(path) + if err != nil { + return err + } + if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go new file mode 100644 index 0000000000..139714544d --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go new file mode 100644 index 0000000000..10355848bd --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go @@ -0,0 +1,84 @@ +package system + +import ( + "bytes" + "os" + + "golang.org/x/sys/unix" +) + +const ( + // Value is larger than the maximum size allowed + E2BIG unix.Errno = unix.E2BIG + + // Operation not supported + EOPNOTSUPP unix.Errno = unix.EOPNOTSUPP +) + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// Returns a []byte slice if the xattr is set and nil otherwise. 
+func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128 length byte array + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENODATA: + return nil, nil + case errno != nil: + return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + if err := unix.Lsetxattr(path, attr, data, flags); err != nil { + return &os.PathError{Op: "lsetxattr", Path: path, Err: err} + } + + return nil +} + +// Llistxattr lists extended attributes associated with the given path +// in the file system. +func Llistxattr(path string) ([]string, error) { + dest := make([]byte, 128) + sz, errno := unix.Llistxattr(path, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Llistxattr(path, []byte{}) + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + dest = make([]byte, sz) + sz, errno = unix.Llistxattr(path, dest) + } + if errno != nil { + return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} + } + + var attrs []string + for _, token := range bytes.Split(dest[:sz], []byte{0}) { + if len(token) > 0 { + attrs = append(attrs, string(token)) + } + } + + return attrs, nil +} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000000..bc8b8e3a5f --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go @@ -0,0 +1,28 @@ +// +build !linux + +package system + +import "syscall" + +const ( + // Value is larger than the maximum size allowed + E2BIG syscall.Errno = syscall.Errno(0) + + // Operation not supported + EOPNOTSUPP syscall.Errno = syscall.Errno(0) +) + +// Lgetxattr is not supported on platforms other than linux. +func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} + +// Llistxattr is not supported on platforms other than linux. 
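Lgetxattr above sizes its buffer optimistically at 128 bytes and, on ERANGE, re-queries with a zero-length buffer to learn the true size before retrying; Llistxattr uses the same grow-on-ERANGE loop. A usage sketch for Linux (the attribute name is just an example):

    package main

    import (
        "fmt"

        "github.com/containers/storage/pkg/system"
    )

    func main() {
        // Returns nil with no error when the attribute is simply unset (ENODATA).
        val, err := system.Lgetxattr("/tmp/example", "user.comment")
        if err != nil {
            fmt.Println(err)
            return
        }
        fmt.Printf("xattr value: %q\n", val)
    }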
+func Llistxattr(path string) ([]string, error) {
+	return nil, ErrNotSupportedPlatform
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
new file mode 100644
index 0000000000..4f441c32c5
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go
@@ -0,0 +1,22 @@
+// +build linux,cgo
+
+package unshare
+
+import (
+	"unsafe"
+)
+
+/*
+#cgo remoteclient CFLAGS: -Wall -Werror
+#include <stdlib.h>
+*/
+import "C"
+
+func getenv(name string) string {
+	cName := C.CString(name)
+	defer C.free(unsafe.Pointer(cName))
+
+	value := C.GoString(C.getenv(cName))
+
+	return value
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
new file mode 100644
index 0000000000..a5005403af
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go
@@ -0,0 +1,11 @@
+// +build linux,!cgo
+
+package unshare
+
+import (
+	"os"
+)
+
+func getenv(name string) string {
+	return os.Getenv(name)
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
new file mode 100644
index 0000000000..c0e359b276
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.c
@@ -0,0 +1,378 @@
+#ifndef UNSHARE_NO_CODE_AT_ALL
+
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/mount.h>
+#include <sys/mman.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <termios.h>
+#include <errno.h>
+#include <unistd.h>
+#include <libgen.h>
+#include <limits.h>
+
+/* Open Source projects like conda-forge want to package podman and are based
+   off of centos:6. conda-forge has minimal libc requirements and lacks the
+   memfd.h header, so we use mman.h instead.
+*/
+#ifndef MFD_ALLOW_SEALING
+#define MFD_ALLOW_SEALING 2U
+#endif
+#ifndef MFD_CLOEXEC
+#define MFD_CLOEXEC 1U
+#endif
+
+#ifndef F_LINUX_SPECIFIC_BASE
+#define F_LINUX_SPECIFIC_BASE 1024
+#endif
+#ifndef F_ADD_SEALS
+#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9)
+#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10)
+#endif
+#ifndef F_SEAL_SEAL
+#define F_SEAL_SEAL 0x0001LU
+#endif
+#ifndef F_SEAL_SHRINK
+#define F_SEAL_SHRINK 0x0002LU
+#endif
+#ifndef F_SEAL_GROW
+#define F_SEAL_GROW 0x0004LU
+#endif
+#ifndef F_SEAL_WRITE
+#define F_SEAL_WRITE 0x0008LU
+#endif
+
+#define BUFSTEP 1024
+
+static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces";
+static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone";
+
+static int _containers_unshare_parse_envint(const char *envname) {
+	char *p, *q;
+	long l;
+
+	p = getenv(envname);
+	if (p == NULL) {
+		return -1;
+	}
+	q = NULL;
+	l = strtol(p, &q, 10);
+	if ((q == NULL) || (*q != '\0')) {
+		fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p);
+		_exit(1);
+	}
+	unsetenv(envname);
+	return l;
+}
+
+static void _check_proc_sys_file(const char *path)
+{
+	FILE *fp;
+	char buf[32];
+	size_t n_read;
+	long r;
+
+	fp = fopen(path, "r");
+	if (fp == NULL) {
+		if (errno != ENOENT)
+			fprintf(stderr, "Error reading %s: %m\n", path);
+	} else {
+		memset(buf, 0, sizeof(buf));
+		n_read = fread(buf, 1, sizeof(buf) - 1, fp);
+		if (n_read > 0) {
+			r = atoi(buf);
+			if (r == 0) {
+				fprintf(stderr, "User namespaces are not enabled in %s.\n", path);
+			}
+		} else {
+			fprintf(stderr, "Error reading %s: no contents, should contain a number greater than 0.\n", path);
+		}
+
fclose(fp); + } +} + +static char **parse_proc_stringlist(const char *list) { + int fd, n, i, n_strings; + char *buf, *new_buf, **ret; + size_t size, new_size, used; + + fd = open(list, O_RDONLY); + if (fd == -1) { + return NULL; + } + buf = NULL; + size = 0; + used = 0; + for (;;) { + new_size = used + BUFSTEP; + new_buf = realloc(buf, new_size); + if (new_buf == NULL) { + free(buf); + fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP)); + return NULL; + } + buf = new_buf; + size = new_size; + memset(buf + used, '\0', size - used); + n = read(fd, buf + used, size - used - 1); + if (n < 0) { + fprintf(stderr, "read(): %m\n"); + return NULL; + } + if (n == 0) { + break; + } + used += n; + } + close(fd); + n_strings = 0; + for (n = 0; n < used; n++) { + if ((n == 0) || (buf[n-1] == '\0')) { + n_strings++; + } + } + ret = calloc(n_strings + 1, sizeof(char *)); + if (ret == NULL) { + fprintf(stderr, "calloc(): out of memory\n"); + return NULL; + } + i = 0; + for (n = 0; n < used; n++) { + if ((n == 0) || (buf[n-1] == '\0')) { + ret[i++] = &buf[n]; + } + } + ret[i] = NULL; + return ret; +} + +/* + * Taken from the runc cloned_binary.c file + * Copyright (C) 2019 Aleksa Sarai + * Copyright (C) 2019 SUSE LLC + * + * This work is dual licensed under the following licenses. You may use, + * redistribute, and/or modify the work under the conditions of either (or + * both) licenses. + * + * === Apache-2.0 === + */ +static int try_bindfd(void) +{ + int fd, ret = -1; + char src[PATH_MAX] = {0}; + char template[64] = {0}; + + strncpy(template, "/tmp/containers.XXXXXX", sizeof(template) - 1); + + /* + * We need somewhere to mount it, mounting anything over /proc/self is a + * BAD idea on the host -- even if we do it temporarily. + */ + fd = mkstemp(template); + if (fd < 0) + return ret; + close(fd); + + ret = -EPERM; + + if (readlink("/proc/self/exe", src, sizeof (src) - 1) < 0) + goto out; + + if (mount(src, template, NULL, MS_BIND, NULL) < 0) + goto out; + if (mount(NULL, template, NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0) + goto out_umount; + + /* Get read-only handle that we're sure can't be made read-write. */ + ret = open(template, O_PATH | O_CLOEXEC); + +out_umount: + /* + * Make sure the MNT_DETACH works, otherwise we could get remounted + * read-write and that would be quite bad (the fd would be made read-write + * too, invalidating the protection). + */ + if (umount2(template, MNT_DETACH) < 0) { + if (ret >= 0) + close(ret); + ret = -ENOTRECOVERABLE; + } + +out: + /* + * We don't care about unlink errors, the worst that happens is that + * there's an empty file left around in STATEDIR. 
+ */ + unlink(template); + return ret; +} + +static int copy_self_proc_exe(char **argv) { + char *exename; + int fd, mmfd, n_read, n_written; + struct stat st; + char buf[2048]; + + fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC); + if (fd == -1) { + fprintf(stderr, "open(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (fstat(fd, &st) == -1) { + fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n"); + close(fd); + return -1; + } + exename = basename(argv[0]); + mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC); + if (mmfd == -1) { + fprintf(stderr, "memfd_create(): %m\n"); + goto close_fd; + } + for (;;) { + n_read = read(fd, buf, sizeof(buf)); + if (n_read < 0) { + fprintf(stderr, "read(\"/proc/self/exe\"): %m\n"); + return -1; + } + if (n_read == 0) { + break; + } + n_written = write(mmfd, buf, n_read); + if (n_written < 0) { + fprintf(stderr, "write(anonfd): %m\n"); + goto close_fd; + } + if (n_written != n_read) { + fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read); + goto close_fd; + } + } + close(fd); + if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) { + fprintf(stderr, "Close_Fd sealing memfd copy: %m\n"); + goto close_mmfd; + } + + return mmfd; + +close_fd: + close(fd); +close_mmfd: + close(mmfd); + return -1; +} +static int containers_reexec(int flags) { + char **argv; + int fd = -1; + + argv = parse_proc_stringlist("/proc/self/cmdline"); + if (argv == NULL) { + return -1; + } + + if (flags & CLONE_NEWNS) + fd = try_bindfd(); + if (fd < 0) + fd = copy_self_proc_exe(argv); + if (fd < 0) + return fd; + + if (fexecve(fd, argv, environ) == -1) { + close(fd); + fprintf(stderr, "Error during reexec(...): %m\n"); + return -1; + } + close(fd); + return 0; +} + +void _containers_unshare(void) +{ + int flags, pidfd, continuefd, n, pgrp, sid, ctty; + char buf[2048]; + + flags = _containers_unshare_parse_envint("_Containers-unshare"); + if (flags == -1) { + return; + } + if ((flags & CLONE_NEWUSER) != 0) { + if (unshare(CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n"); + _check_proc_sys_file (_max_user_namespaces); + _check_proc_sys_file (_unprivileged_user_namespaces); + _exit(1); + } + } + pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); + if (pidfd != -1) { + snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); + size_t size = write(pidfd, buf, strlen(buf)); + if (size != strlen(buf)) { + fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); + _exit(1); + } + close(pidfd); + } + continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); + if (continuefd != -1) { + n = read(continuefd, buf, sizeof(buf)); + if (n > 0) { + fprintf(stderr, "Error: %.*s\n", n, buf); + _exit(1); + } + close(continuefd); + } + sid = _containers_unshare_parse_envint("_Containers-setsid"); + if (sid == 1) { + if (setsid() == -1) { + fprintf(stderr, "Error during setsid: %m\n"); + _exit(1); + } + } + pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); + if (pgrp == 1) { + if (setpgrp() == -1) { + fprintf(stderr, "Error during setpgrp: %m\n"); + _exit(1); + } + } + ctty = _containers_unshare_parse_envint("_Containers-ctty"); + if (ctty != -1) { + if (ioctl(ctty, TIOCSCTTY, 0) == -1) { + fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); + _exit(1); + } + } + if ((flags & CLONE_NEWUSER) != 0) { + if (setresgid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresgid(0): 
%m\n"); + _exit(1); + } + if (setresuid(0, 0, 0) != 0) { + fprintf(stderr, "Error during setresuid(0): %m\n"); + _exit(1); + } + } + if ((flags & ~CLONE_NEWUSER) != 0) { + if (unshare(flags & ~CLONE_NEWUSER) == -1) { + fprintf(stderr, "Error during unshare(...): %m\n"); + _exit(1); + } + } + if (containers_reexec(flags) != 0) { + _exit(1); + } + return; +} + +#endif // !UNSHARE_NO_CODE_AT_ALL diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go new file mode 100644 index 0000000000..a9210b0bf1 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare.go @@ -0,0 +1,34 @@ +package unshare + +import ( + "fmt" + "os" + "os/user" + "sync" + + "github.com/pkg/errors" +) + +var ( + homeDirOnce sync.Once + homeDirErr error + homeDir string +) + +// HomeDir returns the home directory for the current user. +func HomeDir() (string, error) { + homeDirOnce.Do(func() { + home := os.Getenv("HOME") + if home == "" { + usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) + if err != nil { + homeDir, homeDirErr = "", errors.Wrapf(err, "unable to resolve HOME directory") + return + } + homeDir, homeDirErr = usr.HomeDir, nil + return + } + homeDir, homeDirErr = home, nil + }) + return homeDir, homeDirErr +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go new file mode 100644 index 0000000000..b3f8099f6a --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go @@ -0,0 +1,10 @@ +// +build linux,cgo,!gccgo + +package unshare + +// #cgo CFLAGS: -Wall +// extern void _containers_unshare(void); +// void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go new file mode 100644 index 0000000000..2f95da7d8e --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go @@ -0,0 +1,25 @@ +// +build linux,cgo,gccgo + +package unshare + +// #cgo CFLAGS: -Wall -Wextra +// extern void _containers_unshare(void); +// void __attribute__((constructor)) init(void) { +// _containers_unshare(); +// } +import "C" + +// This next bit is straight out of libcontainer. + +// AlwaysFalse is here to stay false +// (and be exported so the compiler doesn't optimize out its reference) +var AlwaysFalse bool + +func init() { + if AlwaysFalse { + // by referencing this C init() in a noop test, it will ensure the compiler + // links in the C function. 
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 + C.init() + } +} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go new file mode 100644 index 0000000000..96b8575431 --- /dev/null +++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go @@ -0,0 +1,605 @@ +// +build linux + +package unshare + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "os/exec" + "os/user" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + + "github.com/containers/storage/pkg/idtools" + "github.com/containers/storage/pkg/reexec" + "github.com/opencontainers/runtime-spec/specs-go" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/syndtr/gocapability/capability" +) + +// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and +// handles setting ID maps and other related settings by triggering +// initialization code in the child. +type Cmd struct { + *exec.Cmd + UnshareFlags int + UseNewuidmap bool + UidMappings []specs.LinuxIDMapping // nolint: golint + UseNewgidmap bool + GidMappings []specs.LinuxIDMapping // nolint: golint + GidMappingsEnableSetgroups bool + Setsid bool + Setpgrp bool + Ctty *os.File + OOMScoreAdj *int + Hook func(pid int) error +} + +// Command creates a new Cmd which can be customized. +func Command(args ...string) *Cmd { + cmd := reexec.Command(args...) + return &Cmd{ + Cmd: cmd, + } +} + +func getRootlessUID() int { + uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Geteuid() +} + +func getRootlessGID() int { + gidEnv := getenv("_CONTAINERS_ROOTLESS_GID") + if gidEnv != "" { + u, _ := strconv.Atoi(gidEnv) + return u + } + + /* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. */ + uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") + if uidEnv != "" { + u, _ := strconv.Atoi(uidEnv) + return u + } + return os.Getegid() +} + +func (c *Cmd) Start() error { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + // Set an environment variable to tell the child to synchronize its startup. + if c.Env == nil { + c.Env = os.Environ() + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags)) + + // Please the libpod "rootless" package to find the expected env variables. + if IsRootless() { + c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done") + c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", getRootlessUID())) + c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", getRootlessGID())) + } + + // Create the pipe for reading the child's PID. + pidRead, pidWrite, err := os.Pipe() + if err != nil { + return errors.Wrapf(err, "error creating pid pipe") + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, pidWrite) + + // Create the pipe for letting the child know to proceed. + continueRead, continueWrite, err := os.Pipe() + if err != nil { + pidRead.Close() + pidWrite.Close() + return errors.Wrapf(err, "error creating pid pipe") + } + c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, continueRead) + + // Pass along other instructions. 
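Start above wires up a small handshake: each pipe or tty file is appended to ExtraFiles, and its child-side descriptor number (len(ExtraFiles)+3, since 0-2 are stdio) is advertised through a _Containers-* environment variable that the C constructor in unshare.c consumes. A minimal standalone sketch of just the fd-passing convention, assuming a Unix shell is available (this is not the vendored code):

    package main

    import (
        "fmt"
        "io"
        "os"
        "os/exec"
    )

    func main() {
        r, w, _ := os.Pipe()
        cmd := exec.Command("sh", "-c", "echo ready >&3")
        // Advertise the fd number before appending: ExtraFiles[i] becomes fd 3+i
        // in the child, because stdio occupies descriptors 0, 1 and 2.
        cmd.Env = append(os.Environ(),
            fmt.Sprintf("_Containers-pid-pipe=%d", len(cmd.ExtraFiles)+3))
        cmd.ExtraFiles = append(cmd.ExtraFiles, w)
        _ = cmd.Start()
        w.Close() // keep only the child's copy of the write end
        msg, _ := io.ReadAll(r)
        _ = cmd.Wait()
        fmt.Printf("child wrote: %s", msg)
    }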
+ if c.Setsid { + c.Env = append(c.Env, "_Containers-setsid=1") + } + if c.Setpgrp { + c.Env = append(c.Env, "_Containers-setpgrp=1") + } + if c.Ctty != nil { + c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) + c.ExtraFiles = append(c.ExtraFiles, c.Ctty) + } + + // Make sure we clean up our pipes. + defer func() { + if pidRead != nil { + pidRead.Close() + } + if pidWrite != nil { + pidWrite.Close() + } + if continueRead != nil { + continueRead.Close() + } + if continueWrite != nil { + continueWrite.Close() + } + }() + + // Start the new process. + err = c.Cmd.Start() + if err != nil { + return err + } + + // Close the ends of the pipes that the parent doesn't need. + continueRead.Close() + continueRead = nil + pidWrite.Close() + pidWrite = nil + + // Read the child's PID from the pipe. + pidString := "" + b := new(bytes.Buffer) + if _, err := io.Copy(b, pidRead); err != nil { + return errors.Wrapf(err, "error reading child PID") + } + pidString = b.String() + pid, err := strconv.Atoi(pidString) + if err != nil { + fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) + return errors.Wrapf(err, "error parsing PID %q", pidString) + } + pidString = fmt.Sprintf("%d", pid) + + // If we created a new user namespace, set any specified mappings. + if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 { + // Always set "setgroups". + setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) + if err != nil { + fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) + return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString) + } + defer setgroups.Close() + if c.GidMappingsEnableSetgroups { + if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) + return errors.Wrapf(err, "error opening \"allow\" to /proc/%s/setgroups", pidString) + } + } else { + if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { + fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) + return errors.Wrapf(err, "error writing \"deny\" to /proc/%s/setgroups", pidString) + } + } + + if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { + uidmap, gidmap, err := GetHostIDMappings("") + if err != nil { + fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) + return errors.Wrapf(err, "error reading ID mappings in parent") + } + if len(c.UidMappings) == 0 { + c.UidMappings = uidmap + for i := range c.UidMappings { + c.UidMappings[i].HostID = c.UidMappings[i].ContainerID + } + } + if len(c.GidMappings) == 0 { + c.GidMappings = gidmap + for i := range c.GidMappings { + c.GidMappings[i].HostID = c.GidMappings[i].ContainerID + } + } + } + + if len(c.GidMappings) > 0 { + // Build the GID map, since writing to the proc file has to be done all at once. + g := new(bytes.Buffer) + for _, m := range c.GidMappings { + fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) + } + gidmapSet := false + // Set the GID map. + if c.UseNewgidmap { + cmd := exec.Command("newgidmap", append([]string{pidString}, strings.Fields(strings.Replace(g.String(), "\n", " ", -1))...)...) 
+			g.Reset()
+			cmd.Stdout = g
+			cmd.Stderr = g
+			err := cmd.Run()
+			if err == nil {
+				gidmapSet = true
+			} else {
+				logrus.Warnf("error running newgidmap: %v: %s", err, g.String())
+				logrus.Warnf("falling back to single mapping")
+				g.Reset()
+				g.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Getegid())))
+			}
+		}
+		if !gidmapSet {
+			if c.UseNewgidmap {
+				setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+				if err != nil {
+					fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err)
+					return errors.Wrapf(err, "error opening /proc/%s/setgroups", pidString)
+				}
+				defer setgroups.Close()
+				if _, err := fmt.Fprintf(setgroups, "deny"); err != nil {
+					fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err)
+					return errors.Wrapf(err, "error writing 'deny' to /proc/%s/setgroups", pidString)
+				}
+			}
+			gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+			if err != nil {
+				fmt.Fprintf(continueWrite, "error opening /proc/%s/gid_map: %v", pidString, err)
+				return errors.Wrapf(err, "error opening /proc/%s/gid_map", pidString)
+			}
+			defer gidmap.Close()
+			if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil {
+				fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err)
+				return errors.Wrapf(err, "error writing %q to /proc/%s/gid_map", g.String(), pidString)
+			}
+		}
+	}
+
+	if len(c.UidMappings) > 0 {
+		// Build the UID map, since writing to the proc file has to be done all at once.
+		u := new(bytes.Buffer)
+		for _, m := range c.UidMappings {
+			fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
+		}
+		uidmapSet := false
+		// Set the UID map.
+		if c.UseNewuidmap {
+			cmd := exec.Command("newuidmap", append([]string{pidString}, strings.Fields(strings.Replace(u.String(), "\n", " ", -1))...)...)
+			u.Reset()
+			cmd.Stdout = u
+			cmd.Stderr = u
+			err := cmd.Run()
+			if err == nil {
+				uidmapSet = true
+			} else {
+				logrus.Warnf("error running newuidmap: %v: %s", err, u.String())
+				logrus.Warnf("falling back to single mapping")
+				u.Reset()
+				u.Write([]byte(fmt.Sprintf("0 %d 1\n", os.Geteuid())))
+			}
+		}
+		if !uidmapSet {
+			uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+			if err != nil {
+				fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err)
+				return errors.Wrapf(err, "error opening /proc/%s/uid_map", pidString)
+			}
+			defer uidmap.Close()
+			if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil {
+				fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err)
+				return errors.Wrapf(err, "error writing %q to /proc/%s/uid_map", u.String(), pidString)
+			}
+		}
+	}
+}
+
+if c.OOMScoreAdj != nil {
+	oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0)
+	if err != nil {
+		fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err)
+		return errors.Wrapf(err, "error opening /proc/%s/oom_score_adj", pidString)
+	}
+	defer oomScoreAdj.Close()
+	if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil {
+		fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", *c.OOMScoreAdj, err)
+		return errors.Wrapf(err, "error writing \"%d\" to /proc/%s/oom_score_adj", *c.OOMScoreAdj, pidString)
+	}
+}
+// Run any additional setup that we want to do before the child starts
+// running the real command.
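The loops above serialize each mapping as one "ContainerID HostID Size" line; /proc/<pid>/uid_map and gid_map only accept a single atomic write, which is why the whole buffer is built first, and the same triples are flattened into argv form when delegating to newuidmap/newgidmap. A sketch of the serialization (the sample mappings are illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "strings"
    )

    type idMap struct{ ContainerID, HostID, Size uint32 }

    func main() {
        mappings := []idMap{{0, 1000, 1}, {1, 100000, 65536}}
        buf := new(bytes.Buffer)
        for _, m := range mappings {
            // One "container host size" triple per line, written in one shot.
            fmt.Fprintf(buf, "%d %d %d\n", m.ContainerID, m.HostID, m.Size)
        }
        fmt.Print(buf.String())
        // newuidmap wants the same numbers as flat argv words:
        fmt.Println(strings.Fields(buf.String()))
    }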
+	if c.Hook != nil {
+		if err = c.Hook(pid); err != nil {
+			fmt.Fprintf(continueWrite, "hook error: %v", err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (c *Cmd) Run() error {
+	if err := c.Start(); err != nil {
+		return err
+	}
+	return c.Wait()
+}
+
+func (c *Cmd) CombinedOutput() ([]byte, error) {
+	return nil, errors.New("unshare: CombinedOutput() not implemented")
+}
+
+func (c *Cmd) Output() ([]byte, error) {
+	return nil, errors.New("unshare: Output() not implemented")
+}
+
+var (
+	isRootlessOnce sync.Once
+	isRootless     bool
+)
+
+const (
+	// UsernsEnvName is the environment variable which, when set, indicates
+	// that we are running in rootless mode.
+	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode.
+func IsRootless() bool {
+	isRootlessOnce.Do(func() {
+		isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != ""
+	})
+	return isRootless
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS.
+func GetRootlessUID() int {
+	uidEnv := getenv("_CONTAINERS_ROOTLESS_UID")
+	if uidEnv != "" {
+		u, _ := strconv.Atoi(uidEnv)
+		return u
+	}
+	return os.Getuid()
+}
+
+// RootlessEnv returns the environment settings for rootless containers.
+func RootlessEnv() []string {
+	return append(os.Environ(), UsernsEnvName+"=done")
+}
+
+type Runnable interface {
+	Run() error
+}
+
+func bailOnError(err error, format string, a ...interface{}) { // nolint: golint,goprintffuncname
+	if err != nil {
+		if format != "" {
+			logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err)
+		} else {
+			logrus.Errorf("%v", err)
+		}
+		os.Exit(1)
+	}
+}
+
+// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace.
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+	// If we've already been through this once, no need to try again.
+	if os.Geteuid() == 0 && IsRootless() {
+		return
+	}
+
+	var uidNum, gidNum uint64
+	// Figure out who we are.
+	me, err := user.Current()
+	if !os.IsNotExist(err) {
+		bailOnError(err, "error determining current user")
+		uidNum, err = strconv.ParseUint(me.Uid, 10, 32)
+		bailOnError(err, "error parsing current UID %s", me.Uid)
+		gidNum, err = strconv.ParseUint(me.Gid, 10, 32)
+		bailOnError(err, "error parsing current GID %s", me.Gid)
+	}
+
+	runtime.LockOSThread()
+	defer runtime.UnlockOSThread()
+
+	// ID mappings to use to reexec ourselves.
+	var uidmap, gidmap []specs.LinuxIDMapping
+	if uidNum != 0 || evenForRoot {
+		// Read the set of ID mappings that we're allowed to use. Each
+		// range in the /etc/subuid and /etc/subgid files is a starting
+		// host ID and a range size.
+		uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username)
+		if err != nil {
+			logrus.Warnf("error reading allowed ID mappings: %v", err)
+		}
+		if len(uidmap) == 0 {
+			logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username)
+		}
+		if len(gidmap) == 0 {
+			logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username)
+		}
+		// Map our UID and GID, then the subuid and subgid ranges,
+		// consecutively, starting at 0, to get the mappings to use for
+		// a copy of ourselves.
+		uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...)
+		gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...)
+		var rangeStart uint32
+		for i := range uidmap {
+			uidmap[i].ContainerID = rangeStart
+			rangeStart += uidmap[i].Size
+		}
+		rangeStart = 0
+		for i := range gidmap {
+			gidmap[i].ContainerID = rangeStart
+			rangeStart += gidmap[i].Size
+		}
+	} else {
+		// If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able
+		// to use unshare(), so don't bother creating a new user namespace at this point.
+		capabilities, err := capability.NewPid(0)
+		bailOnError(err, "error reading the current capabilities sets")
+		if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) {
+			return
+		}
+		// Read the set of ID mappings that we're currently using.
+		uidmap, gidmap, err = GetHostIDMappings("")
+		bailOnError(err, "error reading current ID mappings")
+		// Just reuse them.
+		for i := range uidmap {
+			uidmap[i].HostID = uidmap[i].ContainerID
+		}
+		for i := range gidmap {
+			gidmap[i].HostID = gidmap[i].ContainerID
+		}
+	}
+
+	// Unlike most uses of reexec or unshare, we're using a name that
+	// _won't_ be recognized as a registered reexec handler, since we
+	// _want_ to fall through reexec.Init() to the normal main().
+	cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...)
+
+	// If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again.
+	err = os.Setenv(UsernsEnvName, "1")
+	bailOnError(err, "error setting %s=1 in environment", UsernsEnvName)
+
+	// Set the default isolation type to use the "rootless" method.
+	if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present {
+		if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil {
+			logrus.Errorf("error setting BUILDAH_ISOLATION=rootless in environment: %v", err)
+			os.Exit(1)
+		}
+	}
+
+	// Reuse our stdio.
+	cmd.Stdin = os.Stdin
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+
+	// Set up a new user namespace with the ID mapping.
+	cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS
+	cmd.UseNewuidmap = uidNum != 0
+	cmd.UidMappings = uidmap
+	cmd.UseNewgidmap = uidNum != 0
+	cmd.GidMappings = gidmap
+	cmd.GidMappingsEnableSetgroups = true
+
+	// Finish up.
+	logrus.Debugf("running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings)
+	ExecRunnable(cmd, nil)
+}
+
+// ExecRunnable runs the specified unshare command, captures its exit status,
+// and exits with the same status.
+func ExecRunnable(cmd Runnable, cleanup func()) {
+	exit := func(status int) {
+		if cleanup != nil {
+			cleanup()
+		}
+		os.Exit(status)
+	}
+	if err := cmd.Run(); err != nil {
+		if exitError, ok := errors.Cause(err).(*exec.ExitError); ok {
+			if exitError.ProcessState.Exited() {
+				if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok {
+					if waitStatus.Exited() {
+						logrus.Errorf("%v", exitError)
+						exit(waitStatus.ExitStatus())
+					}
+					if waitStatus.Signaled() {
+						logrus.Errorf("%v", exitError)
+						exit(int(waitStatus.Signal()) + 128)
+					}
+				}
+			}
+		}
+		logrus.Errorf("%v", err)
+		logrus.Errorf("(unable to determine exit status)")
+		exit(1)
+	}
+	exit(0)
+}
+
+// getHostIDMappings reads mappings from the named node under /proc.
+func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { + var mappings []specs.LinuxIDMapping + f, err := os.Open(path) + if err != nil { + return nil, errors.Wrapf(err, "error reading ID mappings from %q", path) + } + defer f.Close() + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + fields := strings.Fields(line) + if len(fields) != 3 { + return nil, errors.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) + } + cid, err := strconv.ParseUint(fields[0], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing container ID value %q from line %q in %q", fields[0], line, path) + } + hid, err := strconv.ParseUint(fields[1], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing host ID value %q from line %q in %q", fields[1], line, path) + } + size, err := strconv.ParseUint(fields[2], 10, 32) + if err != nil { + return nil, errors.Wrapf(err, "error parsing size value %q from line %q in %q", fields[2], line, path) + } + mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) + } + return mappings, nil +} + +// GetHostIDMappings reads mappings for the specified process (or the current +// process if pid is "self" or an empty string) from the kernel. +func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + if pid == "" { + pid = "self" + } + uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) + if err != nil { + return nil, nil, err + } + gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) + if err != nil { + return nil, nil, err + } + return uidmap, gidmap, nil +} + +// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. +func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { + mappings, err := idtools.NewIDMappings(user, group) + if err != nil { + return nil, nil, errors.Wrapf(err, "error reading subuid mappings for user %q and subgid mappings for group %q", user, group) + } + var uidmap, gidmap []specs.LinuxIDMapping + for _, m := range mappings.UIDs() { + uidmap = append(uidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + for _, m := range mappings.GIDs() { + gidmap = append(gidmap, specs.LinuxIDMapping{ + ContainerID: uint32(m.ContainerID), + HostID: uint32(m.HostID), + Size: uint32(m.Size), + }) + } + return uidmap, gidmap, nil +} + +// ParseIDMappings parses mapping triples. 
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
+	if err != nil {
+		return nil, nil, err
+	}
+	return uid, gid, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
new file mode 100644
index 0000000000..bf4d567b8a
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
@@ -0,0 +1,45 @@
+// +build !linux
+
+package unshare
+
+import (
+	"os"
+
+	"github.com/containers/storage/pkg/idtools"
+	"github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+	// UsernsEnvName is the environment variable which, when set, indicates
+	// that we are running in rootless mode.
+	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
+)
+
+// IsRootless tells us if we are running in rootless mode.
+func IsRootless() bool {
+	return false
+}
+
+// GetRootlessUID returns the UID of the user in the parent userNS.
+func GetRootlessUID() int {
+	return os.Getuid()
+}
+
+// RootlessEnv returns the environment settings for rootless containers.
+func RootlessEnv() []string {
+	return append(os.Environ(), UsernsEnvName+"=")
+}
+
+// MaybeReexecUsingUserNamespace re-execs the process in a new user namespace.
+func MaybeReexecUsingUserNamespace(evenForRoot bool) {
+}
+
+// GetHostIDMappings reads mappings for the specified process (or the current
+// process if pid is "self" or an empty string) from the kernel.
+func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
+	return nil, nil, nil
+}
+
+// ParseIDMappings parses mapping triples.
+func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
+	return nil, nil, nil
+}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
new file mode 100644
index 0000000000..d5f2d22a80
--- /dev/null
+++ b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
@@ -0,0 +1,10 @@
+// +build !linux,cgo
+
+package unshare
+
+// Go refuses to compile a subpackage with CGO_ENABLED=1 if there is a *.c file but no 'import "C"'.
+// OTOH if we did have an 'import "C"', the Linux-only code would fail to compile.
+// So, satisfy the Go compiler by using import "C" but #ifdef-ing out all of the code.
+
+// #cgo CPPFLAGS: -DUNSHARE_NO_CODE_AT_ALL
+import "C"
diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE new file mode 100644 index 0000000000..23a0ada2fb --- /dev/null +++ b/vendor/github.com/coreos/go-semver/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2018 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go new file mode 100644 index 0000000000..76cf4852c7 --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -0,0 +1,296 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Semantic Versions http://semver.org +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +type Version struct { + Major int64 + Minor int64 + Patch int64 + PreRelease PreRelease + Metadata string +} + +type PreRelease string + +func splitOff(input *string, delim string) (val string) { + parts := strings.SplitN(*input, delim, 2) + + if len(parts) == 2 { + *input = parts[0] + val = parts[1] + } + + return val +} + +func New(version string) *Version { + return Must(NewVersion(version)) +} + +func NewVersion(version string) (*Version, error) { + v := Version{} + + if err := v.Set(version); err != nil { + return nil, err + } + + return &v, nil +} + +// Must is a helper for wrapping NewVersion and will panic if err is not nil. +func Must(v *Version, err error) *Version { + if err != nil { + panic(err) + } + return v +} + +// Set parses and updates v from the given version string. 
Implements flag.Value +func (v *Version) Set(version string) error { + metadata := splitOff(&version, "+") + preRelease := PreRelease(splitOff(&version, "-")) + dotParts := strings.SplitN(version, ".", 3) + + if len(dotParts) != 3 { + return fmt.Errorf("%s is not in dotted-tri format", version) + } + + if err := validateIdentifier(string(preRelease)); err != nil { + return fmt.Errorf("failed to validate pre-release: %v", err) + } + + if err := validateIdentifier(metadata); err != nil { + return fmt.Errorf("failed to validate metadata: %v", err) + } + + parsed := make([]int64, 3, 3) + + for i, v := range dotParts[:3] { + val, err := strconv.ParseInt(v, 10, 64) + parsed[i] = val + if err != nil { + return err + } + } + + v.Metadata = metadata + v.PreRelease = preRelease + v.Major = parsed[0] + v.Minor = parsed[1] + v.Patch = parsed[2] + return nil +} + +func (v Version) String() string { + var buffer bytes.Buffer + + fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) + + if v.PreRelease != "" { + fmt.Fprintf(&buffer, "-%s", v.PreRelease) + } + + if v.Metadata != "" { + fmt.Fprintf(&buffer, "+%s", v.Metadata) + } + + return buffer.String() +} + +func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data string + if err := unmarshal(&data); err != nil { + return err + } + return v.Set(data) +} + +func (v Version) MarshalJSON() ([]byte, error) { + return []byte(`"` + v.String() + `"`), nil +} + +func (v *Version) UnmarshalJSON(data []byte) error { + l := len(data) + if l == 0 || string(data) == `""` { + return nil + } + if l < 2 || data[0] != '"' || data[l-1] != '"' { + return errors.New("invalid semver string") + } + return v.Set(string(data[1 : l-1])) +} + +// Compare tests if v is less than, equal to, or greater than versionB, +// returning -1, 0, or +1 respectively. +func (v Version) Compare(versionB Version) int { + if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 { + return cmp + } + return preReleaseCompare(v, versionB) +} + +// Equal tests if v is equal to versionB. +func (v Version) Equal(versionB Version) bool { + return v.Compare(versionB) == 0 +} + +// LessThan tests if v is less than versionB. +func (v Version) LessThan(versionB Version) bool { + return v.Compare(versionB) < 0 +} + +// Slice converts the comparable parts of the semver into a slice of integers. +func (v Version) Slice() []int64 { + return []int64{v.Major, v.Minor, v.Patch} +} + +func (p PreRelease) Slice() []string { + preRelease := string(p) + return strings.Split(preRelease, ".") +} + +func preReleaseCompare(versionA Version, versionB Version) int { + a := versionA.PreRelease + b := versionB.PreRelease + + /* Handle the case where if two versions are otherwise equal it is the + * one without a PreRelease that is greater */ + if len(a) == 0 && (len(b) > 0) { + return 1 + } else if len(b) == 0 && (len(a) > 0) { + return -1 + } + + // If there is a prerelease, check and compare each part. + return recursivePreReleaseCompare(a.Slice(), b.Slice()) +} + +func recursiveCompare(versionA []int64, versionB []int64) int { + if len(versionA) == 0 { + return 0 + } + + a := versionA[0] + b := versionB[0] + + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursiveCompare(versionA[1:], versionB[1:]) +} + +func recursivePreReleaseCompare(versionA []string, versionB []string) int { + // A larger set of pre-release fields has a higher precedence than a smaller set, + // if all of the preceding identifiers are equal. 
+ if len(versionA) == 0 { + if len(versionB) > 0 { + return -1 + } + return 0 + } else if len(versionB) == 0 { + // We're longer than versionB so return 1. + return 1 + } + + a := versionA[0] + b := versionB[0] + + aInt := false + bInt := false + + aI, err := strconv.Atoi(versionA[0]) + if err == nil { + aInt = true + } + + bI, err := strconv.Atoi(versionB[0]) + if err == nil { + bInt = true + } + + // Numeric identifiers always have lower precedence than non-numeric identifiers. + if aInt && !bInt { + return -1 + } else if !aInt && bInt { + return 1 + } + + // Handle Integer Comparison + if aInt && bInt { + if aI > bI { + return 1 + } else if aI < bI { + return -1 + } + } + + // Handle String Comparison + if a > b { + return 1 + } else if a < b { + return -1 + } + + return recursivePreReleaseCompare(versionA[1:], versionB[1:]) +} + +// BumpMajor increments the Major field by 1 and resets all other fields to their default values +func (v *Version) BumpMajor() { + v.Major += 1 + v.Minor = 0 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpMinor increments the Minor field by 1 and resets all other fields to their default values +func (v *Version) BumpMinor() { + v.Minor += 1 + v.Patch = 0 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// BumpPatch increments the Patch field by 1 and resets all other fields to their default values +func (v *Version) BumpPatch() { + v.Patch += 1 + v.PreRelease = PreRelease("") + v.Metadata = "" +} + +// validateIdentifier makes sure the provided identifier satisfies semver spec +func validateIdentifier(id string) error { + if id != "" && !reIdentifier.MatchString(id) { + return fmt.Errorf("%s is not a valid semver identifier", id) + } + return nil +} + +// reIdentifier is a regular expression used to check that pre-release and metadata +// identifiers satisfy the spec requirements +var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`) diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go new file mode 100644 index 0000000000..e256b41a5d --- /dev/null +++ b/vendor/github.com/coreos/go-semver/semver/sort.go @@ -0,0 +1,38 @@ +// Copyright 2013-2015 CoreOS, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package semver + +import ( + "sort" +) + +type Versions []*Version + +func (s Versions) Len() int { + return len(s) +} + +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Versions) Less(i, j int) bool { + return s[i].LessThan(*s[j]) +} + +// Sort sorts the given slice of Version +func Sort(versions []*Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/dgraph-io/ristretto/.deepsource.toml b/vendor/github.com/dgraph-io/ristretto/.deepsource.toml new file mode 100644 index 0000000000..40609eff3f --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/.deepsource.toml @@ -0,0 +1,17 @@ +version = 1 + +test_patterns = [ + '**/*_test.go' +] + +exclude_patterns = [ + +] + +[[analyzers]] +name = 'go' +enabled = true + + + [analyzers.meta] + import_path = 'github.com/dgraph-io/ristretto' diff --git a/vendor/github.com/dgraph-io/ristretto/LICENSE b/vendor/github.com/dgraph-io/ristretto/LICENSE new file mode 100644 index 0000000000..d9a10c0d8e --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/LICENSE @@ -0,0 +1,176 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/dgraph-io/ristretto/README.md b/vendor/github.com/dgraph-io/ristretto/README.md new file mode 100644 index 0000000000..2041d190c4 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/README.md @@ -0,0 +1,209 @@ +# Ristretto +[![Go Doc](https://img.shields.io/badge/godoc-reference-blue.svg)](http://godoc.org/github.com/dgraph-io/ristretto) +[![Go Report Card](https://img.shields.io/badge/go%20report-A%2B-brightgreen)](https://goreportcard.com/report/github.com/dgraph-io/ristretto) +[![Coverage](https://img.shields.io/badge/coverage-100%25-brightgreen)](https://gocover.io/github.com/dgraph-io/ristretto) +![Tests](https://github.com/dgraph-io/ristretto/workflows/tests/badge.svg) + +Ristretto is a fast, concurrent cache library built with a focus on performance and correctness. + +The motivation to build Ristretto comes from the need for a contention-free +cache in [Dgraph][]. + +[Dgraph]: https://github.com/dgraph-io/dgraph + +## Features + +* **High Hit Ratios** - with our unique admission/eviction policy pairing, Ristretto's performance is best in class. + * **Eviction: SampledLFU** - on par with exact LRU and better performance on Search and Database traces. + * **Admission: TinyLFU** - extra performance with little memory overhead (12 bits per counter). +* **Fast Throughput** - we use a variety of techniques for managing contention and the result is excellent throughput. +* **Cost-Based Eviction** - any large new item deemed valuable can evict multiple smaller items (cost could be anything). +* **Fully Concurrent** - you can use as many goroutines as you want with little throughput degradation. +* **Metrics** - optional performance metrics for throughput, hit ratios, and other stats. +* **Simple API** - just figure out your ideal `Config` values and you're off and running. + +## Status + +Ristretto is usable but still under active development. We expect it to be production ready in the near future. 
+
+## Table of Contents
+
+* [Usage](#Usage)
+	* [Example](#Example)
+	* [Config](#Config)
+		* [NumCounters](#Config)
+		* [MaxCost](#Config)
+		* [BufferItems](#Config)
+		* [Metrics](#Config)
+		* [OnEvict](#Config)
+		* [KeyToHash](#Config)
+		* [Cost](#Config)
+* [Benchmarks](#Benchmarks)
+	* [Hit Ratios](#Hit-Ratios)
+		* [Search](#Search)
+		* [Database](#Database)
+		* [Looping](#Looping)
+		* [CODASYL](#CODASYL)
+	* [Throughput](#Throughput)
+		* [Mixed](#Mixed)
+		* [Read](#Read)
+		* [Write](#Write)
+* [FAQ](#FAQ)
+
+## Usage
+
+### Example
+
+```go
+func main() {
+	cache, err := ristretto.NewCache(&ristretto.Config{
+		NumCounters: 1e7,     // number of keys to track frequency of (10M).
+		MaxCost:     1 << 30, // maximum cost of cache (1GB).
+		BufferItems: 64,      // number of keys per Get buffer.
+	})
+	if err != nil {
+		panic(err)
+	}
+
+	// set a value with a cost of 1
+	cache.Set("key", "value", 1)
+
+	// wait for value to pass through buffers
+	time.Sleep(10 * time.Millisecond)
+
+	value, found := cache.Get("key")
+	if !found {
+		panic("missing value")
+	}
+	fmt.Println(value)
+	cache.Del("key")
+}
+```
+
+### Config
+
+The `Config` struct is passed to `NewCache` when creating Ristretto instances (see the example above).
+
+**NumCounters** `int64`
+
+NumCounters is the number of 4-bit access counters to keep for admission and eviction. We've seen good performance in setting this to 10x the number of items you expect to keep in the cache when full.
+
+For example, if you expect each item to have a cost of 1 and MaxCost is 100, set NumCounters to 1,000. Or, if you use variable cost values but expect the cache to hold around 10,000 items when full, set NumCounters to 100,000. The important thing is the *number of unique items* in the full cache, not necessarily the MaxCost value.
+
+**MaxCost** `int64`
+
+MaxCost is how eviction decisions are made. For example, if MaxCost is 100 and a new item with a cost of 1 increases total cache cost to 101, 1 item will be evicted.
+
+MaxCost can also be used to denote the max size in bytes. For example, if MaxCost is 1,000,000 (1MB) and the cache is full with 1,000 1KB items, a new item (that's accepted) would cause 5 1KB items to be evicted.
+
+MaxCost could be anything as long as it matches how you're using the cost values when calling Set.
+
+**BufferItems** `int64`
+
+BufferItems is the size of the Get buffers. The best value we've found for this is 64.
+
+If for some reason you see Get performance decreasing with lots of contention (you shouldn't), try increasing this value in increments of 64. This is a fine-tuning mechanism and you probably won't have to touch this.
+
+**Metrics** `bool`
+
+Metrics is true when you want real-time logging of a variety of stats. The reason this is a Config flag is because there's a 10% throughput performance overhead.
+
+**OnEvict** `func(key, conflict uint64, value interface{}, cost int64)`
+
+OnEvict is called for every eviction.
+
+**KeyToHash** `func(key interface{}) (uint64, uint64)`
+
+KeyToHash is the hashing algorithm used for every key. If this is nil, Ristretto has a variety of [defaults depending on the underlying interface type](https://github.com/dgraph-io/ristretto/blob/master/z/z.go#L19-L41).
+
+Note that if you want 128-bit hashes you should use both returned `uint64`
+values; otherwise just fill the first `uint64`, leave the second at `0`, and it
+will behave like any 64-bit hash.
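+
+For instance, assuming string keys, a custom `KeyToHash` that behaves like a
+plain 64-bit hash might use `hash/fnv` from the standard library (a minimal
+sketch, not a recommendation for any particular hash function):
+
+```go
+import "hash/fnv"
+
+cache, err := ristretto.NewCache(&ristretto.Config{
+	NumCounters: 1e4,
+	MaxCost:     1 << 20,
+	BufferItems: 64,
+	KeyToHash: func(key interface{}) (uint64, uint64) {
+		h := fnv.New64a()
+		h.Write([]byte(key.(string))) // assumes every key is a string
+		// Returning 0 as the second (conflict) hash makes this behave
+		// like an ordinary 64-bit hash.
+		return h.Sum64(), 0
+	},
+})
+```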
+
+**Cost** `func(value interface{}) int64`
+
+Cost is an optional function you can pass to the Config in order to evaluate
+item cost at runtime, and only for the Set calls that aren't dropped (this is
+useful if calculating item cost is particularly expensive and you don't want to
+waste time on items that will be dropped anyway).
+
+To signal to Ristretto that you'd like to use this Cost function:
+
+1. Set the Cost field to a non-nil function.
+2. When calling Set for new items or item updates, use a `cost` of 0.
+
+## Benchmarks
+
+The benchmarks can be found in https://github.com/dgraph-io/benchmarks/tree/master/cachebench/ristretto.
+
+### Hit Ratios
+
+#### Search
+
+This trace is described as "disk read accesses initiated by a large commercial
+search engine in response to various web search requests."
+
+*(hit ratio graphs omitted)*
+
+#### Database
+
+This trace is described as "a database server running at a commercial site
+running an ERP application on top of a commercial database."
+
+*(hit ratio graphs omitted)*
+
+#### Looping
+
+This trace demonstrates a looping access pattern.
+
+*(hit ratio graphs omitted)*
+
+#### CODASYL
+
+This trace is described as "references to a CODASYL database for a one hour
+period."
+
+*(hit ratio graphs omitted)*
+
+### Throughput
+
+All throughput benchmarks were run on an Intel Core i7-8700K (3.7GHz) with 16 GB
+of RAM.
+
+#### Mixed
+
+*(throughput graphs omitted)*
+
+#### Read
+
+*(throughput graphs omitted)*
+
+#### Write
+
+*(throughput graphs omitted)*
+ +## FAQ + +### How are you achieving this performance? What shortcuts are you taking? + +We go into detail in the [Ristretto blog post](https://blog.dgraph.io/post/introducing-ristretto-high-perf-go-cache/), but in short: our throughput performance can be attributed to a mix of batching and eventual consistency. Our hit ratio performance is mostly due to an excellent [admission policy](https://arxiv.org/abs/1512.00727) and SampledLFU eviction policy. + +As for "shortcuts," the only thing Ristretto does that could be construed as one is dropping some Set calls. That means a Set call for a new item (updates are guaranteed) isn't guaranteed to make it into the cache. The new item could be dropped at two points: when passing through the Set buffer or when passing through the admission policy. However, this doesn't affect hit ratios much at all as we expect the most popular items to be Set multiple times and eventually make it in the cache. + +### Is Ristretto distributed? + +No, it's just like any other Go library that you can import into your project and use in a single process. diff --git a/vendor/github.com/dgraph-io/ristretto/cache.go b/vendor/github.com/dgraph-io/ristretto/cache.go new file mode 100644 index 0000000000..6fdab869b0 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/cache.go @@ -0,0 +1,488 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// Ristretto is a fast, fixed size, in-memory cache with a dual focus on +// throughput and hit ratio performance. You can easily add Ristretto to an +// existing system and keep the most valuable data where you need it. +package ristretto + +import ( + "bytes" + "errors" + "fmt" + "sync/atomic" + + "github.com/dgraph-io/ristretto/z" +) + +const ( + // TODO: find the optimal value for this or make it configurable + setBufSize = 32 * 1024 +) + +// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission +// policy and a Sampled LFU eviction policy. You can use the same Cache instance +// from as many goroutines as you want. +type Cache struct { + // store is the central concurrent hashmap where key-value items are stored + store store + // policy determines what gets let in to the cache and what gets kicked out + policy policy + // getBuf is a custom ring buffer implementation that gets pushed to when + // keys are read + getBuf *ringBuffer + // setBuf is a buffer allowing us to batch/drop Sets during times of high + // contention + setBuf chan *item + // onEvict is called for item evictions + onEvict func(uint64, uint64, interface{}, int64) + // KeyToHash function is used to customize the key hashing algorithm. + // Each key will be hashed using the provided function. If keyToHash value + // is not set, the default keyToHash function is used. 
+	keyToHash func(interface{}) (uint64, uint64)
+	// stop is used to stop the processItems goroutine
+	stop chan struct{}
+	// cost calculates cost from a value
+	cost func(value interface{}) int64
+	// Metrics contains a running log of important statistics like hits, misses,
+	// and dropped items
+	Metrics *Metrics
+}
+
+// Config is passed to NewCache for creating new Cache instances.
+type Config struct {
+	// NumCounters determines the number of counters (keys) to keep that hold
+	// access frequency information. It's generally a good idea to have more
+	// counters than the max cache capacity, as this will improve eviction
+	// accuracy and subsequent hit ratios.
+	//
+	// For example, if you expect your cache to hold 1,000,000 items when full,
+	// NumCounters should be 10,000,000 (10x). Each counter takes up 4 bits, so
+	// keeping 10,000,000 counters would require 5MB of memory.
+	NumCounters int64
+	// MaxCost can be considered as the cache capacity, in whatever units you
+	// choose to use.
+	//
+	// For example, if you want the cache to have a max capacity of 100MB, you
+	// would set MaxCost to 100,000,000 and pass an item's number of bytes as
+	// the `cost` parameter for calls to Set. If new items are accepted, the
+	// eviction process will take care of making room for the new item and not
+	// overflowing the MaxCost value.
+	MaxCost int64
+	// BufferItems determines the size of Get buffers.
+	//
+	// Unless you have a rare use case, using `64` as the BufferItems value
+	// results in good performance.
+	BufferItems int64
+	// Metrics determines whether cache statistics are kept during the cache's
+	// lifetime. There *is* some overhead to keeping statistics, so you should
+	// only set this flag to true when testing or throughput performance isn't a
+	// major factor.
+	Metrics bool
+	// OnEvict is called for every eviction and passes the hashed key, value,
+	// and cost to the function.
+	OnEvict func(key, conflict uint64, value interface{}, cost int64)
+	// KeyToHash function is used to customize the key hashing algorithm.
+	// Each key will be hashed using the provided function. If keyToHash value
+	// is not set, the default keyToHash function is used.
+	KeyToHash func(key interface{}) (uint64, uint64)
+	// Cost evaluates a value and outputs a corresponding cost. This function
+	// is run after Set is called for a new item or an item update with a cost
+	// param of 0.
+	Cost func(value interface{}) int64
+}
+
+type itemFlag byte
+
+const (
+	itemNew itemFlag = iota
+	itemDelete
+	itemUpdate
+)
+
+// item is passed to setBuf so items can eventually be added to the cache.
+type item struct {
+	flag     itemFlag
+	key      uint64
+	conflict uint64
+	value    interface{}
+	cost     int64
+}
+
+// NewCache returns a new Cache instance along with any configuration errors.
+func NewCache(config *Config) (*Cache, error) {
+	switch {
+	case config.NumCounters == 0:
+		return nil, errors.New("NumCounters can't be zero.")
+	case config.MaxCost == 0:
+		return nil, errors.New("MaxCost can't be zero.")
+	case config.BufferItems == 0:
+		return nil, errors.New("BufferItems can't be zero.")
+	}
+	policy := newPolicy(config.NumCounters, config.MaxCost)
+	cache := &Cache{
+		store:     newStore(),
+		policy:    policy,
+		getBuf:    newRingBuffer(policy, config.BufferItems),
+		setBuf:    make(chan *item, setBufSize),
+		onEvict:   config.OnEvict,
+		keyToHash: config.KeyToHash,
+		stop:      make(chan struct{}),
+		cost:      config.Cost,
+	}
+	if cache.keyToHash == nil {
+		cache.keyToHash = z.KeyToHash
+	}
+	if config.Metrics {
+		cache.collectMetrics()
+	}
+	// NOTE: benchmarks seem to show that performance decreases the more
+	// goroutines we have running cache.processItems(), so 1 should
+	// usually be sufficient
+	go cache.processItems()
+	return cache, nil
+}
+
+// Get returns the value (if any) and a boolean representing whether the
+// value was found or not. The value can be nil and the boolean can be true at
+// the same time.
+func (c *Cache) Get(key interface{}) (interface{}, bool) {
+	if c == nil || key == nil {
+		return nil, false
+	}
+	keyHash, conflictHash := c.keyToHash(key)
+	c.getBuf.Push(keyHash)
+	value, ok := c.store.Get(keyHash, conflictHash)
+	if ok {
+		c.Metrics.add(hit, keyHash, 1)
+	} else {
+		c.Metrics.add(miss, keyHash, 1)
+	}
+	return value, ok
+}
+
+// Set attempts to add the key-value item to the cache. If it returns false,
+// then the Set was dropped and the key-value item isn't added to the cache. If
+// it returns true, there's still a chance it could be dropped by the policy if
+// it's determined that the key-value item isn't worth keeping, but otherwise
+// the item will be added and other items will be evicted in order to make room.
+//
+// To dynamically evaluate an item's cost using the Config.Cost function, set
+// the cost parameter to 0 and Cost will be run when needed in order to find
+// the item's true cost.
+func (c *Cache) Set(key, value interface{}, cost int64) bool {
+	if c == nil || key == nil {
+		return false
+	}
+	keyHash, conflictHash := c.keyToHash(key)
+	i := &item{
+		flag:     itemNew,
+		key:      keyHash,
+		conflict: conflictHash,
+		value:    value,
+		cost:     cost,
+	}
+	// attempt to immediately update hashmap value and set flag to update so the
+	// cost is eventually updated
+	if c.store.Update(keyHash, conflictHash, i.value) {
+		i.flag = itemUpdate
+	}
+	// attempt to send item to policy
+	select {
+	case c.setBuf <- i:
+		return true
+	default:
+		c.Metrics.add(dropSets, keyHash, 1)
+		return false
+	}
+}
+
+// Del deletes the key-value item from the cache if it exists.
+func (c *Cache) Del(key interface{}) {
+	if c == nil || key == nil {
+		return
+	}
+	keyHash, conflictHash := c.keyToHash(key)
+	c.setBuf <- &item{
+		flag:     itemDelete,
+		key:      keyHash,
+		conflict: conflictHash,
+	}
+}
+
+// Close stops all goroutines and closes all channels.
+func (c *Cache) Close() {
+	// block until processItems goroutine is returned
+	c.stop <- struct{}{}
+	close(c.stop)
+	close(c.setBuf)
+	c.policy.Close()
+}
+
+// Clear empties the hashmap and zeroes all policy counters. Note that this is
+// not an atomic operation (but that shouldn't be a problem as it's assumed that
+// Set/Get calls won't be occurring until after this).
+func (c *Cache) Clear() { + // block until processItems goroutine is returned + c.stop <- struct{}{} + // swap out the setBuf channel + c.setBuf = make(chan *item, setBufSize) + // clear value hashmap and policy data + c.policy.Clear() + c.store.Clear() + // only reset metrics if they're enabled + if c.Metrics != nil { + c.Metrics.Clear() + } + // restart processItems goroutine + go c.processItems() +} + +// processItems is ran by goroutines processing the Set buffer. +func (c *Cache) processItems() { + for { + select { + case i := <-c.setBuf: + // calculate item cost value if new or update + if i.cost == 0 && c.cost != nil && i.flag != itemDelete { + i.cost = c.cost(i.value) + } + switch i.flag { + case itemNew: + victims, added := c.policy.Add(i.key, i.cost) + if added { + c.store.Set(i.key, i.conflict, i.value) + c.Metrics.add(keyAdd, i.key, 1) + c.Metrics.add(costAdd, i.key, uint64(i.cost)) + } + for _, victim := range victims { + victim.conflict, victim.value = c.store.Del(victim.key, 0) + if c.onEvict != nil { + c.onEvict(victim.key, victim.conflict, victim.value, victim.cost) + } + c.Metrics.add(keyEvict, victim.key, 1) + c.Metrics.add(costEvict, victim.key, uint64(victim.cost)) + } + case itemUpdate: + c.policy.Update(i.key, i.cost) + case itemDelete: + c.policy.Del(i.key) + c.store.Del(i.key, i.conflict) + } + case <-c.stop: + return + } + } +} + +// collectMetrics just creates a new *Metrics instance and adds the pointers +// to the cache and policy instances. +func (c *Cache) collectMetrics() { + c.Metrics = newMetrics() + c.policy.CollectMetrics(c.Metrics) +} + +type metricType int + +const ( + // The following 2 keep track of hits and misses. + hit = iota + miss + // The following 3 keep track of number of keys added, updated and evicted. + keyAdd + keyUpdate + keyEvict + // The following 2 keep track of cost of keys added and evicted. + costAdd + costEvict + // The following keep track of how many sets were dropped or rejected later. + dropSets + rejectSets + // The following 2 keep track of how many gets were kept and dropped on the + // floor. + dropGets + keepGets + // This should be the final enum. Other enums should be set before this. + doNotUse +) + +func stringFor(t metricType) string { + switch t { + case hit: + return "hit" + case miss: + return "miss" + case keyAdd: + return "keys-added" + case keyUpdate: + return "keys-updated" + case keyEvict: + return "keys-evicted" + case costAdd: + return "cost-added" + case costEvict: + return "cost-evicted" + case dropSets: + return "sets-dropped" + case rejectSets: + return "sets-rejected" // by policy. + case dropGets: + return "gets-dropped" + case keepGets: + return "gets-kept" + default: + return "unidentified" + } +} + +// Metrics is a snapshot of performance statistics for the lifetime of a cache +// instance. +type Metrics struct { + all [doNotUse][]*uint64 +} + +func newMetrics() *Metrics { + s := &Metrics{} + for i := 0; i < doNotUse; i++ { + s.all[i] = make([]*uint64, 256) + slice := s.all[i] + for j := range slice { + slice[j] = new(uint64) + } + } + return s +} + +func (p *Metrics) add(t metricType, hash, delta uint64) { + if p == nil { + return + } + valp := p.all[t] + // Avoid false sharing by padding at least 64 bytes of space between two + // atomic counters which would be incremented. 
+ idx := (hash % 25) * 10 + atomic.AddUint64(valp[idx], delta) +} + +func (p *Metrics) get(t metricType) uint64 { + if p == nil { + return 0 + } + valp := p.all[t] + var total uint64 + for i := range valp { + total += atomic.LoadUint64(valp[i]) + } + return total +} + +// Hits is the number of Get calls where a value was found for the corresponding +// key. +func (p *Metrics) Hits() uint64 { + return p.get(hit) +} + +// Misses is the number of Get calls where a value was not found for the +// corresponding key. +func (p *Metrics) Misses() uint64 { + return p.get(miss) +} + +// KeysAdded is the total number of Set calls where a new key-value item was +// added. +func (p *Metrics) KeysAdded() uint64 { + return p.get(keyAdd) +} + +// KeysUpdated is the total number of Set calls where the value was updated. +func (p *Metrics) KeysUpdated() uint64 { + return p.get(keyUpdate) +} + +// KeysEvicted is the total number of keys evicted. +func (p *Metrics) KeysEvicted() uint64 { + return p.get(keyEvict) +} + +// CostAdded is the sum of costs that have been added (successful Set calls). +func (p *Metrics) CostAdded() uint64 { + return p.get(costAdd) +} + +// CostEvicted is the sum of all costs that have been evicted. +func (p *Metrics) CostEvicted() uint64 { + return p.get(costEvict) +} + +// SetsDropped is the number of Set calls that don't make it into internal +// buffers (due to contention or some other reason). +func (p *Metrics) SetsDropped() uint64 { + return p.get(dropSets) +} + +// SetsRejected is the number of Set calls rejected by the policy (TinyLFU). +func (p *Metrics) SetsRejected() uint64 { + return p.get(rejectSets) +} + +// GetsDropped is the number of Get counter increments that are dropped +// internally. +func (p *Metrics) GetsDropped() uint64 { + return p.get(dropGets) +} + +// GetsKept is the number of Get counter increments that are kept. +func (p *Metrics) GetsKept() uint64 { + return p.get(keepGets) +} + +// Ratio is the number of Hits over all accesses (Hits + Misses). This is the +// percentage of successful Get calls. 
+func (p *Metrics) Ratio() float64 { + if p == nil { + return 0.0 + } + hits, misses := p.get(hit), p.get(miss) + if hits == 0 && misses == 0 { + return 0.0 + } + return float64(hits) / float64(hits+misses) +} + +func (p *Metrics) Clear() { + if p == nil { + return + } + for i := 0; i < doNotUse; i++ { + for j := range p.all[i] { + atomic.StoreUint64(p.all[i][j], 0) + } + } +} + +func (p *Metrics) String() string { + if p == nil { + return "" + } + var buf bytes.Buffer + for i := 0; i < doNotUse; i++ { + t := metricType(i) + fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t)) + } + fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss)) + fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio()) + return buf.String() +} diff --git a/vendor/github.com/dgraph-io/ristretto/go.mod b/vendor/github.com/dgraph-io/ristretto/go.mod new file mode 100644 index 0000000000..2f2a05299f --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/go.mod @@ -0,0 +1,8 @@ +module github.com/dgraph-io/ristretto + +go 1.12 + +require ( + github.com/cespare/xxhash v1.1.0 + github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 +) diff --git a/vendor/github.com/dgraph-io/ristretto/go.sum b/vendor/github.com/dgraph-io/ristretto/go.sum new file mode 100644 index 0000000000..970408a93c --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/go.sum @@ -0,0 +1,8 @@ +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= diff --git a/vendor/github.com/dgraph-io/ristretto/policy.go b/vendor/github.com/dgraph-io/ristretto/policy.go new file mode 100644 index 0000000000..19d74962c7 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/policy.go @@ -0,0 +1,356 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ristretto + +import ( + "math" + "sync" + + "github.com/dgraph-io/ristretto/z" +) + +const ( + // lfuSample is the number of items to sample when looking at eviction + // candidates. 5 seems to be the most optimal number [citation needed]. + lfuSample = 5 +) + +// policy is the interface encapsulating eviction/admission behavior. +// +// TODO: remove this interface and just rename defaultPolicy to policy, as we +// are probably only going to use/implement/maintain one policy. 
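Before the policy internals, it helps to see the Metrics accessors above in combination. A short sketch, assuming the Cache and Config types from this package and eliding imports and error handling; metrics must be requested at construction time, since collectMetrics is only invoked from NewCache:

```go
cfg := &ristretto.Config{
	NumCounters: 1e6,
	MaxCost:     1 << 20,
	BufferItems: 64,
	Metrics:     true, // without this, cache.Metrics stays nil and adds are no-ops
}
cache, _ := ristretto.NewCache(cfg)
// ... exercise the cache with Set/Get ...
m := cache.Metrics
fmt.Printf("hits=%d misses=%d ratio=%.2f\n", m.Hits(), m.Misses(), m.Ratio())
fmt.Println(m) // String() dumps every counter, ending in "hit-ratio: ..."
```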
+type policy interface { + ringConsumer + // Add attempts to Add the key-cost pair to the Policy. It returns a slice + // of evicted keys and a bool denoting whether or not the key-cost pair + // was added. If it returns true, the key should be stored in cache. + Add(uint64, int64) ([]*item, bool) + // Has returns true if the key exists in the Policy. + Has(uint64) bool + // Del deletes the key from the Policy. + Del(uint64) + // Cap returns the available capacity. + Cap() int64 + // Close stops all goroutines and closes all channels. + Close() + // Update updates the cost value for the key. + Update(uint64, int64) + // Cost returns the cost value of a key or -1 if missing. + Cost(uint64) int64 + // Optionally, set stats object to track how policy is performing. + CollectMetrics(*Metrics) + // Clear zeroes out all counters and clears hashmaps. + Clear() +} + +func newPolicy(numCounters, maxCost int64) policy { + return newDefaultPolicy(numCounters, maxCost) +} + +type defaultPolicy struct { + sync.Mutex + admit *tinyLFU + evict *sampledLFU + itemsCh chan []uint64 + stop chan struct{} + metrics *Metrics +} + +func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy { + p := &defaultPolicy{ + admit: newTinyLFU(numCounters), + evict: newSampledLFU(maxCost), + itemsCh: make(chan []uint64, 3), + stop: make(chan struct{}), + } + go p.processItems() + return p +} + +func (p *defaultPolicy) CollectMetrics(metrics *Metrics) { + p.metrics = metrics + p.evict.metrics = metrics +} + +type policyPair struct { + key uint64 + cost int64 +} + +func (p *defaultPolicy) processItems() { + for { + select { + case items := <-p.itemsCh: + p.Lock() + p.admit.Push(items) + p.Unlock() + case <-p.stop: + return + } + } +} + +func (p *defaultPolicy) Push(keys []uint64) bool { + if len(keys) == 0 { + return true + } + select { + case p.itemsCh <- keys: + p.metrics.add(keepGets, keys[0], uint64(len(keys))) + return true + default: + p.metrics.add(dropGets, keys[0], uint64(len(keys))) + return false + } +} + +func (p *defaultPolicy) Add(key uint64, cost int64) ([]*item, bool) { + p.Lock() + defer p.Unlock() + // can't add an item bigger than entire cache + if cost > p.evict.maxCost { + return nil, false + } + // we don't need to go any further if the item is already in the cache + if has := p.evict.updateIfHas(key, cost); has { + return nil, true + } + // if we got this far, this key doesn't exist in the cache + // + // calculate the remaining room in the cache (usually bytes) + room := p.evict.roomLeft(cost) + if room >= 0 { + // there's enough room in the cache to store the new item without + // overflowing, so we can do that now and stop here + p.evict.add(key, cost) + return nil, true + } + // incHits is the hit count for the incoming item + incHits := p.admit.Estimate(key) + // sample is the eviction candidate pool to be filled via random sampling + // + // TODO: perhaps we should use a min heap here. Right now our time + // complexity is N for finding the min. Min heap should bring it down to + // O(lg N). + sample := make([]*policyPair, 0, lfuSample) + // as items are evicted they will be appended to victims + victims := make([]*item, 0) + // delete victims until there's enough space or a minKey is found that has + // more hits than incoming item. 
+ for ; room < 0; room = p.evict.roomLeft(cost) { + // fill up empty slots in sample + sample = p.evict.fillSample(sample) + // find minimally used item in sample + minKey, minHits, minId, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0) + for i, pair := range sample { + // look up hit count for sample key + if hits := p.admit.Estimate(pair.key); hits < minHits { + minKey, minHits, minId, minCost = pair.key, hits, i, pair.cost + } + } + // if the incoming item isn't worth keeping in the policy, reject. + if incHits < minHits { + p.metrics.add(rejectSets, key, 1) + return victims, false + } + // delete the victim from metadata + p.evict.del(minKey) + // delete the victim from sample + sample[minId] = sample[len(sample)-1] + sample = sample[:len(sample)-1] + // store victim in evicted victims slice + victims = append(victims, &item{ + key: minKey, + conflict: 0, + cost: minCost, + }) + } + p.evict.add(key, cost) + return victims, true +} + +func (p *defaultPolicy) Has(key uint64) bool { + p.Lock() + _, exists := p.evict.keyCosts[key] + p.Unlock() + return exists +} + +func (p *defaultPolicy) Del(key uint64) { + p.Lock() + p.evict.del(key) + p.Unlock() +} + +func (p *defaultPolicy) Cap() int64 { + p.Lock() + capacity := int64(p.evict.maxCost - p.evict.used) + p.Unlock() + return capacity +} + +func (p *defaultPolicy) Update(key uint64, cost int64) { + p.Lock() + p.evict.updateIfHas(key, cost) + p.Unlock() +} + +func (p *defaultPolicy) Cost(key uint64) int64 { + p.Lock() + if cost, found := p.evict.keyCosts[key]; found { + p.Unlock() + return cost + } + p.Unlock() + return -1 +} + +func (p *defaultPolicy) Clear() { + p.Lock() + p.admit.clear() + p.evict.clear() + p.Unlock() +} + +func (p *defaultPolicy) Close() { + // block until p.processItems goroutine is returned + p.stop <- struct{}{} + close(p.stop) + close(p.itemsCh) +} + +// sampledLFU is an eviction helper storing key-cost pairs. +type sampledLFU struct { + keyCosts map[uint64]int64 + maxCost int64 + used int64 + metrics *Metrics +} + +func newSampledLFU(maxCost int64) *sampledLFU { + return &sampledLFU{ + keyCosts: make(map[uint64]int64), + maxCost: maxCost, + } +} + +func (p *sampledLFU) roomLeft(cost int64) int64 { + return p.maxCost - (p.used + cost) +} + +func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair { + if len(in) >= lfuSample { + return in + } + for key, cost := range p.keyCosts { + in = append(in, &policyPair{key, cost}) + if len(in) >= lfuSample { + return in + } + } + return in +} + +func (p *sampledLFU) del(key uint64) { + cost, ok := p.keyCosts[key] + if !ok { + return + } + p.used -= cost + delete(p.keyCosts, key) +} + +func (p *sampledLFU) add(key uint64, cost int64) { + p.keyCosts[key] = cost + p.used += cost +} + +func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool { + if prev, found := p.keyCosts[key]; found { + // update the cost of an existing key, but don't worry about evicting, + // evictions will be handled the next time a new item is added + p.metrics.add(keyUpdate, key, 1) + p.used += cost - prev + p.keyCosts[key] = cost + return true + } + return false +} + +func (p *sampledLFU) clear() { + p.used = 0 + p.keyCosts = make(map[uint64]int64) +} + +// tinyLFU is an admission helper that keeps track of access frequency using +// tiny (4-bit) counters in the form of a count-min sketch. +// tinyLFU is NOT thread safe. 
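Taken together, the eviction helper above (sampledLFU) and the admission helper defined next (tinyLFU) implement the core trade in defaultPolicy.Add: an incoming key may only displace a resident key that is less frequently used. A hedged, self-contained paraphrase of one round of that loop, assuming the policyPair type and the math import from this file (estimate stands in for tinyLFU.Estimate):

```go
// decideOne picks the coldest key from a small random sample of residents.
// It returns reject=true when the incoming key is colder than every sampled
// resident; evicting a warmer key for a colder one would only hurt the hit
// ratio, so the newcomer is rejected instead.
func decideOne(sample []*policyPair, incHits int64, estimate func(uint64) int64) (victim uint64, reject bool) {
	minKey, minHits := uint64(0), int64(math.MaxInt64)
	for _, pair := range sample {
		if hits := estimate(pair.key); hits < minHits {
			minKey, minHits = pair.key, hits
		}
	}
	if incHits < minHits {
		return 0, true
	}
	return minKey, false
}
```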
+type tinyLFU struct { + freq *cmSketch + door *z.Bloom + incrs int64 + resetAt int64 +} + +func newTinyLFU(numCounters int64) *tinyLFU { + return &tinyLFU{ + freq: newCmSketch(numCounters), + door: z.NewBloomFilter(float64(numCounters), 0.01), + resetAt: numCounters, + } +} + +func (p *tinyLFU) Push(keys []uint64) { + for _, key := range keys { + p.Increment(key) + } +} + +func (p *tinyLFU) Estimate(key uint64) int64 { + hits := p.freq.Estimate(key) + if p.door.Has(key) { + hits += 1 + } + return hits +} + +func (p *tinyLFU) Increment(key uint64) { + // flip doorkeeper bit if not already + if added := p.door.AddIfNotHas(key); !added { + // increment count-min counter if doorkeeper bit is already set. + p.freq.Increment(key) + } + p.incrs++ + if p.incrs >= p.resetAt { + p.reset() + } +} + +func (p *tinyLFU) reset() { + // Zero out incrs. + p.incrs = 0 + // clears doorkeeper bits + p.door.Clear() + // halves count-min counters + p.freq.Reset() +} + +func (p *tinyLFU) clear() { + p.incrs = 0 + p.door.Clear() + p.freq.Clear() +} diff --git a/vendor/github.com/dgraph-io/ristretto/ring.go b/vendor/github.com/dgraph-io/ristretto/ring.go new file mode 100644 index 0000000000..3e9f391676 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/ring.go @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ristretto + +import ( + "sync" +) + +// ringConsumer is the user-defined object responsible for receiving and +// processing items in batches when buffers are drained. +type ringConsumer interface { + Push([]uint64) bool +} + +// ringStripe is a singular ring buffer that is not concurrent safe. +type ringStripe struct { + cons ringConsumer + data []uint64 + capa int +} + +func newRingStripe(cons ringConsumer, capa int64) *ringStripe { + return &ringStripe{ + cons: cons, + data: make([]uint64, 0, capa), + capa: int(capa), + } +} + +// Push appends an item in the ring buffer and drains (copies items and +// sends to Consumer) if full. +func (s *ringStripe) Push(item uint64) { + s.data = append(s.data, item) + // if we should drain + if len(s.data) >= s.capa { + // Send elements to consumer. Create a new one. + if s.cons.Push(s.data) { + s.data = make([]uint64, 0, s.capa) + } else { + s.data = s.data[:0] + } + } +} + +// ringBuffer stores multiple buffers (stripes) and distributes Pushed items +// between them to lower contention. +// +// This implements the "batching" process described in the BP-Wrapper paper +// (section III part A). +type ringBuffer struct { + pool *sync.Pool +} + +// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will +// be called when individual stripes are full and need to drain their elements. +func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer { + // LOSSY buffers use a very simple sync.Pool for concurrently reusing + // stripes. 
We do lose some stripes due to GC (unheld items in sync.Pool
+	// are cleared), but the performance gains generally outweigh the small
+	// percentage of elements lost. The performance primarily comes from
+	// low-level runtime functions used in the standard library that aren't
+	// available to us (such as runtime_procPin()).
+	return &ringBuffer{
+		pool: &sync.Pool{
+			New: func() interface{} { return newRingStripe(cons, capa) },
+		},
+	}
+}
+
+// Push adds an element to one of the internal stripes and possibly drains if
+// the stripe becomes full.
+func (b *ringBuffer) Push(item uint64) {
+	// reuse or create a new stripe
+	stripe := b.pool.Get().(*ringStripe)
+	stripe.Push(item)
+	b.pool.Put(stripe)
+}
diff --git a/vendor/github.com/dgraph-io/ristretto/sketch.go b/vendor/github.com/dgraph-io/ristretto/sketch.go
new file mode 100644
index 0000000000..c2b8c0f4f5
--- /dev/null
+++ b/vendor/github.com/dgraph-io/ristretto/sketch.go
@@ -0,0 +1,155 @@
+/*
+ * Copyright 2019 Dgraph Labs, Inc. and Contributors
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// This package includes multiple probabilistic data structures needed for
+// admission/eviction metadata. Most are Counting Bloom Filter variations, but
+// a caching-specific feature that is also required is a "freshness" mechanism,
+// which basically serves as a "lifetime" process. This freshness mechanism
+// was described in the original TinyLFU paper [1], but other mechanisms may
+// be better suited for certain data distributions.
+//
+// [1]: https://arxiv.org/abs/1512.00727
+package ristretto
+
+import (
+	"fmt"
+	"math/rand"
+	"time"
+)
+
+// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily
+// based on Damian Gryski's CM4 [1].
+//
+// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go
+type cmSketch struct {
+	rows [cmDepth]cmRow
+	seed [cmDepth]uint64
+	mask uint64
+}
+
+const (
+	// cmDepth is the number of counter copies to store (think of it as rows)
+	cmDepth = 4
+)
+
+func newCmSketch(numCounters int64) *cmSketch {
+	if numCounters == 0 {
+		panic("cmSketch: bad numCounters")
+	}
+	// get the next power of 2 for better cache performance
+	numCounters = next2Power(numCounters)
+	sketch := &cmSketch{mask: uint64(numCounters - 1)}
+	// initialize rows of counters and seeds
+	source := rand.New(rand.NewSource(time.Now().UnixNano()))
+	for i := 0; i < cmDepth; i++ {
+		sketch.seed[i] = source.Uint64()
+		sketch.rows[i] = newCmRow(numCounters)
+	}
+	return sketch
+}
+
+// Increment increments the count(ers) for the specified key.
+func (s *cmSketch) Increment(hashed uint64) {
+	for i := range s.rows {
+		s.rows[i].increment((hashed ^ s.seed[i]) & s.mask)
+	}
+}
+
+// Estimate returns the value of the specified key.
+func (s *cmSketch) Estimate(hashed uint64) int64 {
+	min := byte(255)
+	for i := range s.rows {
+		val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask)
+		if val < min {
+			min = val
+		}
+	}
+	return int64(min)
+}
+
+// Reset halves all counter values.
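The 4-bit packing that Reset and the cmRow helpers below manipulate is easier to follow with concrete values. A small standalone demo (not part of the library) of two counters sharing one byte, saturating at 15, and being halved in bulk:

```go
package main

import "fmt"

func main() {
	// As in cmRow: counter n lives in byte n/2; odd counters use the high
	// nibble (shift 4), even counters the low nibble (shift 0).
	row := make([]byte, 1) // holds counters 0 and 1
	inc := func(n uint64) {
		i, s := n/2, (n&1)*4
		if v := (row[i] >> s) & 0x0f; v < 15 { // saturate at 15, never wrap
			row[i] += 1 << s
		}
	}
	for i := 0; i < 5; i++ {
		inc(0)
	}
	inc(1)
	fmt.Printf("c0=%d c1=%d\n", row[0]&0x0f, (row[0]>>4)&0x0f) // c0=5 c1=1

	// Halve both counters at once, as reset() does: the 0x77 mask clears the
	// bit that the high nibble would otherwise shift down into the low nibble.
	row[0] = (row[0] >> 1) & 0x77
	fmt.Printf("c0=%d c1=%d\n", row[0]&0x0f, (row[0]>>4)&0x0f) // c0=2 c1=0
}
```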
+func (s *cmSketch) Reset() { + for _, r := range s.rows { + r.reset() + } +} + +// Clear zeroes all counters. +func (s *cmSketch) Clear() { + for _, r := range s.rows { + r.clear() + } +} + +// cmRow is a row of bytes, with each byte holding two counters +type cmRow []byte + +func newCmRow(numCounters int64) cmRow { + return make(cmRow, numCounters/2) +} + +func (r cmRow) get(n uint64) byte { + return byte(r[n/2]>>((n&1)*4)) & 0x0f +} + +func (r cmRow) increment(n uint64) { + // index of the counter + i := n / 2 + // shift distance (even 0, odd 4) + s := (n & 1) * 4 + // counter value + v := (r[i] >> s) & 0x0f + // only increment if not max value (overflow wrap is bad for LFU) + if v < 15 { + r[i] += 1 << s + } +} + +func (r cmRow) reset() { + // halve each counter + for i := range r { + r[i] = (r[i] >> 1) & 0x77 + } +} + +func (r cmRow) clear() { + // zero each counter + for i := range r { + r[i] = 0 + } +} + +func (r cmRow) string() string { + s := "" + for i := uint64(0); i < uint64(len(r)*2); i++ { + s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f) + } + s = s[:len(s)-1] + return s +} + +// next2Power rounds x up to the next power of 2, if it's not already one. +func next2Power(x int64) int64 { + x-- + x |= x >> 1 + x |= x >> 2 + x |= x >> 4 + x |= x >> 8 + x |= x >> 16 + x |= x >> 32 + x++ + return x +} diff --git a/vendor/github.com/dgraph-io/ristretto/store.go b/vendor/github.com/dgraph-io/ristretto/store.go new file mode 100644 index 0000000000..3f3fbce444 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/store.go @@ -0,0 +1,181 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package ristretto + +import ( + "sync" +) + +type storeItem struct { + key uint64 + conflict uint64 + value interface{} +} + +// store is the interface fulfilled by all hash map implementations in this +// file. Some hash map implementations are better suited for certain data +// distributions than others, so this allows us to abstract that out for use +// in Ristretto. +// +// Every store is safe for concurrent usage. +type store interface { + // Get returns the value associated with the key parameter. + Get(uint64, uint64) (interface{}, bool) + // Set adds the key-value pair to the Map or updates the value if it's + // already present. + Set(uint64, uint64, interface{}) + // Del deletes the key-value pair from the Map. + Del(uint64, uint64) (uint64, interface{}) + // Update attempts to update the key with a new value and returns true if + // successful. + Update(uint64, uint64, interface{}) bool + // Clear clears all contents of the store. + Clear() +} + +// newStore returns the default store implementation. 
+func newStore() store { + return newShardedMap() +} + +const numShards uint64 = 256 + +type shardedMap struct { + shards []*lockedMap +} + +func newShardedMap() *shardedMap { + sm := &shardedMap{ + shards: make([]*lockedMap, int(numShards)), + } + for i := range sm.shards { + sm.shards[i] = newLockedMap() + } + return sm +} + +func (sm *shardedMap) Get(key, conflict uint64) (interface{}, bool) { + return sm.shards[key%numShards].Get(key, conflict) +} + +func (sm *shardedMap) Set(key, conflict uint64, value interface{}) { + sm.shards[key%numShards].Set(key, conflict, value) +} + +func (sm *shardedMap) Del(key, conflict uint64) (uint64, interface{}) { + return sm.shards[key%numShards].Del(key, conflict) +} + +func (sm *shardedMap) Update(key, conflict uint64, value interface{}) bool { + return sm.shards[key%numShards].Update(key, conflict, value) +} + +func (sm *shardedMap) Clear() { + for i := uint64(0); i < numShards; i++ { + sm.shards[i].Clear() + } +} + +type lockedMap struct { + sync.RWMutex + data map[uint64]storeItem +} + +func newLockedMap() *lockedMap { + return &lockedMap{ + data: make(map[uint64]storeItem), + } +} + +func (m *lockedMap) Get(key, conflict uint64) (interface{}, bool) { + m.RLock() + item, ok := m.data[key] + m.RUnlock() + if !ok { + return nil, false + } + if conflict != 0 && (conflict != item.conflict) { + return nil, false + } + return item.value, true +} + +func (m *lockedMap) Set(key, conflict uint64, value interface{}) { + m.Lock() + item, ok := m.data[key] + if !ok { + m.data[key] = storeItem{ + key: key, + conflict: conflict, + value: value, + } + m.Unlock() + return + } + if conflict != 0 && (conflict != item.conflict) { + m.Unlock() + return + } + m.data[key] = storeItem{ + key: key, + conflict: conflict, + value: value, + } + m.Unlock() +} + +func (m *lockedMap) Del(key, conflict uint64) (uint64, interface{}) { + m.Lock() + item, ok := m.data[key] + if !ok { + m.Unlock() + return 0, nil + } + if conflict != 0 && (conflict != item.conflict) { + m.Unlock() + return 0, nil + } + delete(m.data, key) + m.Unlock() + return item.conflict, item.value +} + +func (m *lockedMap) Update(key, conflict uint64, value interface{}) bool { + m.Lock() + item, ok := m.data[key] + if !ok { + m.Unlock() + return false + } + if conflict != 0 && (conflict != item.conflict) { + m.Unlock() + return false + } + m.data[key] = storeItem{ + key: key, + conflict: conflict, + value: value, + } + m.Unlock() + return true +} + +func (m *lockedMap) Clear() { + m.Lock() + m.data = make(map[uint64]storeItem) + m.Unlock() +} diff --git a/vendor/github.com/dgraph-io/ristretto/z/LICENSE b/vendor/github.com/dgraph-io/ristretto/z/LICENSE new file mode 100644 index 0000000000..0860cbfe85 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/z/LICENSE @@ -0,0 +1,64 @@ +bbloom.go + +// The MIT License (MIT) +// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt + +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+ +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +rtutil.go + +// MIT License + +// Copyright (c) 2019 Ewan Chou + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +Modifications: + +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + diff --git a/vendor/github.com/dgraph-io/ristretto/z/README.md b/vendor/github.com/dgraph-io/ristretto/z/README.md new file mode 100644 index 0000000000..6d77e146eb --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/z/README.md @@ -0,0 +1,129 @@ +## bbloom: a bitset Bloom filter for go/golang +=== + +package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter. + +NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom + +=== + +changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache. + +This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html". +Nonetheless bbloom should work with any other form of entries. + +~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). 
sdbm http://www.cse.yorku.ca/~oz/hash.html~~
+
+Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dmitry Chestnykh to Go (github.com/dchest/siphash )
+
+Minimum hashset size is: 512 ([4]uint64; will be set automatically).
+
+### install
+
+```sh
+go get github.com/AndreasBriese/bbloom
+```
+
+### test
++ change to folder ../bbloom
++ create wordlist in file "words.txt" (you might use `python permut.py`)
++ run 'go test -bench=.' within the folder
+
+```sh
+go test -bench=.
+```
+
+~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
+
+using go's testing framework now (keep in mind that the op timing covers 65536 operations of Add, Has, and AddIfNotHas respectively)
+
+### usage
+
+after installation add
+
+```go
+import (
+	...
+	"github.com/AndreasBriese/bbloom"
+	...
+	)
+```
+
+at your header. In the program use
+
+```go
+// create a bloom filter for 65536 items and 1 % wrong-positive ratio
+bf := bbloom.New(float64(1<<16), float64(0.01))
+
+// or
+// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
+// bf = bbloom.New(float64(650000), float64(7))
+// or
+bf = bbloom.New(650000.0, 7.0)
+
+// add one item
+bf.Add([]byte("butter"))
+
+// Number of elements added is exposed now
+// Note: ElemNum will not be included in JSON export (for compatibility with older versions)
+nOfElementsInFilter := bf.ElemNum
+
+// check if item is in the filter
+isIn := bf.Has([]byte("butter")) // should be true
+isNotIn := bf.Has([]byte("Butter")) // should be false
+
+// 'add only if item is new' to the bloomfilter
+added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
+added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
+
+// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
+// add one item
+bf.AddTS([]byte("peanutbutter"))
+// check if item is in the filter
+isIn = bf.HasTS([]byte("peanutbutter")) // should be true
+isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
+// 'add only if item is new' to the bloomfilter
+added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'butter' is already in the set
+added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'peanutbuTTer' is new
+
+// convert to JSON ([]byte)
+Json := bf.JSONMarshal()
+
+// the bloomfilter's Mutex is exposed for external un-/locking
+// i.e. mutex lock while doing JSON conversion
+bf.Mtx.Lock()
+Json = bf.JSONMarshal()
+bf.Mtx.Unlock()
+
+// restore a bloom filter from storage
+bfNew := bbloom.JSONUnmarshal(Json)
+
+isInNew := bfNew.Has([]byte("butter")) // should be true
+isNotInNew := bfNew.Has([]byte("Butter")) // should be false
+
+```
+
+to work with the bloom filter.
+
+### why 'fast'?
+
+It's about 3 times faster than William Fitzgerald's bitset bloom filter https://github.com/willf/bloom .
And it is about as fast as my []bool set variant for Bloom filters (see https://github.com/AndreasBriese/bloom ), but has an 8-times smaller memory footprint:
+
+
+	Bloom filter (filter size 524288, 7 hashlocs)
+	github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
+	github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
+	github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
+	github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
+
+	github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
+	github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
+	github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
+	github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
+	github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
+	github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
+
+(on MBPro15 OSX10.8.5 i7 4Core 2.4GHz)
+
+
+With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
diff --git a/vendor/github.com/dgraph-io/ristretto/z/bbloom.go b/vendor/github.com/dgraph-io/ristretto/z/bbloom.go
new file mode 100644
index 0000000000..14b967fcd4
--- /dev/null
+++ b/vendor/github.com/dgraph-io/ristretto/z/bbloom.go
@@ -0,0 +1,202 @@
+// The MIT License (MIT)
+// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
+
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
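calcSizeByWrongPositives in the file below applies the standard Bloom filter sizing formulas: for n entries and target false-positive rate p, the bit count is m = -n·ln(p)/(ln 2)² and the per-entry hash locations are k = ⌈ln(2)·m/n⌉. A hedged sketch of the same arithmetic, assuming a math import:

```go
// bloomSize mirrors calcSizeByWrongPositives below. For 65536 entries at a
// 1% false-positive rate it yields roughly 628,000 bits and 7 hash
// locations, matching the "650000 ... and 7 locs" example in the README above.
func bloomSize(numEntries, wrongs float64) (bits, locs uint64) {
	size := -numEntries * math.Log(wrongs) / (math.Ln2 * math.Ln2)
	return uint64(size), uint64(math.Ceil(math.Ln2 * size / numEntries))
}
```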
+ +package z + +import ( + "bytes" + "encoding/json" + "log" + "math" + "unsafe" +) + +// helper +var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128} + +func getSize(ui64 uint64) (size uint64, exponent uint64) { + if ui64 < uint64(512) { + ui64 = uint64(512) + } + size = uint64(1) + for size < ui64 { + size <<= 1 + exponent++ + } + return size, exponent +} + +func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) { + size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2) + locs := math.Ceil(float64(0.69314718056) * size / numEntries) + return uint64(size), uint64(locs) +} + +// NewBloomFilter returns a new bloomfilter. +func NewBloomFilter(params ...float64) (bloomfilter *Bloom) { + var entries, locs uint64 + if len(params) == 2 { + if params[1] < 1 { + entries, locs = calcSizeByWrongPositives(params[0], params[1]) + } else { + entries, locs = uint64(params[0]), uint64(params[1]) + } + } else { + log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations))" + + " i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries)," + + " float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))") + } + size, exponent := getSize(entries) + bloomfilter = &Bloom{ + sizeExp: exponent, + size: size - 1, + setLocs: locs, + shift: 64 - exponent, + } + bloomfilter.Size(size) + return bloomfilter +} + +// Bloom filter +type Bloom struct { + bitset []uint64 + ElemNum uint64 + sizeExp uint64 + size uint64 + setLocs uint64 + shift uint64 +} + +// <--- http://www.cse.yorku.ca/~oz/hash.html +// modified Berkeley DB Hash (32bit) +// hash is casted to l, h = 16bit fragments +// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) { +// hash := uint64(len(*b)) +// for _, c := range *b { +// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash +// } +// h = hash >> bl.shift +// l = hash << bl.shift >> bl.shift +// return l, h +// } + +// Add adds hash of a key to the bloomfilter. +func (bl *Bloom) Add(hash uint64) { + h := hash >> bl.shift + l := hash << bl.shift >> bl.shift + for i := uint64(0); i < bl.setLocs; i++ { + bl.Set((h + i*l) & bl.size) + bl.ElemNum++ + } +} + +// Has checks if bit(s) for entry hash is/are set, +// returns true if the hash was added to the Bloom Filter. +func (bl Bloom) Has(hash uint64) bool { + h := hash >> bl.shift + l := hash << bl.shift >> bl.shift + for i := uint64(0); i < bl.setLocs; i++ { + switch bl.IsSet((h + i*l) & bl.size) { + case false: + return false + } + } + return true +} + +// AddIfNotHas only Adds hash, if it's not present in the bloomfilter. +// Returns true if hash was added. +// Returns false if hash was already registered in the bloomfilter. +func (bl *Bloom) AddIfNotHas(hash uint64) bool { + if bl.Has(hash) { + return false + } + bl.Add(hash) + return true +} + +// Size makes Bloom filter with as bitset of size sz. +func (bl *Bloom) Size(sz uint64) { + bl.bitset = make([]uint64, sz>>6) +} + +// Clear resets the Bloom filter. +func (bl *Bloom) Clear() { + for i := range bl.bitset { + bl.bitset[i] = 0 + } +} + +// Set sets the bit[idx] of bitset. +func (bl *Bloom) Set(idx uint64) { + ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) + *(*uint8)(ptr) |= mask[idx%8] +} + +// IsSet checks if bit[idx] of bitset is set, returns true/false. 
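Add and Has above derive every probe position from one 64-bit hash, in the style of Kirsch-Mitzenmacher double hashing, rather than computing setLocs independent hashes: the hash is split into a high half h and a low half l, and probe i touches bit (h + i*l) & size, where size is 2^n - 1 and therefore acts as a cheap modulus. A hedged sketch of that indexing:

```go
// probes lists the bit positions Add would set and Has would test for a
// given hash. shift is 64 minus log2 of the bitset's bit count; size is
// the corresponding 2^n - 1 mask, exactly as stored on Bloom above.
func probes(hash, size, shift, setLocs uint64) []uint64 {
	h := hash >> shift          // high bits of the hash
	l := hash << shift >> shift // low bits of the hash
	out := make([]uint64, 0, setLocs)
	for i := uint64(0); i < setLocs; i++ {
		out = append(out, (h+i*l)&size)
	}
	return out
}
```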
+func (bl *Bloom) IsSet(idx uint64) bool { + ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)) + r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1 + return r == 1 +} + +// bloomJSONImExport +// Im/Export structure used by JSONMarshal / JSONUnmarshal +type bloomJSONImExport struct { + FilterSet []byte + SetLocs uint64 +} + +// NewWithBoolset takes a []byte slice and number of locs per entry, +// returns the bloomfilter with a bitset populated according to the input []byte. +func newWithBoolset(bs *[]byte, locs uint64) *Bloom { + bloomfilter := NewBloomFilter(float64(len(*bs)<<3), float64(locs)) + for i, b := range *bs { + *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b + } + return bloomfilter +} + +// JSONUnmarshal takes JSON-Object (type bloomJSONImExport) as []bytes +// returns bloom32 / bloom64 object. +func JSONUnmarshal(dbData []byte) *Bloom { + bloomImEx := bloomJSONImExport{} + json.Unmarshal(dbData, &bloomImEx) + buf := bytes.NewBuffer(bloomImEx.FilterSet) + bs := buf.Bytes() + bf := newWithBoolset(&bs, bloomImEx.SetLocs) + return bf +} + +// JSONMarshal returns JSON-object (type bloomJSONImExport) as []byte. +func (bl Bloom) JSONMarshal() []byte { + bloomImEx := bloomJSONImExport{} + bloomImEx.SetLocs = bl.setLocs + bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3) + for i := range bloomImEx.FilterSet { + bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) + + uintptr(i))) + } + data, err := json.Marshal(bloomImEx) + if err != nil { + log.Fatal("json.Marshal failed: ", err) + } + return data +} diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.go b/vendor/github.com/dgraph-io/ristretto/z/rtutil.go new file mode 100644 index 0000000000..16aff0c9dc --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/z/rtutil.go @@ -0,0 +1,64 @@ +// MIT License + +// Copyright (c) 2019 Ewan Chou + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: + +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. + +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package z + +import ( + "unsafe" +) + +// NanoTime returns the current time in nanoseconds from a monotonic clock. +//go:linkname NanoTime runtime.nanotime +func NanoTime() int64 + +// CPUTicks is a faster alternative to NanoTime to measure time duration. 
+//go:linkname CPUTicks runtime.cputicks +func CPUTicks() int64 + +type stringStruct struct { + str unsafe.Pointer + len int +} + +//go:noescape +//go:linkname memhash runtime.memhash +func memhash(p unsafe.Pointer, h, s uintptr) uintptr + +// MemHash is the hash function used by go map, it utilizes available hardware instructions(behaves +// as aeshash if aes instruction is available). +// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash. +func MemHash(data []byte) uint64 { + ss := (*stringStruct)(unsafe.Pointer(&data)) + return uint64(memhash(ss.str, 0, uintptr(ss.len))) +} + +// MemHashString is the hash function used by go map, it utilizes available hardware instructions +// (behaves as aeshash if aes instruction is available). +// NOTE: The hash seed changes for every process. So, this cannot be used as a persistent hash. +func MemHashString(str string) uint64 { + ss := (*stringStruct)(unsafe.Pointer(&str)) + return uint64(memhash(ss.str, 0, uintptr(ss.len))) +} + +// FastRand is a fast thread local random function. +//go:linkname FastRand runtime.fastrand +func FastRand() uint32 diff --git a/vendor/github.com/dgraph-io/ristretto/z/rtutil.s b/vendor/github.com/dgraph-io/ristretto/z/rtutil.s new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/dgraph-io/ristretto/z/z.go b/vendor/github.com/dgraph-io/ristretto/z/z.go new file mode 100644 index 0000000000..a9c06b3720 --- /dev/null +++ b/vendor/github.com/dgraph-io/ristretto/z/z.go @@ -0,0 +1,56 @@ +/* + * Copyright 2019 Dgraph Labs, Inc. and Contributors + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package z + +import ( + "github.com/cespare/xxhash" +) + +// TODO: Figure out a way to re-use memhash for the second uint64 hash, we +// already know that appending bytes isn't reliable for generating a +// second hash (see Ristretto PR #88). +// +// We also know that while the Go runtime has a runtime memhash128 +// function, it's not possible to use it to generate [2]uint64 or +// anything resembling a 128bit hash, even though that's exactly what +// we need in this situation. +func KeyToHash(key interface{}) (uint64, uint64) { + if key == nil { + return 0, 0 + } + switch k := key.(type) { + case uint64: + return k, 0 + case string: + raw := []byte(k) + return MemHash(raw), xxhash.Sum64(raw) + case []byte: + return MemHash(k), xxhash.Sum64(k) + case byte: + return uint64(k), 0 + case int: + return uint64(k), 0 + case int32: + return uint64(k), 0 + case uint32: + return uint64(k), 0 + case int64: + return uint64(k), 0 + default: + panic("Key type not supported") + } +} diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 0000000000..8990f85b56 --- /dev/null +++ b/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,771 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/docs/generate-authors.sh`. + +Aanand Prasad +Aaron L. 
Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Abreto FU +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akhil Mohan +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Albin Kerouanton +Aleksa Sarai +Aleksander Piotrowski +Alessandro Boch +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anca Iordache +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +Andrii Berehuliak +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Arko Dasgupta +Arnaud Porterie +Arthur Peka +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Brian Wieder +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +ChaYoung You +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Corey Farrell +Corey Quon +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +Daisuke Ito +dalanlan +Damien Nadé +Dan Cotora +Daniel Artine +Daniel Cassidy +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Daniil Nikolenko +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Djordje Lukic +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dominik Braun +Don Kjer +Dong Chen +Doug Davis +Drew Erny +Ed Costello +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eric Curtin +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik St. Martin +Essam A. 
Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Hupfeld +Felix Rabe +Filip Jareš +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Goksu Toprak +Gou Rao +Grant Reaber +Greg Pflaum +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Hector S +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +Hugo Gabriel Eyherabide +huqun +Huu Nguyen +Hyzhou Zhy +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Heiss +Jason Plum +Jay Kamat +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Abbey +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jon Johnson +Jonatas Baldin +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonh Wendell +Jordan Jennings +Jose J. 
Escobar <53836904+jescobar-docker@users.noreply.github.com> +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +Kevin Woblick +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lee Gaines +Lei Jitang +Lennie +Leo Gallucci +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lifubang +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukas Heeren +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Maciej Kalisz +Madhav Puri +Madhu Venugopal +Madhur Batra +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marco Mariani +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Ileana +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathieu Champlon +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Mauro Porras P +Max Shytikov +Maxime Petazzoni +Mei ChunTao +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Miroslav Gula +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Morten Hekkvang +Moysés Borges +Mrunal Patel +muicoder +Muthukumar R +Máximo Cuadros +Mårten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. 
Tezer +Odin Ugedal +ohmystack +Olle Jonsson +Olli Janatuinen +Oscar Wieman +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Mulders +Paul Weaver +Pavel Pospisil +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +pidster +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Rahul Zoldyck +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Samarth Shah +Sambuddha Basu +Sami Tabet +Samuel Cochran +Samuel Karp +Santhosh Manohar +Sargun Dhillon +Saswat Bhattacharya +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Simon Heimberg +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +squeegels <1674195+squeegels@users.noreply.github.com> +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Taylor Jones +Tejaswini Duggaraju +Tengfei Wang +Teppei Fukuda +Thatcher Peskens +Thibault Coupin +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Sampson +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulrich Bareth +Ulysses Souza +Umesh Yadav +Valentin Lorentz +Venkateswara Reddy Bukkasamudram +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +徐俊杰 diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 0000000000..9c8e20ab85 --- /dev/null +++ b/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 0000000000..58b19b6d15 --- /dev/null +++ b/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
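The cli/config package added in the next hunk is the usual entry point for everything that follows. A minimal usage sketch (illustrative only, not part of the patch; it assumes the vendored import path resolves as declared in go.mod):

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/cli/cli/config"
    )

    func main() {
        // LoadDefaultConfigFile never returns nil: load errors are written
        // to the supplied writer and an initialized, empty ConfigFile is
        // returned instead.
        cfg := config.LoadDefaultConfigFile(os.Stderr)
        fmt.Println("config dir:", config.Dir())
        fmt.Println("credentials store:", cfg.CredentialsStore)
    }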
diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go new file mode 100644 index 0000000000..93275f3d98 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -0,0 +1,163 @@ +package config + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/pkg/homedir" + "github.com/pkg/errors" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" + contextsDir = "contexts" +) + +var ( + initConfigDir = new(sync.Once) + configDir string + homeDir string +) + +// resetHomeDir is used in testing to reset the "homeDir" package variable to +// force re-lookup of the home directory between tests. +func resetHomeDir() { + homeDir = "" +} + +func getHomeDir() string { + if homeDir == "" { + homeDir = homedir.Get() + } + return homeDir +} + +// resetConfigDir is used in testing to reset the "configDir" package variable +// and its sync.Once to force re-lookup between tests. +func resetConfigDir() { + configDir = "" + initConfigDir = new(sync.Once) +} + +func setConfigDir() { + if configDir != "" { + return + } + configDir = os.Getenv("DOCKER_CONFIG") + if configDir == "" { + configDir = filepath.Join(getHomeDir(), configFileDir) + } +} + +// Dir returns the directory the configuration file is stored in +func Dir() string { + initConfigDir.Do(setConfigDir) + return configDir +} + +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + +// SetDir sets the directory the configuration file is stored in +func SetDir(dir string) { + configDir = filepath.Clean(dir) +} + +// Path returns the path to a file relative to the config dir +func Path(p ...string) (string, error) { + path := filepath.Join(append([]string{Dir()}, p...)...) + if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { + return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) + } + return path, nil +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file +var printLegacyFileWarning bool + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. 
+// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + printLegacyFileWarning = false + + if configDir == "" { + configDir = Dir() + } + + filename := filepath.Join(configDir, ConfigFileName) + configFile := configfile.New(filename) + + // Try happy path first - latest config file + if file, err := os.Open(filename); err == nil { + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = errors.Wrap(err, filename) + } + return configFile, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return configFile, errors.Wrap(err, filename) + } + + // Can't find latest config file so check for the old one + filename = filepath.Join(getHomeDir(), oldConfigfile) + if file, err := os.Open(filename); err == nil { + printLegacyFileWarning = true + defer file.Close() + if err := configFile.LegacyLoadFromReader(file); err != nil { + return configFile, errors.Wrap(err, filename) + } + } + return configFile, nil +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { + configFile, err := Load(Dir()) + if err != nil { + fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) + } + if printLegacyFileWarning { + _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") + } + if !configFile.ContainsAuth() { + configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) + } + return configFile +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go new file mode 100644 index 0000000000..dc9f39eb7e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -0,0 +1,415 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. 
+ defaultIndexServer = "https://index.docker.io/v1/" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string `json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + Experimental string `json:"experimental,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` + Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Plugins map[string]map[string]string `json:"plugins,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` +} + +// ProxyConfig contains proxy configuration settings +type ProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` +} + +// KubernetesConfig contains Kubernetes orchestrator settings +type KubernetesConfig struct { + AllNamespaces string `json:"allNamespaces,omitempty"` +} + +// New initializes an empty configuration file for the given filename 'fn' +func New(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + Plugins: make(map[string]map[string]string), + Aliases: make(map[string]string), + } +} + +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return errors.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return errors.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexServer + configFile.AuthConfigs[defaultIndexServer] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + 
authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + if ac.Auth != "" { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return checkKubernetesConfiguration(configFile.Kubernetes) +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// GetAuthConfigs returns the mapping of repo to auth configuration +func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { + return configFile.AuthConfigs +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + // User-Agent header is automatically set, and should not be stored in the configuration + for v := range configFile.HTTPHeaders { + if strings.EqualFold(v, "User-Agent") { + delete(configFile.HTTPHeaders, v) + } + } + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() (retErr error) { + if configFile.Filename == "" { + return errors.Errorf("Can't save config with empty filename") + } + + dir := filepath.Dir(configFile.Filename) + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) + if err != nil { + return err + } + defer func() { + temp.Close() + if retErr != nil { + if err := os.Remove(temp.Name()); err != nil { + logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") + } + } + }() + + err = configFile.SaveToWriter(temp) + if err != nil { + return err + } + + if err := temp.Close(); err != nil { + return errors.Wrap(err, "error closing temp file") + } + + // Handle situation where the configfile is a symlink + cfgFile := configFile.Filename + if f, err := os.Readlink(cfgFile); err == nil { + cfgFile = f + } + + // Try copying the current config file (if any) ownership and permissions + copyFilePermissions(cfgFile, temp.Name()) + return os.Rename(temp.Name(), cfgFile) +} + +// ParseProxyConfig computes proxy configuration by 
retrieving the config for the provided host and +// then checking this against any environment variables provided to the container +func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { + var cfgKey string + + if _, ok := configFile.Proxies[host]; !ok { + cfgKey = "default" + } else { + cfgKey = host + } + + config := configFile.Proxies[cfgKey] + permitted := map[string]*string{ + "HTTP_PROXY": &config.HTTPProxy, + "HTTPS_PROXY": &config.HTTPSProxy, + "NO_PROXY": &config.NoProxy, + "FTP_PROXY": &config.FTPProxy, + } + m := runOpts + if m == nil { + m = make(map[string]*string) + } + for k := range permitted { + if *permitted[k] == "" { + continue + } + if _, ok := m[k]; !ok { + m[k] = permitted[k] + } + if _, ok := m[strings.ToLower(k)]; !ok { + m[strings.ToLower(k)] = permitted[k] + } + } + return m +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", errors.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", errors.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} + +// GetCredentialsStore returns a new credentials store from the settings in the +// configuration file +func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { + return newNativeStore(configFile, helper) + } + return credentials.NewFileStore(configFile) +} + +// var for unit testing. +var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { + return credentials.NewNativeStore(configFile, helperSuffix) +} + +// GetAuthConfig for a repository from the credential store +func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { + return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { + if c.CredentialHelpers != nil && registryHostname != "" { + if helper, exists := c.CredentialHelpers[registryHostname]; exists { + return helper + } + } + return c.CredentialsStore +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. 
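+//
+// Illustrative sketch, not part of the vendored file: a caller that wants
+// every stored login, whichever helper holds it, might write
+//
+//	cfg := config.LoadDefaultConfigFile(os.Stderr)
+//	creds, err := cfg.GetAllCredentials()
+//	if err != nil {
+//		return err
+//	}
+//	for registry, ac := range creds {
+//		fmt.Println(registry, ac.Username)
+//	}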
+func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + addAll := func(from map[string]types.AuthConfig) { + for reg, ac := range from { + auths[reg] = ac + } + } + + defaultStore := configFile.GetCredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(newAuths) + + // Auth configs from a registry-specific helper should override those from the default store. + for registryHostname := range configFile.CredentialHelpers { + newAuth, err := configFile.GetAuthConfig(registryHostname) + if err != nil { + return nil, err + } + auths[registryHostname] = newAuth + } + return auths, nil +} + +// GetFilename returns the file name that this config file is based on. +func (configFile *ConfigFile) GetFilename() string { + return configFile.Filename +} + +// PluginConfig retrieves the requested option for the given plugin. +func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { + if configFile.Plugins == nil { + return "", false + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + return "", false + } + value, ok := pluginConfig[option] + return value, ok +} + +// SetPluginConfig sets the option to the given value for the given +// plugin. Passing a value of "" will remove the option. If removing +// the final config item for a given plugin then also cleans up the +// overall plugin entry. +func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { + if configFile.Plugins == nil { + configFile.Plugins = make(map[string]map[string]string) + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + pluginConfig = make(map[string]string) + configFile.Plugins[pluginname] = pluginConfig + } + if value != "" { + pluginConfig[option] = value + } else { + delete(pluginConfig, option) + } + if len(pluginConfig) == 0 { + delete(configFile.Plugins, pluginname) + } +} + +func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { + if kubeConfig == nil { + return nil + } + switch kubeConfig.AllNamespaces { + case "": + case "enabled": + case "disabled": + default: + return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go new file mode 100644 index 0000000000..3ca65c6140 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package configfile + +import ( + "os" + "syscall" +) + +// copyFilePermissions copies file ownership and permissions from "src" to "dst", +// ignoring any error during the process. 
+func copyFilePermissions(src, dst string) { + var ( + mode os.FileMode = 0600 + uid, gid int + ) + + fi, err := os.Stat(src) + if err != nil { + return + } + if fi.Mode().IsRegular() { + mode = fi.Mode() + } + if err := os.Chmod(dst, mode); err != nil { + return + } + + uid = int(fi.Sys().(*syscall.Stat_t).Uid) + gid = int(fi.Sys().(*syscall.Stat_t).Gid) + + if uid > 0 && gid > 0 { + _ = os.Chown(dst, uid, gid) + } +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go new file mode 100644 index 0000000000..42fffc39ad --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go @@ -0,0 +1,5 @@ +package configfile + +func copyFilePermissions(src, dst string) { + // TODO implement for Windows +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go new file mode 100644 index 0000000000..28d58ec48d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go new file mode 100644 index 0000000000..402235bff0 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -0,0 +1,21 @@ +package credentials + +import ( + exec "golang.org/x/sys/execabs" +) + +// DetectDefaultStore return the default credentials store for the platform if +// the store executable is available. 
+func DetectDefaultStore(store string) string { + platformDefault := defaultCredentialsStore() + + // user defined or no default for platform + if store != "" || platformDefault == "" { + return store + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { + return platformDefault + } + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go new file mode 100644 index 0000000000..5d42dec622 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "osxkeychain" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go new file mode 100644 index 0000000000..a9012c6d4a --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go @@ -0,0 +1,13 @@ +package credentials + +import ( + "os/exec" +) + +func defaultCredentialsStore() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go new file mode 100644 index 0000000000..3028168ac2 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -0,0 +1,7 @@ +// +build !windows,!darwin,!linux + +package credentials + +func defaultCredentialsStore() string { + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go new file mode 100644 index 0000000000..bb799ca61b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "wincred" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go new file mode 100644 index 0000000000..e509820b73 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -0,0 +1,81 @@ +package credentials + +import ( + "strings" + + "github.com/docker/cli/cli/config/types" +) + +type store interface { + Save() error + GetAuthConfigs() map[string]types.AuthConfig + GetFilename() string +} + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file store +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file store) Store { + return &fileStore{file: file} +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.GetAuthConfigs(), serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. 
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.GetAuthConfigs()[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.GetAuthConfigs() { + if serverAddress == ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.GetAuthConfigs(), nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig + return c.file.Save() +} + +func (c *fileStore) GetFilename() string { + return c.file.GetFilename() +} + +func (c *fileStore) IsFileStore() bool { + return true +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go new file mode 100644 index 0000000000..afe542cc3c --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -0,0 +1,143 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file store, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. 
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keychain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. +func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 0000000000..056af6b842 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go new file mode 100644 index 0000000000..128da447b5 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go @@ -0,0 +1,283 @@ +// Package commandconn provides a net.Conn implementation that can be used for +// proxying (or emulating) stream via a custom command. +// +// For example, to provide an http.Client that can connect to a Docker daemon +// running in a Docker container ("DIND"): +// +// httpClient := &http.Client{ +// Transport: &http.Transport{ +// DialContext: func(ctx context.Context, _network, _addr string) (net.Conn, error) { +// return commandconn.New(ctx, "docker", "exec", "-it", containerID, "docker", "system", "dial-stdio") +// }, +// }, +// } +package commandconn + +import ( + "bytes" + "context" + "fmt" + "io" + "net" + "os" + "runtime" + "strings" + "sync" + "syscall" + "time" + + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + exec "golang.org/x/sys/execabs" +) + +// New returns net.Conn +func New(ctx context.Context, cmd string, args ...string) (net.Conn, error) { + var ( + c commandConn + err error + ) + c.cmd = exec.CommandContext(ctx, cmd, args...) + // we assume that args never contains sensitive information + logrus.Debugf("commandconn: starting %s with %v", cmd, args) + c.cmd.Env = os.Environ() + c.cmd.SysProcAttr = &syscall.SysProcAttr{} + setPdeathsig(c.cmd) + createSession(c.cmd) + c.stdin, err = c.cmd.StdinPipe() + if err != nil { + return nil, err + } + c.stdout, err = c.cmd.StdoutPipe() + if err != nil { + return nil, err + } + c.cmd.Stderr = &stderrWriter{ + stderrMu: &c.stderrMu, + stderr: &c.stderr, + debugPrefix: fmt.Sprintf("commandconn (%s):", cmd), + } + c.localAddr = dummyAddr{network: "dummy", s: "dummy-0"} + c.remoteAddr = dummyAddr{network: "dummy", s: "dummy-1"} + return &c, c.cmd.Start() +} + +// commandConn implements net.Conn +type commandConn struct { + cmd *exec.Cmd + cmdExited bool + cmdWaitErr error + cmdMutex sync.Mutex + stdin io.WriteCloser + stdout io.ReadCloser + stderrMu sync.Mutex + stderr bytes.Buffer + stdioClosedMu sync.Mutex // for stdinClosed and stdoutClosed + stdinClosed bool + stdoutClosed bool + localAddr net.Addr + remoteAddr net.Addr +} + +// killIfStdioClosed kills the cmd if both stdin and stdout are closed. +func (c *commandConn) killIfStdioClosed() error { + c.stdioClosedMu.Lock() + stdioClosed := c.stdoutClosed && c.stdinClosed + c.stdioClosedMu.Unlock() + if !stdioClosed { + return nil + } + return c.kill() +} + +// killAndWait tries sending SIGTERM to the process before sending SIGKILL. +func killAndWait(cmd *exec.Cmd) error { + var werr error + if runtime.GOOS != "windows" { + werrCh := make(chan error) + go func() { werrCh <- cmd.Wait() }() + cmd.Process.Signal(syscall.SIGTERM) + select { + case werr = <-werrCh: + case <-time.After(3 * time.Second): + cmd.Process.Kill() + werr = <-werrCh + } + } else { + cmd.Process.Kill() + werr = cmd.Wait() + } + return werr +} + +// kill returns nil if the command terminated, regardless to the exit status. 
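+//
+// Orientation, not part of the vendored file: Close calls CloseRead and
+// CloseWrite, which call killIfStdioClosed once both ends are closed; kill
+// then delegates to killAndWait above, which on non-Windows sends SIGTERM
+// and escalates to SIGKILL after three seconds.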
+func (c *commandConn) kill() error { + var werr error + c.cmdMutex.Lock() + if c.cmdExited { + werr = c.cmdWaitErr + } else { + werr = killAndWait(c.cmd) + c.cmdWaitErr = werr + c.cmdExited = true + } + c.cmdMutex.Unlock() + if werr == nil { + return nil + } + wExitErr, ok := werr.(*exec.ExitError) + if ok { + if wExitErr.ProcessState.Exited() { + return nil + } + } + return errors.Wrapf(werr, "commandconn: failed to wait") +} + +func (c *commandConn) onEOF(eof error) error { + // when we got EOF, the command is going to be terminated + var werr error + c.cmdMutex.Lock() + if c.cmdExited { + werr = c.cmdWaitErr + } else { + werrCh := make(chan error) + go func() { werrCh <- c.cmd.Wait() }() + select { + case werr = <-werrCh: + c.cmdWaitErr = werr + c.cmdExited = true + case <-time.After(10 * time.Second): + c.cmdMutex.Unlock() + c.stderrMu.Lock() + stderr := c.stderr.String() + c.stderrMu.Unlock() + return errors.Errorf("command %v did not exit after %v: stderr=%q", c.cmd.Args, eof, stderr) + } + } + c.cmdMutex.Unlock() + if werr == nil { + return eof + } + c.stderrMu.Lock() + stderr := c.stderr.String() + c.stderrMu.Unlock() + return errors.Errorf("command %v has exited with %v, please make sure the URL is valid, and Docker 18.09 or later is installed on the remote host: stderr=%s", c.cmd.Args, werr, stderr) +} + +func ignorableCloseError(err error) bool { + errS := err.Error() + ss := []string{ + os.ErrClosed.Error(), + } + for _, s := range ss { + if strings.Contains(errS, s) { + return true + } + } + return false +} + +func (c *commandConn) CloseRead() error { + // NOTE: maybe already closed here + if err := c.stdout.Close(); err != nil && !ignorableCloseError(err) { + logrus.Warnf("commandConn.CloseRead: %v", err) + } + c.stdioClosedMu.Lock() + c.stdoutClosed = true + c.stdioClosedMu.Unlock() + if err := c.killIfStdioClosed(); err != nil { + logrus.Warnf("commandConn.CloseRead: %v", err) + } + return nil +} + +func (c *commandConn) Read(p []byte) (int, error) { + n, err := c.stdout.Read(p) + if err == io.EOF { + err = c.onEOF(err) + } + return n, err +} + +func (c *commandConn) CloseWrite() error { + // NOTE: maybe already closed here + if err := c.stdin.Close(); err != nil && !ignorableCloseError(err) { + logrus.Warnf("commandConn.CloseWrite: %v", err) + } + c.stdioClosedMu.Lock() + c.stdinClosed = true + c.stdioClosedMu.Unlock() + if err := c.killIfStdioClosed(); err != nil { + logrus.Warnf("commandConn.CloseWrite: %v", err) + } + return nil +} + +func (c *commandConn) Write(p []byte) (int, error) { + n, err := c.stdin.Write(p) + if err == io.EOF { + err = c.onEOF(err) + } + return n, err +} + +func (c *commandConn) Close() error { + var err error + if err = c.CloseRead(); err != nil { + logrus.Warnf("commandConn.Close: CloseRead: %v", err) + } + if err = c.CloseWrite(); err != nil { + logrus.Warnf("commandConn.Close: CloseWrite: %v", err) + } + return err +} + +func (c *commandConn) LocalAddr() net.Addr { + return c.localAddr +} +func (c *commandConn) RemoteAddr() net.Addr { + return c.remoteAddr +} +func (c *commandConn) SetDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetDeadline(%v)", t) + return nil +} +func (c *commandConn) SetReadDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetReadDeadline(%v)", t) + return nil +} +func (c *commandConn) SetWriteDeadline(t time.Time) error { + logrus.Debugf("unimplemented call: SetWriteDeadline(%v)", t) + return nil +} + +type dummyAddr struct { + network string + s string +} + +func (d dummyAddr) 
Network() string { + return d.network +} + +func (d dummyAddr) String() string { + return d.s +} + +type stderrWriter struct { + stderrMu *sync.Mutex + stderr *bytes.Buffer + debugPrefix string +} + +func (w *stderrWriter) Write(p []byte) (int, error) { + logrus.Debugf("%s%s", w.debugPrefix, string(p)) + w.stderrMu.Lock() + if w.stderr.Len() > 4096 { + w.stderr.Reset() + } + n, err := w.stderr.Write(p) + w.stderrMu.Unlock() + return n, err +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_linux.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_linux.go new file mode 100644 index 0000000000..1cdd788cc0 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_linux.go @@ -0,0 +1,10 @@ +package commandconn + +import ( + "os/exec" + "syscall" +) + +func setPdeathsig(cmd *exec.Cmd) { + cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go new file mode 100644 index 0000000000..ab07166724 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/pdeathsig_nolinux.go @@ -0,0 +1,10 @@ +// +build !linux + +package commandconn + +import ( + "os/exec" +) + +func setPdeathsig(cmd *exec.Cmd) { +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go new file mode 100644 index 0000000000..6448500d63 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_unix.go @@ -0,0 +1,13 @@ +// +build !windows + +package commandconn + +import ( + "os/exec" +) + +func createSession(cmd *exec.Cmd) { + // for supporting ssh connection helper with ProxyCommand + // https://github.com/docker/cli/issues/1707 + cmd.SysProcAttr.Setsid = true +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_windows.go b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_windows.go new file mode 100644 index 0000000000..b926074500 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/commandconn/session_windows.go @@ -0,0 +1,8 @@ +package commandconn + +import ( + "os/exec" +) + +func createSession(cmd *exec.Cmd) { +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/connhelper.go b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go new file mode 100644 index 0000000000..9ac9d6744d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/connhelper.go @@ -0,0 +1,68 @@ +// Package connhelper provides helpers for connecting to a remote daemon host with custom logic. +package connhelper + +import ( + "context" + "net" + "net/url" + + "github.com/docker/cli/cli/connhelper/commandconn" + "github.com/docker/cli/cli/connhelper/ssh" + "github.com/pkg/errors" +) + +// ConnectionHelper allows to connect to a remote host with custom stream provider binary. +type ConnectionHelper struct { + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + Host string // dummy URL used for HTTP requests. e.g. "http://docker" +} + +// GetConnectionHelper returns Docker-specific connection helper for the given URL. +// GetConnectionHelper returns nil without error when no helper is registered for the scheme. +// +// ssh://@ URL requires Docker 18.09 or later on the remote host. 
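+//
+// Illustrative sketch, not part of the vendored file: the returned Dialer
+// plugs straight into an http.Transport:
+//
+//	helper, err := connhelper.GetConnectionHelper("ssh://user@host")
+//	if err != nil {
+//		return err
+//	}
+//	httpClient := &http.Client{
+//		Transport: &http.Transport{DialContext: helper.Dialer},
+//	}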
+func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) { + return getConnectionHelper(daemonURL, nil) +} + +// GetConnectionHelperWithSSHOpts returns Docker-specific connection helper for +// the given URL, and accepts additional options for ssh connections. It returns +// nil without error when no helper is registered for the scheme. +// +// Requires Docker 18.09 or later on the remote host. +func GetConnectionHelperWithSSHOpts(daemonURL string, sshFlags []string) (*ConnectionHelper, error) { + return getConnectionHelper(daemonURL, sshFlags) +} + +func getConnectionHelper(daemonURL string, sshFlags []string) (*ConnectionHelper, error) { + u, err := url.Parse(daemonURL) + if err != nil { + return nil, err + } + switch scheme := u.Scheme; scheme { + case "ssh": + sp, err := ssh.ParseURL(daemonURL) + if err != nil { + return nil, errors.Wrap(err, "ssh host connection is not valid") + } + return &ConnectionHelper{ + Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { + return commandconn.New(ctx, "ssh", append(sshFlags, sp.Args("docker", "system", "dial-stdio")...)...) + }, + Host: "http://docker.example.com", + }, nil + } + // Future version may support plugins via ~/.docker/config.json. e.g. "dind" + // See docker/cli#889 for the previous discussion. + return nil, err +} + +// GetCommandConnectionHelper returns Docker-specific connection helper constructed from an arbitrary command. +func GetCommandConnectionHelper(cmd string, flags ...string) (*ConnectionHelper, error) { + return &ConnectionHelper{ + Dialer: func(ctx context.Context, network, addr string) (net.Conn, error) { + return commandconn.New(ctx, cmd, flags...) + }, + Host: "http://docker.example.com", + }, nil +} diff --git a/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go new file mode 100644 index 0000000000..bde01ae7f7 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/connhelper/ssh/ssh.go @@ -0,0 +1,64 @@ +// Package ssh provides the connection helper for ssh:// URL. +package ssh + +import ( + "net/url" + + "github.com/pkg/errors" +) + +// ParseURL parses URL +func ParseURL(daemonURL string) (*Spec, error) { + u, err := url.Parse(daemonURL) + if err != nil { + return nil, err + } + if u.Scheme != "ssh" { + return nil, errors.Errorf("expected scheme ssh, got %q", u.Scheme) + } + + var sp Spec + + if u.User != nil { + sp.User = u.User.Username() + if _, ok := u.User.Password(); ok { + return nil, errors.New("plain-text password is not supported") + } + } + sp.Host = u.Hostname() + if sp.Host == "" { + return nil, errors.Errorf("no host specified") + } + sp.Port = u.Port() + if u.Path != "" { + return nil, errors.Errorf("extra path after the host: %q", u.Path) + } + if u.RawQuery != "" { + return nil, errors.Errorf("extra query after the host: %q", u.RawQuery) + } + if u.Fragment != "" { + return nil, errors.Errorf("extra fragment after the host: %q", u.Fragment) + } + return &sp, err +} + +// Spec of SSH URL +type Spec struct { + User string + Host string + Port string +} + +// Args returns args except "ssh" itself combined with optional additional command args +func (sp *Spec) Args(add ...string) []string { + var args []string + if sp.User != "" { + args = append(args, "-l", sp.User) + } + if sp.Port != "" { + args = append(args, "-p", sp.Port) + } + args = append(args, "--", sp.Host) + args = append(args, add...) 
+ return args +} diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go new file mode 100644 index 0000000000..71327dca72 --- /dev/null +++ b/vendor/github.com/docker/distribution/digestset/set.go @@ -0,0 +1,247 @@ +package digestset + +import ( + "errors" + "sort" + "strings" + "sync" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrDigestNotFound is used when a matching digest + // could not be found in a set. + ErrDigestNotFound = errors.New("digest not found") + + // ErrDigestAmbiguous is used when multiple digests + // are found in a set. None of the matching digests + // should be considered valid matches. + ErrDigestAmbiguous = errors.New("ambiguous digest string") +) + +// Set is used to hold a unique set of digests which +// may be easily referenced by a string +// representation of the digest as well as a short representation. +// The uniqueness of the short representation is based on other +// digests in the set. If digests are omitted from this set, +// collisions in a larger set may not be detected; therefore it +// is important to always do short representation lookups on +// the complete set of digests. To mitigate collisions, an +// appropriately long short code should be used. +type Set struct { + mutex sync.RWMutex + entries digestEntries +} + +// NewSet creates an empty set of digests +// which may have digests added. +func NewSet() *Set { + return &Set{ + entries: digestEntries{}, + } +} + +// checkShortMatch checks whether two digests match as either whole +// values or short values. This function does not test equality, +// rather whether the second value could match against the first +// value. +func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool { + if len(hex) == len(shortHex) { + if hex != shortHex { + return false + } + if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + } else if !strings.HasPrefix(hex, shortHex) { + return false + } else if len(shortAlg) > 0 && string(alg) != shortAlg { + return false + } + return true +} + +// Lookup looks for a digest matching the given string representation. +// If no digests could be found ErrDigestNotFound will be returned +// with an empty digest value. If multiple matches are found +// ErrDigestAmbiguous will be returned with an empty digest value.
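A short sketch of the short-prefix lookup behavior documented above; the digest values are invented for the example.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	ds := digestset.NewSet()
	// Two fabricated digests sharing the "aaaa" hex prefix.
	d1 := digest.Digest("sha256:aaaa" + strings.Repeat("1", 60))
	d2 := digest.Digest("sha256:aaaa" + strings.Repeat("2", 60))
	_ = ds.Add(d1)
	_ = ds.Add(d2)

	// "aaaa" matches both entries, so the lookup is ambiguous.
	if _, err := ds.Lookup("aaaa"); err == digestset.ErrDigestAmbiguous {
		fmt.Println("ambiguous prefix")
	}
	// "aaaa1" uniquely identifies d1.
	found, err := ds.Lookup("aaaa1")
	fmt.Println(found, err)
}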
+func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An error will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is past the end or the value at idx is not this digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value; the maximum length may be the +// entire value of the digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique.
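A small sketch of ShortCodeTable with invented digests, showing how codes are extended just far enough to become unique.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/digestset"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	ds := digestset.NewSet()
	// Fabricated digests that share only the "ab" prefix.
	d1 := digest.Digest("sha256:ab12" + strings.Repeat("0", 60))
	d2 := digest.Digest("sha256:ab34" + strings.Repeat("0", 60))
	_ = ds.Add(d1)
	_ = ds.Add(d2)

	// Ask for 3-character codes; "ab1" and "ab3" are already unique here.
	for dgst, code := range digestset.ShortCodeTable(ds, 3) {
		fmt.Println(code, "->", dgst)
	}
}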
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 0000000000..978df7eabb --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 0000000000..2d71fc5e9f --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,170 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". 
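A quick sketch of the normalization round-trip described above: a familiar name is expanded to a fully qualified reference and then familiarized again.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("ubuntu:20.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String())                  // docker.io/library/ubuntu:20.04
	fmt.Println(reference.FamiliarString(named)) // ubuntu:20.04
	fmt.Println(reference.Domain(named))         // docker.io
	fmt.Println(reference.Path(named))           // library/ubuntu
}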
+type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. If the value may be an identifier, +// use ParseAnyReference. +func ParseNormalizedNamed(s string) (Named, error) { + if ok := anchoredIdentifierRegexp.MatchString(s); ok { + return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) + } + domain, remainder := splitDockerDomain(s) + var remoteName string + if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { + remoteName = remainder[:tagSep] + } else { + remoteName = remainder + } + if strings.ToLower(remoteName) != remoteName { + return nil, errors.New("invalid reference format: repository name must be lowercase") + } + + ref, err := Parse(domain + "/" + remainder) + if err != nil { + return nil, err + } + named, isNamed := ref.(Named) + if !isNamed { + return nil, fmt.Errorf("reference %s has no name", ref.String()) + } + return named, nil +} + +// splitDockerDomain splits a repository name to domain and remotename string. +// If no valid domain is found, the default domain is used. The repository +// name must already be validated. +func splitDockerDomain(name string) (domain, remainder string) { + i := strings.IndexRune(name, '/') + if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { + domain, remainder = defaultDomain, name + } else { + domain, remainder = name[:i], name[i+1:] + } + if domain == legacyDefaultDomain { + domain = defaultDomain + } + if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { + remainder = officialRepoName + "/" + remainder + } + return +} + +// familiarizeName returns a shortened version of the name familiar +// to the Docker UI. Familiar names have the default domain +// "docker.io" and "library/" repository prefix removed. +// For example, "docker.io/library/redis" will have the familiar +// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". +// Returns a familiarized named only reference. +func familiarizeName(named namedRepository) repository { + repo := repository{ + domain: named.Domain(), + path: named.Path(), + } + + if repo.domain == defaultDomain { + repo.domain = "" + // Handle official repositories which have the pattern "library/" + if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { + repo.path = split[1] + } + } + return repo +} + +func (r reference) Familiar() Named { + return reference{ + namedRepository: familiarizeName(r.namedRepository), + tag: r.tag, + digest: r.digest, + } +} + +func (r repository) Familiar() Named { + return familiarizeName(r) +} + +func (t taggedReference) Familiar() Named { + return taggedReference{ + namedRepository: familiarizeName(t.namedRepository), + tag: t.tag, + } +} + +func (c canonicalReference) Familiar() Named { + return canonicalReference{ + namedRepository: familiarizeName(c.namedRepository), + digest: c.digest, + } +} + +// TagNameOnly adds the default tag "latest" to a reference if it only has +// a repo name.
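A brief sketch of TagNameOnly, which applies the default tag only to name-only references.

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	named, _ := reference.ParseNormalizedNamed("redis")
	fmt.Println(reference.TagNameOnly(named).String()) // docker.io/library/redis:latest

	// Already-tagged references pass through unchanged.
	tagged, _ := reference.ParseNormalizedNamed("redis:6")
	fmt.Println(reference.TagNameOnly(tagged).String()) // docker.io/library/redis:6
}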
+func TagNameOnly(ref Named) Named { + if IsNameOnly(ref) { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid; to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return ref +} + +// ParseAnyReference parses a reference string as a possible identifier, +// full digest, or familiar name. +func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 0000000000..2f66cca87a --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. + ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by the field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// by which it can be referenced. +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name, including a name with domain and digest. +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string.
If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. +func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. 
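A sketch composing a tagged, canonical reference with WithTag and WithDigest; the digest value is fabricated for the example.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

func main() {
	name, _ := reference.WithName("docker.io/library/busybox")
	tagged, _ := reference.WithTag(name, "stable")

	// Fabricated digest for illustration only.
	dgst := digest.Digest("sha256:" + strings.Repeat("a", 64))
	canonical, _ := reference.WithDigest(tagged, dgst)

	// Because the input is Tagged, the result keeps the tag as well:
	// docker.io/library/busybox:stable@sha256:aaaa...
	fmt.Println(canonical.String())
}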
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 0000000000..7860349320 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allows one period, one or two underscores and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscores and multiple dashes.
+ nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. 
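A sketch of the capture groups these combinators produce, using the exported ReferenceRegexp defined above; the input reference is fabricated.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/distribution/reference"
)

func main() {
	ref := "docker.io/library/redis:7@sha256:" + strings.Repeat("b", 64)
	m := reference.ReferenceRegexp.FindStringSubmatch(ref)
	if m == nil {
		panic("reference did not match")
	}
	// Group 1: name, group 2: tag, group 3: digest.
	fmt.Println(m[1]) // docker.io/library/redis
	fmt.Println(m[2]) // 7
	fmt.Println(m[3]) // sha256:bbbb...
}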
+func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. +func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 0000000000..6d9bb4b62a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,267 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error, allowing +// users of each to just call ErrorCode to get the real ID of each. +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. +func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returns the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// sets the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human-readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often capitalized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human-readable description of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the error's purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the HTTP status code that is associated with + // this error condition. + HTTPStatusCode int +} + +// ParseErrorCode returns the error code corresponding to the given string value. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors.
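A sketch of the JSON envelope these types serialize to, using the ErrorCodeUnknown and ErrorCodeUnsupported codes registered by this package; the "boom" message is invented.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	errs := errcode.Errors{
		errcode.ErrorCodeUnknown.WithMessage("boom"),
		errcode.ErrorCodeUnsupported,
	}
	b, _ := json.Marshal(errs)
	// Prints something like:
	// {"errors":[{"code":"UNKNOWN","message":"boom"},{"code":"UNSUPPORTED","message":"The operation is unsupported."}]}
	fmt.Println(string(b))
}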
+func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts a slice of error, ErrorCode or Error into a +// slice of Error, then serializes it +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr.(type) { + case ErrorCode: + err = daErr.(ErrorCode).WithDetail(nil) + case Error: + err = daErr.(Error) + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was set up and the Message field was + // left empty (meaning it's "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into a slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Errors without details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Errors with details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 0000000000..d77e70473e --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,40 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope. +func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along.
+ err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 0000000000..d1e8826c6d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 0000000000..2c3ebe1653 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. 
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 0000000000..6e3f1ccc41 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// Manager manages the challenges for endpoints. +// The challenges are pulled out of HTTP responses. Only +// responses which expect challenges should be added to +// the manager, since a non-unauthorized request will be +// viewed as not requiring challenges. +type Manager interface { + // GetChallenges returns the challenges for the given + // endpoint URL. + GetChallenges(endpoint url.URL) ([]Challenge, error) + + // AddResponse adds the response to the challenge + // manager. The challenges will be parsed out of + // the WWW-Authenticate headers and added to the + // URL which produced the response. If the + // response was authorized, any challenges for the + // endpoint will be cleared. + AddResponse(resp *http.Response) error +} + +// NewSimpleManager returns an instance of +// Manager which only maps endpoints to challenges +// based on the responses which have been added to the +// manager. The simple manager will make no attempt to +// perform requests on the endpoints or cache the responses +// to a backend. +func NewSimpleManager() Manager { + return &simpleManager{ + Challenges: make(map[string][]Challenge), + } +} + +type simpleManager struct { + sync.RWMutex + Challenges map[string][]Challenge +} + +func normalizeURL(endpoint *url.URL) { + endpoint.Host = strings.ToLower(endpoint.Host) + endpoint.Host = canonicalAddr(endpoint) +} + +func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { + normalizeURL(&endpoint) + + m.RLock() + defer m.RUnlock() + challenges := m.Challenges[endpoint.String()] + return challenges, nil +} + +func (m *simpleManager) AddResponse(resp *http.Response) error { + challenges := ResponseChallenges(resp) + if resp.Request == nil { + return fmt.Errorf("missing request reference") + } + urlCopy := url.URL{ + Path: resp.Request.URL.Path, + Host: resp.Request.URL.Host, + Scheme: resp.Request.URL.Scheme, + } + normalizeURL(&urlCopy) + + m.Lock() + defer m.Unlock() + m.Challenges[urlCopy.String()] = challenges + return nil +} + +// Octet types from RFC 2616.
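A sketch of parsing WWW-Authenticate challenges from a 401 response; the bearer challenge value below is a typical registry-style example invented for illustration.

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client/auth/challenge"
)

func main() {
	// A fabricated 401 response carrying a bearer challenge.
	resp := &http.Response{
		StatusCode: http.StatusUnauthorized,
		Header:     http.Header{},
	}
	resp.Header.Add("WWW-Authenticate",
		`Bearer realm="https://auth.docker.io/token",service="registry.docker.io"`)

	for _, c := range challenge.ResponseChallenges(resp) {
		// The scheme is lowercased during parsing:
		// bearer https://auth.docker.io/token registry.docker.io
		fmt.Println(c.Scheme, c.Parameters["realm"], c.Parameters["service"])
	}
}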
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ResponseChallenges returns a list of authorization challenges
+// for the given http Response. Challenges are only checked if
+// the response status code was a 401.
+func ResponseChallenges(resp *http.Response) []Challenge {
+	if resp.StatusCode == http.StatusUnauthorized {
+		// Parse the WWW-Authenticate Header and store the challenges
+		// on this endpoint object.
+		return parseAuthHeader(resp.Header)
+	}
+
+	return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
}
diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE
new file mode 100644
index 0000000000..1ea555e2af
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2016 David Calavera
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go
new file mode 100644
index 0000000000..d1d0434cb5
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go
@@ -0,0 +1,121 @@
+package client
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/docker/docker-credential-helpers/credentials"
+)
+
+// isValidCredsMessage checks whether 'msg' contains an invalid-credentials error message.
+// It returns nil when the message is free of such errors, and the matching error otherwise.
+// Error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername.
+func isValidCredsMessage(msg string) error {
+	if credentials.IsCredentialsMissingServerURLMessage(msg) {
+		return credentials.NewErrCredentialsMissingServerURL()
+	}
+
+	if credentials.IsCredentialsMissingUsernameMessage(msg) {
+		return credentials.NewErrCredentialsMissingUsername()
+	}
+
+	return nil
+}
+
+// Store uses an external program to save credentials.
+func Store(program ProgramFunc, creds *credentials.Credentials) error {
+	cmd := program("store")
+
+	buffer := new(bytes.Buffer)
+	if err := json.NewEncoder(buffer).Encode(creds); err != nil {
+		return err
+	}
+	cmd.Input(buffer)
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	return nil
+}
+
+// Get executes an external program to get the credentials from a native store.
+func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
+	cmd := program("get")
+	cmd.Input(strings.NewReader(serverURL))
+
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if credentials.IsErrCredentialsNotFoundMessage(t) {
+			return nil, credentials.NewErrCredentialsNotFound()
+		}
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
+	}
+
+	resp := &credentials.Credentials{
+		ServerURL: serverURL,
+	}
+
+	if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
+
+// Erase executes a program to remove the server credentials from the native store.
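+//
+// A minimal usage sketch (editor's note; "docker-credential-pass" stands in
+// for whichever helper binary is actually installed):
+//
+//	p := NewShellProgramFunc("docker-credential-pass")
+//	if err := Erase(p, "https://registry.example.com"); err != nil {
+//		// the helper reported a failure; err may wrap the helper's output
+//	}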
+func Erase(program ProgramFunc, serverURL string) error {
+	cmd := program("erase")
+	cmd.Input(strings.NewReader(serverURL))
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	return nil
+}
+
+// List executes a program to list server credentials in the native store.
+func List(program ProgramFunc) (map[string]string, error) {
+	cmd := program("list")
+	cmd.Input(strings.NewReader("unused"))
+	out, err := cmd.Output()
+	if err != nil {
+		t := strings.TrimSpace(string(out))
+
+		if isValidErr := isValidCredsMessage(t); isValidErr != nil {
+			err = isValidErr
+		}
+
+		return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t)
+	}
+
+	var resp map[string]string
+	if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil {
+		return nil, err
+	}
+
+	return resp, nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go
new file mode 100644
index 0000000000..0183c06393
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go
@@ -0,0 +1,57 @@
+package client
+
+import (
+	"fmt"
+	"io"
+	"os"
+
+	exec "golang.org/x/sys/execabs"
+)
+
+// Program is an interface to execute external programs.
+type Program interface {
+	Output() ([]byte, error)
+	Input(in io.Reader)
+}
+
+// ProgramFunc is a type of function that initializes programs based on arguments.
+type ProgramFunc func(args ...string) Program
+
+// NewShellProgramFunc creates programs that are executed in a Shell.
+func NewShellProgramFunc(name string) ProgramFunc {
+	return NewShellProgramFuncWithEnv(name, nil)
+}
+
+// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables.
+func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc {
+	return func(args ...string) Program {
+		return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)}
+	}
+}
+
+func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd {
+	programCmd := exec.Command(commandName, args...)
+	programCmd.Env = os.Environ()
+	if env != nil {
+		for k, v := range *env {
+			programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v))
+		}
+	}
+	programCmd.Stderr = os.Stderr
+	return programCmd
+}
+
+// Shell invokes shell commands to talk with a remote credentials helper.
+type Shell struct {
+	cmd *exec.Cmd
+}
+
+// Output returns responses from the remote credentials helper.
+func (s *Shell) Output() ([]byte, error) {
+	return s.cmd.Output()
+}
+
+// Input sets the input to send to a remote credentials helper.
+func (s *Shell) Input(in io.Reader) {
+	s.cmd.Stdin = in
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
new file mode 100644
index 0000000000..da8b594e7f
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go
@@ -0,0 +1,186 @@
+package credentials
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// Credentials holds the information shared between docker and the credentials store.
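+//
+// On the wire (the helper binary's stdin/stdout) it travels as plain JSON;
+// an illustrative payload (editor's note, the values are made up):
+//
+//	{"ServerURL":"https://registry.example.com","Username":"alice","Secret":"s3cret"}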
+type Credentials struct {
+	ServerURL string
+	Username  string
+	Secret    string
+}
+
+// isValid checks the integrity of a Credentials object so that no credentials
+// lack a server URL or a username.
+// It returns whether the credentials are valid, together with the matching
+// error when they are not; error values can be errCredentialsMissingServerURL
+// or errCredentialsMissingUsername.
+func (c *Credentials) isValid() (bool, error) {
+	if len(c.ServerURL) == 0 {
+		return false, NewErrCredentialsMissingServerURL()
+	}
+
+	if len(c.Username) == 0 {
+		return false, NewErrCredentialsMissingUsername()
+	}
+
+	return true, nil
+}
+
+// CredsLabel holds the label under which Docker credentials are stored in
+// credentials stores that allow labelling.
+// That label makes it possible to filter out non-Docker credentials at
+// lookup/search time in the macOS keychain, the Windows credentials manager
+// and Linux libsecret. The default value is "Docker Credentials".
+var CredsLabel = "Docker Credentials"
+
+// SetCredsLabel is a simple setter for CredsLabel
+func SetCredsLabel(label string) {
+	CredsLabel = label
+}
+
+// Serve initializes the credentials helper and parses the action argument.
+// This function is designed to be called from a command line interface.
+// It uses os.Args[1] as the key for the action.
+// It uses os.Stdin as input and os.Stdout as output.
+// This function terminates the program with os.Exit(1) if there is an error.
+func Serve(helper Helper) {
+	var err error
+	if len(os.Args) != 2 {
+		err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0])
+	}
+
+	if err == nil {
+		err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout)
+	}
+
+	if err != nil {
+		fmt.Fprintf(os.Stdout, "%v\n", err)
+		os.Exit(1)
+	}
+}
+
+// HandleCommand uses a helper and a key to run a credential action.
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error {
+	switch key {
+	case "store":
+		return Store(helper, in)
+	case "get":
+		return Get(helper, in, out)
+	case "erase":
+		return Erase(helper, in)
+	case "list":
+		return List(helper, out)
+	case "version":
+		return PrintVersion(out)
+	}
+	return fmt.Errorf("Unknown credential action `%s`", key)
+}
+
+// Store uses a helper and an input reader to save credentials.
+// The reader must contain the JSON serialization of a Credentials struct.
+func Store(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	var creds Credentials
+	if err := json.NewDecoder(buffer).Decode(&creds); err != nil {
+		return err
+	}
+
+	if ok, err := creds.isValid(); !ok {
+		return err
+	}
+
+	return helper.Add(&creds)
+}
+
+// Get retrieves the credentials for a given server URL.
+// The reader must contain the server URL to search.
+// The writer is used to write the JSON serialization of the credentials.
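+//
+// For example (editor's sketch with made-up values): writing
+// "https://registry.example.com" to the reader would produce a single JSON
+// object on the writer, such as
+//
+//	{"ServerURL":"https://registry.example.com","Username":"alice","Secret":"s3cret"}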
+func Get(helper Helper, reader io.Reader, writer io.Writer) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	serverURL := strings.TrimSpace(buffer.String())
+	if len(serverURL) == 0 {
+		return NewErrCredentialsMissingServerURL()
+	}
+
+	username, secret, err := helper.Get(serverURL)
+	if err != nil {
+		return err
+	}
+
+	resp := Credentials{
+		ServerURL: serverURL,
+		Username:  username,
+		Secret:    secret,
+	}
+
+	buffer.Reset()
+	if err := json.NewEncoder(buffer).Encode(resp); err != nil {
+		return err
+	}
+
+	fmt.Fprint(writer, buffer.String())
+	return nil
+}
+
+// Erase removes credentials from the store.
+// The reader must contain the server URL to remove.
+func Erase(helper Helper, reader io.Reader) error {
+	scanner := bufio.NewScanner(reader)
+
+	buffer := new(bytes.Buffer)
+	for scanner.Scan() {
+		buffer.Write(scanner.Bytes())
+	}
+
+	if err := scanner.Err(); err != nil && err != io.EOF {
+		return err
+	}
+
+	serverURL := strings.TrimSpace(buffer.String())
+	if len(serverURL) == 0 {
+		return NewErrCredentialsMissingServerURL()
+	}
+
+	return helper.Delete(serverURL)
+}
+
+// List returns all the serverURLs of keys in
+// the OS store as a list of strings.
+func List(helper Helper, writer io.Writer) error {
+	accts, err := helper.List()
+	if err != nil {
+		return err
+	}
+	return json.NewEncoder(writer).Encode(accts)
+}
+
+// PrintVersion outputs the current version.
+func PrintVersion(writer io.Writer) error {
+	fmt.Fprintln(writer, Version)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
new file mode 100644
index 0000000000..fe6a5aef45
--- /dev/null
+++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go
@@ -0,0 +1,102 @@
+package credentials
+
+const (
+	// ErrCredentialsNotFound standardizes the not found error, so every helper returns
+	// the same message and docker can handle it properly.
+	errCredentialsNotFoundMessage = "credentials not found in native keychain"
+
+	// ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize
+	// invalid credentials or credentials management operations
+	errCredentialsMissingServerURLMessage = "no credentials server URL"
+	errCredentialsMissingUsernameMessage  = "no credentials username"
+)
+
+// errCredentialsNotFound represents an error
+// raised when credentials are not in the store.
+type errCredentialsNotFound struct{}
+
+// Error returns the standard error message
+// for when the credentials are not in the store.
+func (errCredentialsNotFound) Error() string {
+	return errCredentialsNotFoundMessage
+}
+
+// NewErrCredentialsNotFound creates a new error
+// for when the credentials are not in the store.
+func NewErrCredentialsNotFound() error {
+	return errCredentialsNotFound{}
+}
+
+// IsErrCredentialsNotFound returns true if the error
+// was caused by not having a set of credentials in a store.
+func IsErrCredentialsNotFound(err error) bool {
+	_, ok := err.(errCredentialsNotFound)
+	return ok
+}
+
+// IsErrCredentialsNotFoundMessage returns true if the error
+// was caused by not having a set of credentials in a store.
+//
+// This function helps to check messages returned by an
+// external program via its standard output.
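+//
+// A hedged sketch of the intended call site (editor's note; it mirrors the
+// pattern used by the client package in this repository):
+//
+//	out, err := cmd.Output()
+//	if err != nil && IsErrCredentialsNotFoundMessage(strings.TrimSpace(string(out))) {
+//		// the helper has no entry for this server; treat as "not found"
+//	}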
+func IsErrCredentialsNotFoundMessage(err string) bool { + return err == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. +type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + _, ok := err.(errCredentialsMissingServerURL) + return ok +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return err == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + _, ok := err.(errCredentialsMissingUsername) + return ok +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. +func IsCredentialsMissingUsernameMessage(err string) bool { + return err == errCredentialsMissingUsernameMessage +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 0000000000..135acd254d --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. + Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. 
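+	// For example (editor's illustration), a store holding two accounts
+	// might yield:
+	//
+	//	map[string]string{
+	//		"https://index.docker.io/v1/":  "alice",
+	//		"https://registry.example.com": "bob",
+	//	}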
+ List() (map[string]string, error) +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 0000000000..185e367961 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,4 @@ +package credentials + +// Version holds a string describing the current version +const Version = "0.6.4" diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 0000000000..dffacff112 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,2175 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Hnatiw +Aaron Huslage +Aaron L. Xu +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Adam Avilla +Adam Dobrawy +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Pointer +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adnan Khan +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akash Gupta +Akhil Mohan +Akihiro Matsushima +Akihiro Suda +Akim Demaille +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Hoyle +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Albin Kerouanton +Alejandro González Hevia +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Goodman +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandre Jomin +Alexandru Sfirlogea +Alexei Margasov +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amr Gawish +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anca Iordache +Anchal Agrawal +Anda Xu +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Denisse Gómez +Andrea Luzzardi +Andrea Turli +Andreas Elvers +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrei Vagin +Andrew C. 
Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew He +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Pennebaker +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Kolomentsev +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankit Jain +Ankush Agarwal +Anonmily +Anran Qiao +Anshul Pundir +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arko Dasgupta +Arnaud Lefebvre +Arnaud Porterie +Arnaud Rebillout +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asad Saeeduddin +Asbjørn Enge +averagehuman +Avi Das +Avi Kivity +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Gould +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Baker +Benjamin Boudreau +Benjamin Yolken +Benny Ng +Benoit Chesneau +Bernerd Schaefer +Bernhard M. Wiedemann +Bert Goethals +Bertrand Roussel +Bevisy Zhang +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Bily Zhang +Bin Liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boqin Qin +Boris Pruessmann +Boshi Lian +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brett Randall +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Brielle Broder +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Catalin Pirvu +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander Govindarajan +Chanhun Jeong +Chao Wang +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Cheng-mean Liu +Chengfei Shang +Chengguang Xu +chenyuzhu +Chetan Birajdar +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris McKinnel +Chris McKinnel +Chris Price +Chris Seto +Chris Snow +Chris St. 
Pierre +Chris Stivers +Chris Swan +Chris Telfer +Chris Wahl +Chris Weyl +Chris White +Christian Berendt +Christian Brauner +Christian Böhme +Christian Muehlhaeuser +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +Christophe Mehay +Christophe Troestler +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Norman +Chun Chen +Ciro S. Costa +Clayton Coleman +Clinton Kitson +Cody Roseborough +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Panisset +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Corbin Coleman +Corey Farrell +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Ariza +Cristian Staretu +cristiano balducci +Cristina Yenyxe Gonzalez Garcia +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Williams +Dani Hodovic +Dani Louca +Daniel Antlinger +Daniel Black +Daniel Dao +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Sweet +Daniel Von Fange +Daniel Watkins +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Danny Berger +Danny Milosavljevic +Danny Yates +Danyal Khaliq +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Goodchild +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Chung +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David McKay +David P Hilton +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Wang <00107082@163.com> +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Debayan De +Deborah Gertrude Digges +deed02392 +Deep Debroy +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devon Estes +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Mandalidis +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Sharshakov +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. 
Krivenok +Dmitry Vorobev +Dolph Mathews +Dominic Tubach +Dominic Yin +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donghwa Kim +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Douglas Curtis +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Drew Hubl +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elango Sivanandam +Elena Morozova +Eli Uriegas +Elias Faxö +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Davtyan +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Soderstrom +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Ethan Bell +Ethan Mosbaugh +Euan Kemp +Eugen Krizo +Eugene Yakubovich +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeniy Makhrov +Evgeny Shmarnev +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Kramm +Fabian Lauer +Fabian Raetz +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangming Fang +Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felipe Ruhland +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Feng Yan +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +Flavio Castelli +Flavio Crisciani +Florian +Florian Klein +Florian Maier +Florian Noeding +Florian Schmaus +Florian Weingarten +Florin Asavoaie +Florin Patan +fonglh +Foysal Iqbal +Francesc Campoy +Francesco Mari +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +frankyang +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +Frieder Bluemle +Fu JinLin +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +Gaurav Singh +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoffrey Bachelet +Geon Kim +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim Feiken +Ghislain Bourgeois +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +Giovan Isa Musthofa +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Goldwyn Rodrigues +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Millar +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Stephens +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. 
Charmes +guoxiuyan +Guri +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Haichao Yang +haikuoliu +Hakan Özler +Hamish Hutchings +Hannes Ljungberg +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harald Niesche +Harley Laue +Harold Cooper +Harrison Turton +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xiaoxi +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hiroshi Hatake +Hiroyuki Sasagawa +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +Hongxu Jia +Honza Pokorny +Hsing-Hui Hsu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +HuanHuan Ye +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +Hyzhou Zhy +Iago López Galeiras +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Chen +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Igor Karpovich +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +Ilya Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Innovimax +Isaac Dupree +Isabel Jimenez +Isaiah Grace +Isao Jonas +Iskander Sharipov +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. Nunn +Jack Danger Canty +Jack Laxson +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Vallejo +Jacob Wen +Jaime Cepeda +Jaivish Kothari +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Turnbull +James Watkins-Harvey +Jamie Hannaford +Jamshid Afshar +Jan Chren +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason A. Donenfeld +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean Rouge +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeeva S. Chelladhurai +Jeff Anderson +Jeff Hajewski +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +Jhon Honce +Ji.Zhilong +Jian Liao +Jian Zhang +Jiang Jinyang +Jie Luo +Jie Ma +Jihyun Hwang +Jilles Oldenbeuving +Jim Alateras +Jim Ehrismann +Jim Galasyn +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +Jinsoo Park +Jintao Zhang +Jiri Appl +Jiri Popelka +Jiuyue Ma +Jiří Župka +Joao Fernandes +Joao Trindade +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. 
Martinez +John Warwick +John Willis +Jon Johnson +Jon Surrell +Jon Wedaman +Jonas Dohse +Jonas Heinrich +Jonas Pfenniger +Jonathan A. Schweder +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Choy +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan Arentsen +Jordan Jennings +Jordan Sissel +Jorge Marin +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Bonczkowski +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +Joyce Jang +JP +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julien Pivotto +Julio Guerra +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justen Martin +Justin Cormack +Justin Force +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérémy Leherpeur +Jérôme Petazzoni +Jörg Thalheim +K. Heller +Kai Blin +Kai Qiang Wu (Kennan) +Kamil Domański +Kamjar Gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Karanth +Karthik Nayak +Kasper Fabæch Brandt +Kate Heddleston +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Kazuhiro Sera +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Ken Reese +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kenta Tada +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin Meredith +Kevin P. Kucharczyk +Kevin Parsons +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +Krystian Wojcicki +Kun Zhang +Kunal Kushwaha +Kunal Tyagi +Kyle Conroy +Kyle Linden +Kyle Wuolle +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. 
Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Gong +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Li Yi +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Lifubang +Lihua Tang +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Lotus Fenn +Louis Delossantos +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Lucas Molas +Lucas Silvestre +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Heeren +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Rüger +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcel Edmund Franke +Marcelo Horacio Fortino +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Martins +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark Jeromin +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Oates +Mark Parker +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Markus Kortlang +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Muzatko +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masato Ohba +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Champlon +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Rickard +Matt Robenolt +Matt Schurenko +Matt Williams +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mosesohn +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +Mattias Jernberg +Mauricio Garavaglia +mauriyouth +Max Harmathy +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Maximiliano Maccanti +Maxwell +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. 
Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Nussbaum +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michael Zhao +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michał Gryko +Michiel de Jong +Mickaël Fortunato +Mickaël Remars +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Bush +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Estes +Mike Gaffney +Mike Goelzer +Mike Leone +Mike Lundy +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +Mizuki Urushida +mlarcher +Mohammad Banikazemi +Mohammad Nasirifar +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mrfly +Mrunal Patel +Muayyad Alsadi +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nace Oroz +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Natasha Jarus +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Adcock +Nick DeCoursin +Nick Irvine +Nick Neisen +Nick Parker +Nick Payne +Nick Russo +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolas Sterchele +Nicolas V Castet +Nicolás Hock Isaza +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Edigaryev +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Meyerhans +Noah Treuhaft +NobodyOnSE +noducks +Nolan Darilek +Noriki Nakamura +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +Odin Ugedal +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Oliver Reason +Olivier Gambier +Olle Jonsson +Olli Janatuinen +Olly Pomeroy +Omri Shiv +Oriol Francès +Oskar Niburski +Otto Kekäläinen +Ouyang Liduo +Ovidio Mallo +Panagiotis Moustafellos +Paolo G. 
Giarrusso +Pascal +Pascal Bach +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +Patrik Cyvoct +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Matěja +Pavel Pletenev +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Pawel Konczalski +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Kang +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Alexander Etling +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Pradip Dhara +Prasanna Gautam +Pratik Karki +Prayag Verma +Priya Wadhwa +Projjol Banerji +Przemek Hejman +Pure White +pysqz +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Radostin Stoyanov +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +RaviTeja Pothana +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +Renaud Gaubert +Rhys Hiltner +Ri Xu +Ricardo N Feliciano +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Stern +Robert Terhaar +Robert Wallis +Robert Wang +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +Robin Thoni +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rohit Kapur +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Mazur +Roman Strashkin +Ron Smits +Ron Williams +Rong Gao +Rong Zhang +Rongxiang Song +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Cao +Rui Lopes +Ruilin Li +Runshen Zhu +Russ Magee +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Simmen +Ryan Stelly +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Ryo Nakao +Rémy Greinhofer +s. 
rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sakeven Jiang +Salahuddin Khan +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sam Whited +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Sargun Dhillon +Sascha Andres +Sascha Grunert +SataQiu +Satnam Singh +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Sergio Lopez +Serhat Gülçiçek +SeungUkLee +Sevki Hasirci +Shane Canon +Shane da Silva +Shaun Kaasten +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shijun Qin +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +Shu-Wai Chow +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silas Sewell +Silvan Jegen +Simão Reis +Simei He +Simon Barendse +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Simon Vikstrom +Sindhu S +Sjoerd Langkemper +skanehira +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Staf Wagemakers +Stanislav Bondarenko +Stanislav Levin +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephan Spindler +Stephen Benjamin +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Stig Larsson +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sune Keller +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien HOUZÉ +Sébastien Luttringer +Sébastien Stormacq +Tabakhase +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi Colin +Tatsuki Sugiura +Tatsushi Inagaki +Taylan Isikdemir +Taylor Jones +Ted M. Young +Tehmasp Chaudhri +Tejaswini Duggaraju +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas Léveil +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Ti Zhou +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Till Wegmüller +Tim +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Sweeney +Tom Wilkie +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +Trace Andreason +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +Trishna Guha +Tristan Carel +Troy Denton +Tycho Andersen +Tyler Brock +Tyler Brown +Tzu-Jung Lee +uhayate +Ulysse Carion +Umesh Yadav +Utz Bacher +vagrant +Vaidas Jablonskis +vanderliang +Velko Ivanov +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Vikram bir Singh +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Boulineau +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vlastimil Zeman +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +Wang Chao +Wang Guoliang +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +wanghuaiqing +Ward Vandewege +WarheadsSE +Wassim Dhif +Wayne Chang +Wayne Song +Weerasak Chongnguluam +Wei Fu +Wei Wu +Wei-Ting Kuo +weipeng +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenjun Tang +Wenkai Yin +wenlxie +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Wiktor Kwapisiewicz +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +Wilson Júnior +Wing-Kam Wong +WiseTrem +Wolfgang Powisch +Wonjun Kim +xamyzhao +Xian Chaobo +Xianglin Gao +Xianlu Bird +Xiao YongBiao +XiaoBing Jiang +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xiaoxu Chen +Xiaoyu Zhang +xichengliudui <1693291525@qq.com> +xiekeyang +Ximo Guanter Gonzálbez +Xinbo Weng +Xinfeng Liu +Xinzi Zhou +Xiuming Chen +Xuecong Liao +xuzhaokui +Yadnyawalkya Tale +Yahya +YAMADA Tsuyoshi +Yamasaki Masahide +Yan Feng +Yang Bai +Yang Pengfei +yangchenliang +Yanqiang Miao +Yao Zaiyong +Yash Murty +Yassine Tijani +Yasunori Mahata +Yazhong Liu +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongxin Li +Yongzhi Pan +Yosef Fertel +You-Sheng Yang (楊有勝) +youcai +Youcef YEKHLEF +Yu Changchun +Yu Chengxia +Yu Peng +Yu-Ju Hong +Yuan Sun +Yuanhong Peng +Yue Zhang +Yuhao Fang +Yuichiro Kaneko +Yunxiang Huang +Yurii Rashkovskii +Yusuf Tarık Günaydın +Yves Junqueira +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! 
+Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenhai Gao +Zhenkun Bi +zhipengzuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Ziheng Liu +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +Zou Yu +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +屈骏 +徐俊杰 +慕陶 +搏通 +黄艳红00139573 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 0000000000..6d8d58fb67 --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 0000000000..58b19b6d15 --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/docker/docker/api/README.md new file mode 100644 index 0000000000..f136c3433a --- /dev/null +++ b/vendor/github.com/docker/docker/api/README.md @@ -0,0 +1,42 @@ +# Working on the Engine API + +The Engine API is an HTTP API used by the command-line client to communicate with the daemon. It can also be used by third-party software to control the daemon. + +It consists of various components in this repository: + +- `api/swagger.yaml` A Swagger definition of the API. +- `api/types/` Types shared by both the client and server, representing various objects, options, responses, etc. Most are written manually, but some are automatically generated from the Swagger definition. See [#27919](https://github.com/docker/docker/issues/27919) for progress on this. +- `cli/` The command-line client. +- `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. +- `daemon/` The daemon, which serves the API. + +## Swagger definition + +The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to: + +1. Automatically generate documentation. +2. Automatically generate the Go server and client. (A work-in-progress.) +3. Provide a machine readable version of the API for introspecting what it can do, automatically generating clients for other languages, etc. + +## Updating the API documentation + +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. + +The file is split into two main sections: + +- `definitions`, which defines re-usable objects used in requests and responses +- `paths`, which defines the API endpoints (and some inline objects which don't need to be reusable) + +To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. 
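+
+As an illustration only (an editor's sketch, not part of the vendored file): a minimal `paths` entry for a hypothetical `/example` endpoint whose response references the reusable `Volume` object from `definitions` via `$ref` could look like this:
+
+```yaml
+paths:
+  /example:
+    get:
+      summary: "Hypothetical endpoint, shown only to illustrate the layout"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/Volume"
+```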
+ +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](http://swagger.io/specification/). + +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. + +## Viewing the API documentation + +When you make edits to `swagger.yaml`, you may want to check the generated API documentation to ensure it renders correctly. + +Run `make swagger-docs` and a preview will be running at `http://localhost`. Some of the styling may be incorrect, but you'll be able to ensure that it is generating the correct documentation. + +The production documentation is generated by vendoring `swagger.yaml` into [docker/docker.github.io](https://github.com/docker/docker.github.io). diff --git a/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/docker/docker/api/common.go new file mode 100644 index 0000000000..1565e2af64 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common.go @@ -0,0 +1,11 @@ +package api // import "github.com/docker/docker/api" + +// Common constants for daemon and client. +const ( + // DefaultVersion of Current REST API + DefaultVersion = "1.41" + + // NoBaseImageSpecifier is the symbol used by the FROM + // command to specify that no base image is to be used. + NoBaseImageSpecifier = "scratch" +) diff --git a/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/docker/docker/api/common_unix.go new file mode 100644 index 0000000000..504b0c90d7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +const MinVersion = "1.12" diff --git a/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/docker/docker/api/common_windows.go new file mode 100644 index 0000000000..590ba5479b --- /dev/null +++ b/vendor/github.com/docker/docker/api/common_windows.go @@ -0,0 +1,8 @@ +package api // import "github.com/docker/docker/api" + +// MinVersion represents Minimum REST API version supported +// Technically the first daemon API version released on Windows is v1.25 in +// engine version 1.13. However, some clients are explicitly using downlevel +// APIs (e.g. docker-compose v2.1 file format) and that is just too restrictive. +// Hence also allowing 1.24 on Windows. +const MinVersion string = "1.24" diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 0000000000..f07a02737f --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 0000000000..bada4a8e3c --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,11425 @@ +# A Swagger 2.0 (a.k.a. OpenAPI) definition of the Engine API.
+# +# This is used for generating API documentation and the types used by the +# client/server. See api/README.md for more information. +# +# Some style notes: +# - This file is used by ReDoc, which allows GitHub Flavored Markdown in +# descriptions. +# - There is no maximum line length, for ease of editing and pretty diffs. +# - operationIds are in the format "NounVerb", with a singular noun. + +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.41" +info: + title: "Docker Engine API" + version: "1.41" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Engine API is an HTTP API served by Docker Engine. It is the API the + Docker client uses to communicate with the Engine, so everything the Docker + client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` + is `GET /containers/json`). The notable exception is running containers, + which consists of several API calls. + + # Errors + + The API uses standard HTTP status codes to indicate the success or failure + of the API call. The body of the response will be JSON in the following + format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, an HTTP `400 Bad Request` error message + is returned. + + If you omit the version-prefix, the current version of the API (v1.41) is used. + For example, calling `/info` is the same as calling `/v1.41/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. + + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. + + The API uses an open schema model, which means the server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. + + + # Authentication + + Authentication for registries is handled client side. The client has to send + authentication details to various endpoints that need to communicate with + registries, such as `POST /images/(name)/push`. These are sent as an + `X-Registry-Auth` header as a [base64url encoded](https://tools.ietf.org/html/rfc4648#section-5) + (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this + structure, double quotes are required. + + If you already have an identity token from the [`/auth` endpoint](#operation/SystemAuth), + you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +# The tags on paths define the menu sections in the ReDoc documentation, so +# the usage of tags must make sense for that: +# - They should be singular, not plural.
+# - There should not be too many tags, or the menu becomes unwieldy. For +# example, it is preferable to add a path to the "System" tag instead of +# creating a tag with a single path in it. +# - The order of tags in this list defines the order in the menu. +tags: + # Primary objects + - name: "Container" + x-displayName: "Containers" + description: | + Create and manage containers. + - name: "Image" + x-displayName: "Images" + - name: "Network" + x-displayName: "Networks" + description: | + Networks are user-defined networks that containers can be attached to. + See the [networking documentation](https://docs.docker.com/network/) + for more information. + - name: "Volume" + x-displayName: "Volumes" + description: | + Create and manage persistent storage that can be attached to containers. + - name: "Exec" + x-displayName: "Exec" + description: | + Run new commands inside running containers. Refer to the + [command-line reference](https://docs.docker.com/engine/reference/commandline/exec/) + for more information. + + To exec a command in a container, you first need to create an exec instance, + then start it. These two API endpoints are wrapped up in a single command-line + command, `docker exec`. + + # Swarm things + - name: "Swarm" + x-displayName: "Swarm" + description: | + Engines can be clustered together in a swarm. Refer to the + [swarm mode documentation](https://docs.docker.com/engine/swarm/) + for more information. + - name: "Node" + x-displayName: "Nodes" + description: | + Nodes are instances of the Engine participating in a swarm. Swarm mode + must be enabled for these endpoints to work. + - name: "Service" + x-displayName: "Services" + description: | + Services are the definitions of tasks to run on a swarm. Swarm mode must + be enabled for these endpoints to work. + - name: "Task" + x-displayName: "Tasks" + description: | + A task is a container running on a swarm. It is the atomic scheduling unit + of swarm. Swarm mode must be enabled for these endpoints to work. + - name: "Secret" + x-displayName: "Secrets" + description: | + Secrets are sensitive data that can be used by services. Swarm mode must + be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm + mode must be enabled for these endpoints to work. 
+ # System things + - name: "Plugin" + x-displayName: "Plugins" + - name: "System" + x-displayName: "System" + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + description: "Host IP address that the container's port is mapped to" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp", "sctp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. + type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + type: "string" + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + - "npipe" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + Consistency: + description: "The consistency requirement for the mount: `default`, `consistent`, `cached`, or `delegated`." + type: "string" + BindOptions: + description: "Optional configuration for the `bind` type." 
+ type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to + restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is + added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - Empty string means not to restart + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "" + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: | + If `on-failure` is used, the number of times to retry before giving up. + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: | + An integer value representing this container's relative CPU weight + versus other containers. + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + format: "int64" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: | + Path to `cgroups` under which the container's `cgroup` is created. If + the path is not absolute, the path is considered to be relative to the + `cgroups` path of the init process. Cgroups are created if they do not + already exist. + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." 
+ type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form: + + ``` + [{"Path": "device_path", "Weight": weight}] + ``` + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form: + + ``` + [{"Path": "device_path", "Rate": rate}] + ``` + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: | + Microseconds of CPU time that the container can get in a CPU period. + type: "integer" + format: "int64" + CpuRealtimePeriod: + description: | + The length of a CPU real-time period in microseconds. Set to 0 to + allocate no time to real-time tasks. + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: | + The length of a CPU real-time runtime in microseconds. Set to 0 to + allocate no time to real-time tasks. + type: "integer" + format: "int64" + CpusetCpus: + description: | + CPUs in which to allow execution (e.g., `0-3`, `0,1`). + type: "string" + example: "0-3" + CpusetMems: + description: | + Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only + effective on NUMA systems. + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DeviceCgroupRules: + description: "A list of cgroup rules to apply to the container" + type: "array" + items: + type: "string" + example: "c 13:* rwm" + DeviceRequests: + description: | + A list of requests for devices to be sent to device drivers. + type: "array" + items: + $ref: "#/definitions/DeviceRequest" + KernelMemory: + description: | + Kernel memory limit in bytes. + +
+ + > **Deprecated**: This field is deprecated as the kernel 5.4 deprecated + > `kmem.limit_in_bytes`. + type: "integer" + format: "int64" + example: 209715200 + KernelMemoryTCP: + description: "Hard limit for kernel TCP buffer memory (in bytes)." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: | + Total memory limit (memory + swap). Set as `-1` to enable unlimited + swap. + type: "integer" + format: "int64" + MemorySwappiness: + description: | + Tune a container's memory swappiness behavior. Accepts an integer + between 0 and 100. + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCpus: + description: "CPU quota in units of 10^-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + PidsLimit: + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` + to not change. + type: "integer" + format: "int64" + x-nullable: true + Ulimits: + description: | + A list of resource limits to set in the container. For example: + + ``` + {"Name": "nofile", "Soft": 1024, "Hard": 2048} + ``` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are + mutually exclusive. The order of precedence is `CPUCount` first, then + `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: | + Maximum IO in bytes per second for the container system drive + (Windows only). + type: "integer" + format: "int64" + + Limit: + description: | + An object describing a limit on resources which can be requested by a task. + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + Pids: + description: | + Limits the maximum number of PIDs in the container. Set `0` for unlimited. + type: "integer" + format: "int64" + default: 0 + example: 100 + + ResourceObject: + description: | + An object describing the resources which can be advertised by a node and + requested by a task.
+ type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + example: 4000000000 + MemoryBytes: + type: "integer" + format: "int64" + example: 8272408576 + GenericResources: + $ref: "#/definitions/GenericResources" + + GenericResources: + description: | + User-defined resources can be either Integer resources (e.g, `SSD=3`) or + String resources (e.g, `GPU=UUID1`). + type: "array" + items: + type: "object" + properties: + NamedResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "string" + DiscreteResourceSpec: + type: "object" + properties: + Kind: + type: "string" + Value: + type: "integer" + format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + HealthConfig: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `[]` inherit healthcheck from image or parent image + - `["NONE"]` disable healthcheck + - `["CMD", args...]` exec arguments directly + - `["CMD-SHELL", command]` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: | + The time to wait between checks in nanoseconds. It should be 0 or at + least 1000000 (1 ms). 0 means inherit. + type: "integer" + Timeout: + description: | + The time to wait before considering the check to have hung. It should + be 0 or at least 1000000 (1 ms). 0 means inherit. + type: "integer" + Retries: + description: | + The number of consecutive failures needed to consider a container as + unhealthy. 0 means inherit. + type: "integer" + StartPeriod: + description: | + Start period for the container to initialize before starting + health-retries countdown in nanoseconds. It should be 0 or at least + 1000000 (1 ms). 0 means inherit. + type: "integer" + + Health: + description: | + Health stores information about the container's healthcheck results. + type: "object" + properties: + Status: + description: | + Status is one of `none`, `starting`, `healthy` or `unhealthy` + + - "none" Indicates there is no healthcheck + - "starting" Starting indicates that the container is not yet ready + - "healthy" Healthy indicates that the container is running correctly + - "unhealthy" Unhealthy indicates that the container has a problem + type: "string" + enum: + - "none" + - "starting" + - "healthy" + - "unhealthy" + example: "healthy" + FailingStreak: + description: "FailingStreak is the number of consecutive failures" + type: "integer" + example: 0 + Log: + type: "array" + description: | + Log contains the last few results (oldest first) + items: + x-nullable: true + $ref: "#/definitions/HealthcheckResult" + + HealthcheckResult: + description: | + HealthcheckResult stores information about a single run of a healthcheck probe + type: "object" + properties: + Start: + description: | + Date and time at which this check started in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "date-time" + example: "2020-01-04T10:44:24.496525531Z" + End: + description: | + Date and time at which this check ended in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. 
+ type: "string" + format: "date-time" + example: "2020-01-04T10:45:21.364524523Z" + ExitCode: + description: | + ExitCode meanings: + + - `0` healthy + - `1` unhealthy + - `2` reserved (considered unhealthy) + - other values: error running probe + type: "integer" + example: 0 + Output: + description: "Output from last check" + type: "string" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding + is a string in one of these forms: + + - `host-src:container-dest[:options]` to bind-mount a host path + into the container. Both `host-src`, and `container-dest` must + be an _absolute_ path. + - `volume-name:container-dest[:options]` to bind-mount a volume + managed by a volume driver into the container. `container-dest` + must be an _absolute_ path. + + `options` is an optional, comma-delimited list of: + + - `nocopy` disables automatic copying of data from the container + path to the volume. The `nocopy` flag only applies to named volumes. + - `[ro|rw]` mounts a volume read-only or read-write, respectively. + If omitted or set to `rw`, volumes are mounted read-write. + - `[z|Z]` applies SELinux labels to allow or deny multiple containers + to read and write to the same volume. + - `z`: a _shared_ content label is applied to the content. This + label indicates that multiple containers can share the volume + content, for both reading and writing. + - `Z`: a _private unshared_ label is applied to the content. + This label indicates that only the current container can use + a private volume. Labeling systems such as SELinux require + proper labels to be placed on volume content that is mounted + into a container. Without a label, the security system can + prevent a container's processes from using the content. By + default, the labels set by the host operating system are not + modified. + - `[[r]shared|[r]slave|[r]private]` specifies mount + [propagation behavior](https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt). + This only applies to bind-mounted volumes, not internal volumes + or named volumes. Mount propagation requires the source mount + point (the location where the source directory is mounted in the + host operating system) to have the correct propagation properties. + For shared volumes, the source mount point must be set to `shared`. + For slave volumes, the mount must be set to either `shared` or + `slave`. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: | + Network mode to use for this container. Supported standard values + are: `bridge`, `host`, `none`, and `container:<name|id>`. Any + other value is taken as a custom network's name to which this + container should connect.
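+          # Editor's illustration (not upstream content): a container-create
+          # request using the two fields documented above might carry, e.g.,
+          #   "Binds": ["/mnt/data:/data:ro,Z"],
+          #   "NetworkMode": "container:web01"
+          # where "web01" is a hypothetical running container whose network
+          # namespace this container would join.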
+ PortBindings: + $ref: "#/definitions/PortMap" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: | + Automatically remove the container when the container's process + exits. This has no effect if `RestartPolicy` is set. + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: | + A list of volumes to inherit from another container, specified in + the form `<container name>[:<ro|rw>]`. + items: + type: "string" + Mounts: + description: | + Specification for mounts to be added to the container. + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: | + A list of kernel capabilities to add to the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CapDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the container. Conflicts + with option 'Capabilities'. + items: + type: "string" + CgroupnsMode: + type: "string" + enum: + - "private" + - "host" + description: | + cgroup namespace mode for the container. Possible values are: + + - `"private"`: the container runs in its own private cgroup namespace + - `"host"`: use the host system's cgroup namespace + + If not specified, the daemon default is used, which can either be `"private"` + or `"host"`, depending on daemon version, kernel support and configuration. + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` + file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + IpcMode: + type: "string" + description: | + IPC sharing mode for the container. Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: | + A list of links for the container in the form `container_name:alias`. + items: + type: "string" + OomScoreAdj: + type: "integer" + description: | + An integer value containing the score given to the container in + order to tune OOM killer preferences. + example: 500 + PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be + either: + + - `"container:<name|id>"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host."
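+          # Editor's illustration (not upstream content): the namespace-mode
+          # fields documented above take the same "container:<name|id>" form,
+          # e.g. "IpcMode": "container:db01" joins the shareable IPC namespace
+          # of a hypothetical container named "db01", while "PidMode": "host"
+          # uses the host's PID namespace inside the container.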
+ PublishAllPorts: + type: "boolean" + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when + the container starts. The allocated port might be changed when + restarting the container. + + The port is selected from the ephemeral port range that depends on + the kernel. For example, on Linux the range is defined by + `/proc/sys/net/ipv4/ip_local_port_range`. + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: | + A map of container directories which should be replaced by tmpfs + mounts, and their corresponding mount options. For example: + + ``` + { "/run": "rw,noexec,nosuid,size=65536k" } + ``` + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: | + Sets the usernamespace mode for the container when usernamespace + remapping option is enabled. + ShmSize: + type: "integer" + description: | + Size of `/dev/shm` in bytes. If omitted, the system uses 64MB. + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. + For example: + + ``` + {"net.ipv4.ip_forward": "1"} + ``` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: | + Initial console size, as an `[height, width]` array. (Windows only) + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: | + Isolation technology of the container. (Windows only) + enum: + - "default" + - "process" + - "hyperv" + MaskedPaths: + type: "array" + description: | + The list of paths to be masked inside the container (this overrides + the default set of paths). + items: + type: "string" + ReadonlyPaths: + type: "array" + description: | + The list of paths to be set as read-only inside the container + (this overrides the default set of paths). + items: + type: "string" + + ContainerConfig: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"<port>/<tcp|udp|sctp>": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: | + Attach standard streams to a TTY, including `stdin` if it is not closed.
+ type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the + form `["VAR=value", ...]`. A variable without `=` is removed from the + environment, rather than having an empty value. + type: "array" + items: + type: "string" + Cmd: + description: | + Command to run specified as a string or an array of strings. + type: "array" + items: + type: "string" + Healthcheck: + $ref: "#/definitions/HealthConfig" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: | + The name of the image to use when creating the container. + type: "string" + Volumes: + description: | + An object mapping mount point paths inside the container to empty + objects. + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the + entry point is reset to system default (i.e., the entry point used by + docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: "array" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: | + `ONBUILD` metadata that were defined in the image's `Dockerfile`. + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: | + Signal to stop a container as a string or unsigned integer. + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: | + Shell for when `RUN`, `CMD`, and `ENTRYPOINT` use a shell. + type: "array" + items: + type: "string" + + NetworkingConfig: + description: | + NetworkingConfig represents the container's networking configuration for + each of its interfaces. + It is used for the networking configs specified in the `docker create` + and `docker network connect` commands. + type: "object" + properties: + EndpointsConfig: + description: | + A mapping of network name to endpoint configuration for that network. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + # putting an example here, instead of using the example values from + # /definitions/EndpointSettings, because containers/create currently + # does not support attaching to multiple networks, so the example request + # would be confusing if it showed that multiple networks can be contained + # in the EndpointsConfig.
+ # TODO remove once we support multiple networks on container create (see https://github.com/moby/moby/blob/07e6b843594e061f82baa5fa23c2ff7d536c2a05/daemon/create.go#L323) + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" + type: "object" + properties: + Bridge: + description: Name of the network's bridge (for example, `docker0`). + type: "string" + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. + type: "string" + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: IPv6 unicast address using the link-local prefix. + type: "string" + example: "fe80::42:acff:fe11:1" + LinkLocalIPv6PrefixLen: + description: Prefix length of the IPv6 unicast address. + type: "integer" + example: 64 + Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey identifies the sandbox + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + # TODO is SecondaryIPAddresses actually used? + SecondaryIPAddresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO is SecondaryIPv6Addresses actually used? + SecondaryIPv6Addresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `<port>/<protocol>`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" + + GraphDriverData: + description: "Information about a container's graph driver."
+ type: "object" + required: [Name, Data] + properties: + Name: + type: "string" + x-nullable: false + Data: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + + Image: + type: "object" + required: + - Id + - Parent + - Comment + - Created + - Container + - DockerVersion + - Author + - Architecture + - Os + - Size + - VirtualSize + - GraphDriver + - RootFS + properties: + Id: + type: "string" + x-nullable: false + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + x-nullable: false + Comment: + type: "string" + x-nullable: false + Created: + type: "string" + x-nullable: false + Container: + type: "string" + x-nullable: false + ContainerConfig: + $ref: "#/definitions/ContainerConfig" + DockerVersion: + type: "string" + x-nullable: false + Author: + type: "string" + x-nullable: false + Config: + $ref: "#/definitions/ContainerConfig" + Architecture: + type: "string" + x-nullable: false + Os: + type: "string" + x-nullable: false + OsVersion: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + VirtualSize: + type: "integer" + format: "int64" + x-nullable: false + GraphDriver: + $ref: "#/definitions/GraphDriverData" + RootFS: + type: "object" + required: [Type] + properties: + Type: + type: "string" + x-nullable: false + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + Metadata: + type: "object" + properties: + LastTagTime: + type: "string" + format: "dateTime" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + CreatedAt: + type: "string" + format: "dateTime" + description: "Date/Time the volume was created." + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. 
+ + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: | + The level at which the volume exists. Either `global` for cluster-wide, + or `local` for machine level. + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: | + The driver specific options used when creating the volume. + additionalProperties: + type: "string" + UsageData: + type: "object" + x-nullable: true + required: [Size, RefCount] + description: | + Usage details about the volume. This information is used by the + `GET /system/df` endpoint, and omitted in other endpoints. + properties: + Size: + type: "integer" + default: -1 + description: | + Amount of disk space used by the volume (in bytes). This information + is only available for volumes created with the `"local"` volume + driver. For volumes created with other volume drivers, this field + is set to `-1` ("not available") + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: | + The number of containers referencing this volume. This field + is set to `-1` if the reference-count is not available. + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + CreatedAt: "2016-06-07T20:31:11.853781916Z" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Attachable: + type: "boolean" + Ingress: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Attachable: false + Ingress: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." 
+ type: "string" + default: "default" + Config: + description: | + List of IPAM configuration options, specified as a map: + + ``` + {"Subnet": <CIDR>, "IPRange": <CIDR>, "Gateway": <IP address>, "AuxAddress": <device_name:IP address>} + ``` + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." + type: "object" + additionalProperties: + type: "string" + + NetworkContainer: + type: "object" + properties: + Name: + type: "string" + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + properties: + ID: + type: "string" + Parent: + type: "string" + Type: + type: "string" + Description: + type: "string" + InUse: + type: "boolean" + Shared: + type: "boolean" + Size: + description: | + Amount of disk space used by the build cache (in bytes). + type: "integer" + CreatedAt: + description: | + Date and time at which the build cache was created in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + LastUsedAt: + description: | + Date and time at which the build cache was last used in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + x-nullable: true + example: "2017-08-09T07:09:37.632105588Z" + UsageCount: + type: "integer" + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + + CreateImageInfo: + type: "object" + properties: + id: + type: "string" + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + + ProgressDetail: + type: "object" + properties: + current: + type: "integer" + total: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + # Configurations + IPAMConfig: + $ref: "#/definitions/EndpointIPAMConfig" + Links: + type: "array" + items: + type: "string" + example: + - "container_1" + - "container_2" + Aliases: + type: "array" + items: + type: "string" + example: + - "server_x" + - "server_y" + + # Operational data + NetworkID: + description: | + Unique ID of the network.
+ type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" + EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for this network. + type: "string" + example: "172.17.0.1" + IPAddress: + description: | + IPv4 address. + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address. + type: "string" + example: "2001:db8:2::100" + GlobalIPv6Address: + description: | + Global IPv6 address. + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + type: "integer" + format: "int64" + example: 64 + MacAddress: + description: | + MAC address for the endpoint on this network. + type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + example: "some-mount" + Description: + type: "string" + x-nullable: false + example: "This is a mount that's used by the plugin." 
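+        # Illustrative aside, not upstream schema content: a fully populated
+        # PluginMount serialized as JSON could look like
+        #   {"Name": "some-mount", "Description": "This is a mount that's used by the plugin.",
+        #    "Settable": ["Source"], "Source": "/var/lib/docker/plugins/",
+        #    "Destination": "/mnt/state", "Type": "bind", "Options": ["rbind", "rw"]}
+        # (values reuse the per-field examples in this definition; the
+        # "Settable" entry is an assumption).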
+ Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + example: "/var/lib/docker/plugins/" + Destination: + type: "string" + x-nullable: false + example: "/mnt/state" + Type: + type: "string" + x-nullable: false + example: "bind" + Options: + type: "array" + items: + type: "string" + example: + - "rbind" + - "rw" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + example: "/dev/fuse" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Engine API" + type: "object" + required: [Settings, Enabled, Config, Name] + properties: + Id: + type: "string" + example: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: + type: "string" + x-nullable: false + example: "tiborvass/sample-volume-plugin" + Enabled: + description: + True if the plugin is running. False if the plugin is not running, + only installed. + type: "boolean" + x-nullable: false + example: true + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + example: + - "DEBUG=0" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PluginReference: + description: "plugin remote reference used to push/pull the plugin" + type: "string" + x-nullable: false + example: "localhost:5000/tiborvass/sample-volume-plugin:latest" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - WorkDir + - Network + - Linux + - PidHost + - PropagatedMount + - IpcHost + - Mounts + - Env + - Args + properties: + DockerVersion: + description: "Docker Version used to create the plugin" + type: "string" + x-nullable: false + example: "17.06.0-ce" + Description: + type: "string" + x-nullable: false + example: "A sample volume plugin for Docker" + Documentation: + type: "string" + x-nullable: false + example: "https://docs.docker.com/engine/extend/plugins/" + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + example: + - "docker.volumedriver/1.0" + Socket: + type: "string" + x-nullable: false + example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." 
+ enum: + - "" + - "moby.plugins.http/v1" + Entrypoint: + type: "array" + items: + type: "string" + example: + - "/usr/bin/sample-volume-plugin" + - "/data" + WorkDir: + type: "string" + x-nullable: false + example: "/bin/" + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + example: 1000 + GID: + type: "integer" + format: "uint32" + example: 1000 + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + example: "host" + Linux: + type: "object" + x-nullable: false + required: [Capabilities, AllowAllDevices, Devices] + properties: + Capabilities: + type: "array" + items: + type: "string" + example: + - "CAP_SYS_ADMIN" + - "CAP_SYSLOG" + AllowAllDevices: + type: "boolean" + x-nullable: false + example: false + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + PropagatedMount: + type: "string" + x-nullable: false + example: "/mnt/volumes" + IpcHost: + type: "boolean" + x-nullable: false + example: false + PidHost: + type: "boolean" + x-nullable: false + example: false + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + example: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "0" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + example: "args" + Description: + x-nullable: false + type: "string" + example: "command line arguments" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + rootfs: + type: "object" + properties: + type: + type: "string" + example: "layers" + diff_ids: + type: "array" + items: + type: "string" + example: + - "sha256:675532206fbf3030b8458f88d6e26d4eb1577688a25efec97154c94e8b6b4887" + - "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + + ObjectVersion: + description: | + The version number of the object such as node, service, etc. This is needed + to avoid conflicting writes. The client must send the version number along + with the modified specification when updating these objects. + + This approach ensures safe concurrency and determinism in that the change + on the object may not be applied if the version number has changed from the + last read. In other words, if two update requests specify the same base + version, only one of the requests can succeed. As a result, two separate + update requests that happen at the same time will not unintentionally + overwrite each other. + type: "object" + properties: + Index: + type: "integer" + format: "uint64" + example: 373531 + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + example: "my-node" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + example: "manager" + Availability: + description: "Availability of the node." 
+ type: "string" + enum: + - "active" + - "pause" + - "drain" + example: "active" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + + Node: + type: "object" + properties: + ID: + type: "string" + example: "24ifsmvkjbyhk" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + $ref: "#/definitions/NodeDescription" + Status: + $ref: "#/definitions/NodeStatus" + ManagerStatus: + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. + type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" + TLSInfo: + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + + TLSInfo: + description: | + Information about the issuer of leaf TLS certificates and the trusted root + CA certificate. + type: "object" + properties: + TrustRoot: + description: | + The root CA certificate(s) that are used to validate leaf TLS + certificates. + type: "string" + CertIssuerSubject: + description: + The base64-url-safe-encoded raw subject bytes of the issuer. + type: "string" + CertIssuerPublicKey: + description: | + The base64-url-safe-encoded raw public key bytes of the issuer. 
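+      # Illustrative sketch (assumes GNU base64 and openssl on PATH, not part
+      # of the upstream file): the issuer fields below are plain base64, so the
+      # example subject can be inspected with
+      #   echo "MBMxETAPBgNVBAMTCHN3YXJtLWNh" | base64 -d | openssl asn1parse -inform DER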
+ type: "string" + example: + TrustRoot: | + -----BEGIN CERTIFICATE----- + MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw + EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 + MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH + A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf + 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB + Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO + PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz + pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H + -----END CERTIFICATE----- + CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" + CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. + + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + example: "default" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" + Orchestration: + description: "Orchestration configuration." + type: "object" + x-nullable: true + properties: + TaskHistoryRetentionLimit: + description: | + The number of historic tasks to keep per instance or node. If + negative, never remove completed or failed tasks. + type: "integer" + format: "int64" + example: 10 + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "uint64" + example: 10000 + KeepOldSnapshots: + description: | + The number of snapshots to keep beyond the current snapshot. + type: "integer" + format: "uint64" + LogEntriesForSlowFollowers: + description: | + The number of log entries to keep around to sync up slow followers + after a snapshot is created. + type: "integer" + format: "uint64" + example: 500 + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from + the leader before becoming a candidate and starting an election. + `ElectionTick` must be greater than `HeartbeatTick`. 
+ + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 3 + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, + the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate + directly to seconds currently, but this is NOT guaranteed. + type: "integer" + example: 1 + Dispatcher: + description: "Dispatcher configuration." + type: "object" + x-nullable: true + properties: + HeartbeatPeriod: + description: | + The delay for an agent to send a heartbeat to the dispatcher. + type: "integer" + format: "int64" + example: 5000000000 + CAConfig: + description: "CA configuration." + type: "object" + x-nullable: true + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + example: 7776000000000000 + ExternalCAs: + description: | + Configuration for forwarding signing requests to an external + certificate authority. + type: "array" + items: + type: "object" + properties: + Protocol: + description: | + Protocol for communication with the external CA (currently + only `cfssl` is supported). + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: | + URL where certificate signing requests should be sent. + type: "string" + Options: + description: | + An object with key/value pairs that are interpreted as + protocol-specific options for the external CA driver. + type: "object" + additionalProperties: + type: "string" + CACert: + description: | + The root CA certificate (in PEM format) this external CA uses + to issue TLS certificates (assumed to be the current swarm + root CA certificate if not provided). + type: "string" + SigningCACert: + description: | + The desired signing CA certificate for all swarm node TLS leaf + certificates, in PEM format. + type: "string" + SigningCAKey: + description: | + The desired signing CA key for all swarm node TLS leaf certificates, + in PEM format. + type: "string" + ForceRotate: + description: | + An integer whose purpose is to force swarm to generate a new + signing CA certificate and key, if none have been specified in + `SigningCACert` and `SigningCAKey`. + format: "uint64" + type: "integer" + EncryptionConfig: + description: "Parameters related to encryption-at-rest." + type: "object" + properties: + AutoLockManagers: + description: | + If set, generate a key and use it to lock data stored on the + managers. + type: "boolean" + example: false + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if + unspecified by a service. + + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. + type: "object" + properties: + Name: + description: | + The log driver to use as a default for new tasks. + type: "string" + example: "json-file" + Options: + description: | + Driver-specific options for the selected log driver, specified + as key/value pairs. + type: "object" + additionalProperties: + type: "string" + example: + "max-file": "10" + "max-size": "100m" + + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`.
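+  # Illustrative sketch, not part of the upstream file: writes to versioned
+  # swarm objects must echo back the Version.Index obtained from a read, e.g.
+  #   curl --unix-socket /var/run/docker.sock http://localhost/v1.40/swarm
+  #   curl --unix-socket /var/run/docker.sock -X POST \
+  #     "http://localhost/v1.40/swarm/update?version=<Index from Version>" \
+  #     -H "Content-Type: application/json" -d @spec.json
+  # The daemon rejects the update if another writer bumped the index in the
+  # meantime; spec.json is a hypothetical file holding the modified SwarmSpec.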
+ ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" + UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. + type: "string" + format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" + Spec: + $ref: "#/definitions/SwarmSpec" + TLSInfo: + $ref: "#/definitions/TLSInfo" + RootRotationInProgress: + description: | + Whether there is currently a root CA rotation in progress for the swarm + type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope + networks. + type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the + default subnet pool. + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + PluginSpec: + type: "object" + description: | + Plugin spec for the service. *(Experimental release only.)* + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Name: + description: "The name or 'alias' to use for the plugin." + type: "string" + Remote: + description: "The plugin image reference to use." + type: "string" + Disabled: + description: "Disable the plugin once scheduled." + type: "boolean" + PluginPrivilege: + type: "array" + items: + description: | + Describes a permission accepted by the user upon installing the + plugin. + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + ContainerSpec: + type: "object" + description: | + Container spec for the service. + +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + properties: + Image: + description: "The image name to use for the container" + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + Command: + description: "The command to be run in the image." + type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Hostname: + description: | + The hostname to use for the container, as a valid + [RFC 1123](https://tools.ietf.org/html/rfc1123) hostname. + type: "string" + Env: + description: | + A list of environment variables in the form `VAR=value`. + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Groups: + type: "array" + description: | + A list of additional groups that the container process will run as. + items: + type: "string" + Privileges: + type: "object" + description: "Security options for the container" + properties: + CredentialSpec: + type: "object" + description: "CredentialSpec for managed service account (Windows only)" + properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs + field with the Runtime property set. + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + File: + type: "string" + example: "spec.json" + description: | + Load credential spec from this file. The file is read by + the daemon, and must be present in the `CredentialSpecs` + subdirectory in the docker data directory, which defaults + to `C:\ProgramData\Docker\` on Windows. + + For example, specifying `spec.json` loads + `C:\ProgramData\Docker\CredentialSpecs\spec.json`. + +
+ + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + Registry: + type: "string" + description: | + Load credential spec from this value in the Windows + registry. The specified registry value must be located in: + + `HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Virtualization\Containers\CredentialSpecs` + +
+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, + > and `CredentialSpec.Config` are mutually exclusive. + SELinuxContext: + type: "object" + description: "SELinux labels of the container" + properties: + Disable: + type: "boolean" + description: "Disable SELinux" + User: + type: "string" + description: "SELinux user label" + Role: + type: "string" + description: "SELinux role label" + Type: + type: "string" + description: "SELinux type label" + Level: + type: "string" + description: "SELinux level label" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + OpenStdin: + description: "Open `stdin`" + type: "boolean" + ReadOnly: + description: "Mount the container's root filesystem as read only." + type: "boolean" + Mounts: + description: | + Specification for mounts to be added to containers created as part + of the service. + type: "array" + items: + $ref: "#/definitions/Mount" + StopSignal: + description: "Signal to stop the container." + type: "string" + StopGracePeriod: + description: | + Amount of time to wait for the container to terminate before + forcefully killing it. + type: "integer" + format: "int64" + HealthCheck: + $ref: "#/definitions/HealthConfig" + Hosts: + type: "array" + description: | + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] + items: + type: "string" + DNSConfig: + description: | + Specification for DNS related configurations in resolver configuration + file (`resolv.conf`). + type: "object" + properties: + Nameservers: + description: "The IP addresses of the name servers." + type: "array" + items: + type: "string" + Search: + description: "A search list for host-name lookup." + type: "array" + items: + type: "string" + Options: + description: | + A list of internal resolver variables to be modified (e.g., + `debug`, `ndots:3`, etc.). + type: "array" + items: + type: "string" + Secrets: + description: | + Secrets contains references to zero or more secrets that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + SecretID: + description: | + SecretID represents the ID of the specific secret that we're + referencing. + type: "string" + SecretName: + description: | + SecretName is the name of the secret that this references, + but this is just provided for lookup/display purposes. The + secret in the reference will be identified by its ID. + type: "string" + Configs: + description: | + Configs contains references to zero or more configs that will be + exposed to the service. + type: "array" + items: + type: "object" + properties: + File: + description: | + File represents a specific target that is backed by a file. + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" + properties: + Name: + description: | + Name represents the final filename in the filesystem. + type: "string" + UID: + description: "UID represents the file UID." + type: "string" + GID: + description: "GID represents the file GID." + type: "string" + Mode: + description: "Mode represents the FileMode of the file." + type: "integer" + format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the + container but is used by the task + +
+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually + > exclusive + type: "object" + ConfigID: + description: | + ConfigID represents the ID of the specific config that we're + referencing. + type: "string" + ConfigName: + description: | + ConfigName is the name of the config that this references, + but this is just provided for lookup/display purposes. The + config in the reference will be identified by its ID. + type: "string" + Isolation: + type: "string" + description: | + Isolation technology of the containers running the service. + (Windows only) + enum: + - "default" + - "process" + - "hyperv" + Init: + description: | + Run an init inside the container that forwards signals and reaps + processes. This field is omitted if empty, and the default (as + configured on the daemon) is used. + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls as + are supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + # This option is not used by Windows containers + CapabilityAdd: + type: "array" + description: | + A list of kernel capabilities to add to the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + - "CAP_SYS_ADMIN" + - "CAP_SYS_CHROOT" + - "CAP_SYSLOG" + CapabilityDrop: + type: "array" + description: | + A list of kernel capabilities to drop from the default set + for the container. + items: + type: "string" + example: + - "CAP_NET_RAW" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}` + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks. +
+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" + Resources: + description: | + Resource requirements which apply to each individual container created + as part of the service. + type: "object" + properties: + Limits: + description: "Define resource limits." + $ref: "#/definitions/Limit" + Reservation: + description: "Define resource reservations." + $ref: "#/definitions/ResourceObject" + RestartPolicy: + description: | + Specification for the restart policy which applies to containers + created as part of this service. + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: | + Maximum attempts to restart a given container before giving up + (default value is 0, which is ignored). + type: "integer" + format: "int64" + default: 0 + Window: + description: | + Window is the time window used to evaluate the restart policy + (default value is 0, which is unbounded). + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: | + An array of constraint expressions to limit the set of nodes where + a task can be scheduled. Constraint expressions can either use a + _match_ (`==`) or _exclude_ (`!=`) rule. Multiple constraints find + nodes that satisfy every expression (AND match). Constraints can + match node or Docker Engine labels as follows: + + node attribute | matches | example + ---------------------|--------------------------------|----------------------------------------------- + `node.id` | Node ID | `node.id==2ivku8v2gvtg4` + `node.hostname` | Node hostname | `node.hostname!=node-2` + `node.role` | Node role (`manager`/`worker`) | `node.role==manager` + `node.platform.os` | Node operating system | `node.platform.os==windows` + `node.platform.arch` | Node architecture | `node.platform.arch==x86_64` + `node.labels` | User-defined node labels | `node.labels.security==high` + `engine.labels` | Docker Engine's labels | `engine.labels.operatingsystem==ubuntu-14.04` + + `engine.labels` apply to Docker Engine labels like operating system, + drivers, etc. Swarm administrators add `node.labels` for operational + purposes by using the [`node update endpoint`](#operation/NodeUpdate). + + type: "array" + items: + type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" + - "node.platform.os==linux" + - "node.platform.arch==x86_64" + Preferences: + description: | + Preferences provide a way to make the scheduler aware of factors + such as topology. They are provided in order from highest to + lowest precedence. + type: "array" + items: + type: "object" + properties: + Spread: + type: "object" + properties: + SpreadDescriptor: + description: | + label descriptor, such as `engine.labels.az`.
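+                    # Illustrative sketch, not upstream content: constraints and
+                    # spread preferences map directly onto the CLI, e.g.
+                    #   docker service create --name web \
+                    #     --constraint 'node.role==worker' \
+                    #     --placement-pref 'spread=node.labels.datacenter' nginx
+                    # (the service name "web" and the labels are assumptions).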
+ type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: | + Maximum number of replicas per node (default value is 0, which + is unlimited). + type: "integer" + format: "int64" + default: 0 + Platforms: + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. + type: "array" + items: + $ref: "#/definitions/Platform" + ForceUpdate: + description: | + A counter that triggers an update even if no relevant parameters have + been changed. + type: "integer" + Runtime: + description: | + Runtime is the type of runtime specified for the task executor. + type: "string" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + LogDriver: + description: | + Specifies the log driver to use for tasks created from this spec. If + not present, the default one for the swarm will be used, finally + falling back to the engine default if not specified. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + - "remove" + - "orphaned" + + Task: + type: "object" + properties: + ID: + description: "The ID of the task." + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + AssignedGenericResources: + $ref: "#/definitions/GenericResources" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + JobIteration: + description: | + If the Service this Task belongs to is a job-mode service, contains + the JobIteration of the Service this Task was created for. Absent if + the Task was created for a Replicated or Global Service.
+ $ref: "#/definitions/ObjectVersion" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + AssignedGenericResources: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" + + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + ReplicatedJob: + description: | + The mode used for services with a finite number of tasks that run + to a completed state. + type: "object" + properties: + MaxConcurrent: + description: | + The maximum number of replicas to run simultaneously. + type: "integer" + format: "int64" + default: 1 + TotalCompletions: + description: | + The total number of replicas desired to reach the Completed + state. If unset, will default to the value of `MaxConcurrent` + type: "integer" + format: "int64" + GlobalJob: + description: | + The mode used for services which run a task to the completed state + on each valid node. + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be updated in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if an updated task fails to run, or stops running + during the update. + type: "string" + enum: + - "continue" + - "pause" + - "rollback" + Monitor: + description: | + Amount of time to monitor each updated task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during an update before the + failure action is invoked, specified as a floating point number + between 0 and 1. 
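+            # Illustrative arithmetic, editor-added: with 10 tasks and a
+            # MaxFailureRatio of 0.15, one failed updated task (ratio 0.1) is
+            # tolerated, while a second failure (ratio 0.2) exceeds the
+            # threshold and triggers FailureAction.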
+ type: "number" + default: 0 + Order: + description: | + The order of operations when rolling out an updated task. Either + the old task is shut down before the new task is started, or the + new task is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + RollbackConfig: + description: "Specification for the rollback strategy of the service." + type: "object" + properties: + Parallelism: + description: | + Maximum number of tasks to be rolled back in one iteration (0 means + unlimited parallelism). + type: "integer" + format: "int64" + Delay: + description: | + Amount of time between rollback iterations, in nanoseconds. + type: "integer" + format: "int64" + FailureAction: + description: | + Action to take if a rolled back task fails to run, or stops + running during the rollback. + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: | + Amount of time to monitor each rolled back task for failures, in + nanoseconds. + type: "integer" + format: "int64" + MaxFailureRatio: + description: | + The fraction of tasks that may fail during a rollback before the + failure action is invoked, specified as a floating point number + between 0 and 1. + type: "number" + default: 0 + Order: + description: | + The order of operations when rolling back a task. Either the old + task is shut down before the new task is started, or the new task + is started before the old task is shut down. + type: "string" + enum: + - "stop-first" + - "start-first" + Networks: + description: "Specifies which networks the service should attach to." + type: "array" + items: + $ref: "#/definitions/NetworkAttachmentConfig" + + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + - "sctp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + PublishMode: + description: | + The mode in which the port is published. +
+ + - "ingress" makes the target port accessible on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publishes the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: | + The mode of resolution to use for internal load balancing between tasks. + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: | + List of exposed ports that this service is accessible on from the + outside. Ports can only be provided if `vip` resolution mode is used. + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + + Service: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." + type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + ServiceStatus: + description: | + The status of the service's tasks. Provided only when requested as + part of a ServiceList operation. + type: "object" + properties: + RunningTasks: + description: | + The number of tasks for the service currently in the Running state. + type: "integer" + format: "uint64" + example: 7 + DesiredTasks: + description: | + The number of tasks for the service desired to be running. + For replicated services, this is the replica count from the + service spec. For global services, this is computed by taking + the count of all tasks for the service with a Desired State other + than Shutdown. + type: "integer" + format: "uint64" + example: 10 + CompletedTasks: + description: | + The number of tasks for a job that are in the Completed state. + This field must be cross-referenced with the service type, as the + value of 0 may mean the service is not in a job mode, or it may + mean the job-mode service has no tasks yet Completed. + type: "integer" + format: "uint64" + JobStatus: + description: | + The status of the service when it is in one of ReplicatedJob or + GlobalJob modes. Absent on Replicated and Global mode services. The + JobIteration is an ObjectVersion, but unlike the Service's version, + does not need to be sent with an update request. + type: "object" + properties: + JobIteration: + description: | + JobIteration is a value increased each time a Job is executed, + successfully or otherwise. "Executed", in this case, means the + job as a whole has been started, not that an individual Task has + been launched. A job is "Executed" when its ServiceSpec is + updated. JobIteration can be used to disambiguate Tasks belonging + to different executions of a job.
Though JobIteration will + increase with each subsequent execution, it may not necessarily + increase by 1, and so JobIteration should not be used to + $ref: "#/definitions/ObjectVersion" + LastExecution: + description: | + The last time, as observed by the server, that this job was + started. + type: "string" + format: "dateTime" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + + ImageDeleteResponseItem: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + + ServiceUpdateResponse: + type: "object" + properties: + Warnings: + description: "Optional warning messages" + type: "array" + items: + type: "string" + example: + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. 
`Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + + SecretSpec: + type: "object" + properties: + Name: + description: "User-defined name of the secret." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: | + Name of the secrets driver used to fetch the secret's value from an + external secret store. + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Secret: + type: "object" + properties: + ID: + type: "string" + example: "blt1owaxmitz71s9v5zh81zun" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: + type: "string" + format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" + Spec: + $ref: "#/definitions/SecretSpec" + + ConfigSpec: + type: "object" + properties: + Name: + description: "User-defined name of the config." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Data: + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-5)) + config data. + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + + Config: + type: "object" + properties: + ID: + type: "string" + Version: + $ref: "#/definitions/ObjectVersion" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ConfigSpec" + + ContainerState: + description: | + ContainerState stores container's running state. It's part of ContainerJSONBase + and will be returned by the "inspect" command. + type: "object" + properties: + Status: + description: | + String representation of the container state. Can be one of "created", + "running", "paused", "restarting", "removing", "exited", or "dead". 
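+        # Illustrative sketch, not upstream content: this is the state string
+        # returned by `GET /containers/{id}/json` and printed by, e.g.,
+        #   docker inspect -f '{{.State.Status}}' <container>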
+ type: "string" + enum: ["created", "running", "paused", "restarting", "removing", "exited", "dead"] + example: "running" + Running: + description: | + Whether this container is running. + + Note that a running container can be _paused_. The `Running` and `Paused` + booleans are not mutually exclusive: + + When pausing a container (on Linux), the freezer cgroup is used to suspend + all processes in the container. Freezing the process requires the process to + be running. As a result, paused containers are both `Running` _and_ `Paused`. + + Use the `Status` field instead to determine if a container's state is "running". + type: "boolean" + example: true + Paused: + description: "Whether this container is paused." + type: "boolean" + example: false + Restarting: + description: "Whether this container is restarting." + type: "boolean" + example: false + OOMKilled: + description: | + Whether this container has been killed because it ran out of memory. + type: "boolean" + example: false + Dead: + type: "boolean" + example: false + Pid: + description: "The process ID of this container" + type: "integer" + example: 1234 + ExitCode: + description: "The last exit code of this container" + type: "integer" + example: 0 + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + example: "2020-01-06T09:06:59.461876391Z" + FinishedAt: + description: "The time when this container last exited." + type: "string" + example: "2020-01-06T09:07:59.461876391Z" + Health: + x-nullable: true + $ref: "#/definitions/Health" + + SystemVersion: + type: "object" + description: | + Response of Engine API: GET "/version" + properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + description: | + Information about system components + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + description: | + Name of the component + type: "string" + example: "Engine" + Version: + description: | + Version of the component + type: "string" + x-nullable: false + example: "19.03.12" + Details: + description: | + Key/value pairs of strings with additional information about the + component. These values are intended for informational purposes + only, and their content is not defined, and not part of the API + specification. + + These messages can be printed by the client as information to the user. + type: "object" + x-nullable: true + Version: + description: "The version of the daemon" + type: "string" + example: "19.03.12" + ApiVersion: + description: | + The default (and highest) API version that is supported by the daemon + type: "string" + example: "1.40" + MinAPIVersion: + description: | + The minimum API version that is supported by the daemon + type: "string" + example: "1.12" + GitCommit: + description: | + The Git commit of the source code that was used to build the daemon + type: "string" + example: "48a66213fe" + GoVersion: + description: | + The version Go used to compile the daemon, and the version of the Go + runtime in use. + type: "string" + example: "go1.13.14" + Os: + description: | + The operating system that the daemon is running on ("linux" or "windows") + type: "string" + example: "linux" + Arch: + description: | + The architecture that the daemon is running on + type: "string" + example: "amd64" + KernelVersion: + description: | + The kernel version (`uname -r`) that the daemon is running on. 
+ + This field is omitted when empty. + type: "string" + example: "4.19.76-linuxkit" + Experimental: + description: | + Indicates if the daemon is started with experimental features enabled. + + This field is omitted when empty / false. + type: "boolean" + example: true + BuildTime: + description: | + The date and time that the daemon was compiled. + type: "string" + example: "2020-06-22T15:49:27.000000000+00:00" + + + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +
+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +
+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemory: + description: | + Indicates if the host has kernel memory limit support enabled. + +
+ + > **Deprecated**: This field is deprecated because kernel 5.4 deprecated + > `kmem.limit_in_bytes`. + type: "boolean" + example: true + CpuCfsPeriod: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) period is supported by + the host. + type: "boolean" + example: true + CpuCfsQuota: + description: | + Indicates if CPU CFS (Completely Fair Scheduler) quota is supported by + the host. + type: "boolean" + example: true + CPUShares: + description: | + Indicates if CPU Shares limiting is supported by the host. + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: | + Indicates if the daemon is running in debug-mode / with debug-level + logging enabled. + type: "boolean" + example: true + NFd: + description: | + The total number of file descriptors in use by the daemon process. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + CgroupVersion: + description: | + The version of the cgroup. + type: "string" + enum: ["1", "2"] + default: "1" + example: "1" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information is obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSVersion: + description: | + Version of the host's operating system + +
+ + > **Note**: The information returned in this field, including its + > very existence, and the formatting of values, should not be considered + > stable, and may change without notice. + type: "string" + example: "16.04" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in bytes. + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +
+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + + > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) + > returns the Swarm version instead of the daemon version, for example + > `swarm/1.2.8`. + type: "string" + example: "17.06.0-ce" + ClusterStore: + description: | + URL of the distributed storage backend. + + + The storage backend is used for multihost networking (to store + network and endpoint information) and by the node discovery mechanism. + +
+ + > **Deprecated**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "consul://consul.corp.example.com:8600/some/path" + ClusterAdvertise: + description: | + The network endpoint that the Engine advertises for the purpose of + node discovery. ClusterAdvertise is a `host:port` combination on which + the daemon is reachable by other hosts. + +
+ + > **Deprecated**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "node5.corp.example.com:8000" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + DefaultAddressPools: + description: | + List of custom default address pools for local networks, which can be + specified in the daemon.json file or dockerd option. + + Example: a Base "10.10.0.0/16" with Size 24 will define the set of 256 + 10.10.[0-255].0/24 address pools. 
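To make the arithmetic in the address-pool example above concrete: the number of sub-pools is 2^(Size − base prefix length). A tiny Go sketch of that calculation (illustrative only; `poolCount` is a hypothetical helper, not part of this API or the vendored code):

```go
package main

import "fmt"

// poolCount returns how many sub-pools of prefix length `size` fit in a
// base network of prefix length `basePrefix`: 2^(size-basePrefix).
func poolCount(basePrefix, size int) int {
	return 1 << uint(size-basePrefix)
}

func main() {
	// Base "10.10.0.0/16" with Size 24 => 256 pools (10.10.0.0/24 ... 10.10.255.0/24).
	fmt.Println(poolCount(16, 24)) // 256
}
```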
+ type: "array" + items: + type: "object" + properties: + Base: + description: "The network address in CIDR format" + type: "string" + example: "10.10.0.0/16" + Size: + description: "The network pool size" + type: "integer" + example: "24" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. + type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +
+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and volume-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `<hostname>[:<port>]` or `<IP address>[:<port>]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration overrides this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts.
+ type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. 
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + + NetworkAttachmentConfig: + description: | + Specifies how a service should be attached to a particular network. + type: "object" + properties: + Target: + description: | + The target network for attachment. Must be a network name or ID. 
+ type: "string" + Aliases: + description: | + Discoverable alternate names for the service on this network. + type: "array" + items: + type: "string" + DriverOpts: + description: | + Driver attachment options for the network target. + type: "object" + additionalProperties: + type: "string" + +paths: + /containers/json: + get: + summary: "List containers" + description: | + Returns a list of containers. For details on the format, see the + [inspect endpoint](#operation/ContainerInspect). + + Note that it uses a different, smaller representation of a container + than inspecting a single container. For example, the list of linked + containers is not propagated . + operationId: "ContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: | + Return all containers. By default, only running containers are shown. + type: "boolean" + default: false + - name: "limit" + in: "query" + description: | + Return this number of most recently created containers, including + non-running ones. + type: "integer" + - name: "size" + in: "query" + description: | + Return the size of container as fields `SizeRw` and `SizeRootFs`. + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a + `map[string][]string`). For example, `{"status": ["paused"]}` will + only return paused containers. + + Available filters: + + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `expose`=(`[/]`|`/[]`) + - `exited=` containers with exit code of `` + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + - `id=` a container's ID + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `is-task=`(`true`|`false`) + - `label=key` or `label="key=value"` of a container label + - `name=` a container's name + - `network`=(`` or ``) + - `publish`=(`[/]`|`/[]`) + - `since`=(`` or ``) + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `volume`=(`` or ``) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: 
"7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: | + Assign the specified name to the container. Must match + `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`. 
+ type: "string" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/ContainerConfig" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + $ref: "#/definitions/NetworkingConfig" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCpus: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: 0 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: + IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + title: "ContainerCreateResponse" + description: "OK response to ContainerCreate operation" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /containers/{id}/json: + 
get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "ContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerInspectResponse" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + x-nullable: true + $ref: "#/definitions/ContainerState" + Image: + description: "The container's image ID" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + Platform: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriverData" + SizeRw: + description: | + The size of files that have been created or changed by this + container. + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." + type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/ContainerConfig" + NetworkSettings: + $ref: "#/definitions/NetworkSettings" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Healthcheck: + Test: ["CMD-SHELL", "exit 0"] + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + DeviceRequests: + - Driver: "nvidia" + Count: -1 + DeviceIDs": ["0", "1", "GPU-fef8089b-4820-abfc-e83e-94318197576e"] + Capabilities: [["gpu", "nvidia", "compute"]] + Options: + property1: "string" + property2: "string" + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + 
PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + Health: + Status: "healthy" + FailingStreak: 0 + Log: + - Start: "2019-12-22T10:59:05.6385933Z" + End: "2019-12-22T10:59:05.8078452Z" + ExitCode: 0 + Output: "" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of the container as fields `SizeRw` and `SizeRootFs`" + tags: ["Container"] + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: | + On Unix systems, this is done by running the `ps` command. This endpoint + is not supported on Windows. + operationId: "ContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: | + Each process running in the container, where each process + is an array of values corresponding to the titles.
+ type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: ["Container"] + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or + `journald` logging driver. + operationId: "ContainerLogs" + responses: + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not + upgrade the connection and does not set Content-Type. + schema: + type: "string" + format: "binary" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Container"] + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, + or modified. 
The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "ContainerChanges" + produces: ["application/json"] + responses: + 200: + description: "The list of changes" + schema: + type: "array" + items: + type: "object" + x-go-name: "ContainerChangeResponseItem" + title: "ContainerChangeResponseItem" + description: "change item in response to ContainerChanges operation" + required: [Path, Kind] + properties: + Path: + description: "Path to file that has changed" + type: "string" + x-nullable: false + Kind: + description: "Kind of change" + type: "integer" + format: "uint8" + enum: [0, 1, 2] + x-nullable: false + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "ContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage + statistics. + + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. + + If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is + nil then for compatibility with older daemons the length of the + corresponding `cpu_usage.percpu_usage` array should be used. + + On a cgroup v2 host, the following fields are not set + * `blkio_stats`: all fields other than `io_service_bytes_recursive` + * `cpu_stats`: `cpu_usage.percpu_usage` + * `memory_stats`: `max_usage` and `failcnt` + Also, `memory_stats.stats` fields are incompatible with cgroup v1. 
+ + To calculate the values shown by the `stats` command of the docker cli tool, + the following formulas can be used: + * used_memory = `memory_stats.usage - memory_stats.stats.cache` + * available_memory = `memory_stats.limit` + * Memory usage % = `(used_memory / available_memory) * 100.0` + * cpu_delta = `cpu_stats.cpu_usage.total_usage - precpu_stats.cpu_usage.total_usage` + * system_cpu_delta = `cpu_stats.system_cpu_usage - precpu_stats.system_cpu_usage` + * number_cpus = `length(cpu_stats.cpu_usage.percpu_usage)` or `cpu_stats.online_cpus` + * CPU usage % = `(cpu_delta / system_cpu_delta) * number_cpus * 100.0` + operationId: "ContainerStats" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + online_cpus: 4 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: | + Stream the output. If false, the stats will be output once and then + it will disconnect. + type: "boolean" + default: true + - name: "one-shot" + in: "query" + description: | + Only get a single stat instead of waiting for 2 cycles. Must be used + with `stream=false`. + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container."
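As an aside for API consumers, here is a minimal Go sketch of the CPU-percentage formula given in the stats endpoint above. The struct fields mirror the `cpu_stats` / `precpu_stats` JSON in the example response, and the sample numbers are taken from that example; the helper names (`CPUStats`, `cpuPercent`) are hypothetical, not part of any client library:

```go
package main

import "fmt"

// CPUUsage and CPUStats model the subset of the stats response that the
// CPU-percentage formula needs.
type CPUUsage struct {
	TotalUsage  uint64   `json:"total_usage"`
	PercpuUsage []uint64 `json:"percpu_usage"`
}

type CPUStats struct {
	CPUUsage       CPUUsage `json:"cpu_usage"`
	SystemCPUUsage uint64   `json:"system_cpu_usage"`
	OnlineCPUs     int      `json:"online_cpus"`
}

// cpuPercent applies: (cpu_delta / system_cpu_delta) * number_cpus * 100.0
func cpuPercent(pre, cur CPUStats) float64 {
	cpuDelta := float64(cur.CPUUsage.TotalUsage - pre.CPUUsage.TotalUsage)
	sysDelta := float64(cur.SystemCPUUsage - pre.SystemCPUUsage)
	n := float64(cur.OnlineCPUs)
	if n == 0 {
		// Older daemons leave online_cpus unset; fall back to percpu_usage length.
		n = float64(len(cur.CPUUsage.PercpuUsage))
	}
	if sysDelta <= 0 {
		return 0
	}
	return (cpuDelta / sysDelta) * n * 100.0
}

func main() {
	// Values copied from the example response in the spec above.
	pre := CPUStats{CPUUsage: CPUUsage{TotalUsage: 100093996}, SystemCPUUsage: 9492140000000, OnlineCPUs: 4}
	cur := CPUStats{CPUUsage: CPUUsage{TotalUsage: 100215355}, SystemCPUUsage: 739306590000000, OnlineCPUs: 4}
	fmt.Printf("CPU usage: %.9f%%\n", cpuPercent(pre, cur))
}
```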
+ operationId: "ContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Container"] + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "ContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: | + Override the key sequence for detaching a container. Format is a + single character `[a-Z]` or `ctrl-` where `` is one + of: `a-z`, `@`, `^`, `[`, `,` or `_`. + type: "string" + tags: ["Container"] + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "ContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "ContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: ["Container"] + /containers/{id}/kill: + post: + summary: "Kill a container" + description: | + Send a POSIX signal to a container, defaulting to killing to the + container. 
+ operationId: "ContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: ["Container"] + /containers/{id}/update: + post: + summary: "Update a container" + description: | + Change various configuration options of a container without having to + recreate it. + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: ["Container"] + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "ContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: ["Container"] + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the freezer cgroup to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, + which is observable by the process being suspended. With the freezer + cgroup the process is unaware, and unable to capture, that it is being + suspended, and subsequently resumed. 
+ operationId: "ContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "ContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: ["Container"] + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach + to the same container multiple times and you can reattach to containers + that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint + to do anything. + + See the [documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) + for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, + and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used + for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client + can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will + similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/ContainerCreate), + the stream over the hijacked connected is multiplexed to separate out + `stdout` and `stderr`. The stream consists of a series of frames, each + containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or + `stderr`). It also contains the size of the associated frame encoded in + the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size + encoded as big endian. + + Following the header is the payload, which is the specified number of + bytes of `STREAM_TYPE`. 
+
+ The simplest way to implement this protocol is the following:
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/ContainerCreate),
+ the stream is not multiplexed. The data exchanged over the hijacked
+ connection is simply the raw data from the process PTY and client's
+ `stdin`.
+
+ operationId: "ContainerAttach"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: |
+ Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,` or `_`.
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: |
+ Replay previous logs from the container.
+
+ This is useful for attaching to a container that has started and you
+ want to output everything since the container started.
+
+ If `stream` is also enabled, once all the previous output has been
+ returned, it will seamlessly transition into streaming current
+ output.
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: |
+ Stream attached streams from the time the request was made onwards.
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags: ["Container"]
+ /containers/{id}/attach/ws:
+ get:
+ summary: "Attach to a container via a websocket"
+ operationId: "ContainerAttachWebsocket"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: |
+ Override the key sequence for detaching a container. Format is a single
+ character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`,
+ `@`, `^`, `[`, `,`, or `_`.
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + title: "ContainerWaitResponse" + description: "OK response to ContainerWait operation" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + Error: + description: "container waiting error, if any" + type: "object" + properties: + Message: + description: "Details of an error" + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "condition" + in: "query" + description: | + Wait until a container state reaches the given condition, either + 'not-running' (default), 'next-exit', or 'removed'. + type: "string" + default: "not-running" + tags: ["Container"] + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "ContainerDelete" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: | + You cannot remove a running container: c2ada9df5af8. Stop the + container before attempting removal or force remove + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove anonymous volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + - name: "link" + in: "query" + description: "Remove the specified link associated with the container." + type: "boolean" + default: false + tags: ["Container"] + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: | + A response header `X-Docker-Container-Path-Stat` is returned, containing + a base64 - encoded JSON object with some filesystem header information + about the path. 
+ operationId: "ContainerArchiveInfo" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: | + A base64 - encoded JSON object with some filesystem header + information about the path + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: | + The error message. Either "must specify path parameter" + (path cannot be empty) or "not a directory" (path was + asserted to be a directory but exists as a file). + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get a tar archive of a resource in the filesystem of container id." + operationId: "ContainerArchive" + produces: ["application/x-tar"] + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: | + The error message. Either "must specify path parameter" + (path cannot be empty) or "not a directory" (path was + asserted to be a directory but exists as a file). + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: ["Container"] + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: ["application/x-tar", "application/octet-stream"] + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. 
" + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: | + If `1`, `true`, or `True` then it will be an error if unpacking the + given content would cause an existing directory to be replaced with + a non-directory and vice versa. + type: "string" + - name: "copyUIDGID" + in: "query" + description: | + If `1`, `true`, then it will copy UID/GID maps to the dest file or + dir + type: "string" + - name: "inputStream" + in: "body" + required: true + description: | + The input stream must be a tar archive compressed with one of the + following algorithms: `identity` (no compression), `gzip`, `bzip2`, + or `xz`. + schema: + type: "string" + format: "binary" + tags: ["Container"] + /containers/prune: + post: + summary: "Delete stopped containers" + produces: + - "application/json" + operationId: "ContainerPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune containers created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune containers with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ContainerPruneResponse" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Container"] + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "ImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the images list. 
+
+ Available filters:
+
+ - `before`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ - `dangling=true`
+ - `label=key` or `label="key=value"` of an image label
+ - `reference`=(`<image-name>[:<tag>]`)
+ - `since`=(`<image-name>[:<tag>]`, `<image id>` or `<image@digest>`)
+ type: "string"
+ - name: "digests"
+ in: "query"
+ description: "Show digest information as a `RepoDigests` field on each image."
+ type: "boolean"
+ default: false
+ tags: ["Image"]
+ /build:
+ post:
+ summary: "Build an image"
+ description: |
+ Build an image from a tar archive with a `Dockerfile` in it.
+
+ The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/).
+
+ The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output.
+
+ The build is canceled if the client drops the connection by quitting or being killed.
+ operationId: "ImageBuild"
+ consumes:
+ - "application/octet-stream"
+ produces:
+ - "application/json"
+ parameters:
+ - name: "inputStream"
+ in: "body"
+ description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz."
+ schema:
+ type: "string"
+ format: "binary"
+ - name: "dockerfile"
+ in: "query"
+ description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`."
+ type: "string"
+ default: "Dockerfile"
+ - name: "t"
+ in: "query"
+ description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters."
+ type: "string"
+ - name: "extrahosts"
+ in: "query"
+ description: "Extra hosts to add to /etc/hosts"
+ type: "string"
+ - name: "remote"
+ in: "query"
+ description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball."
+ type: "string"
+ - name: "q"
+ in: "query"
+ description: "Suppress verbose build output."
+ type: "boolean"
+ default: false
+ - name: "nocache"
+ in: "query"
+ description: "Do not use the cache when building the image."
+ type: "boolean"
+ default: false
+ - name: "cachefrom"
+ in: "query"
+ description: "JSON array of images used for build cache resolution."
+ type: "string"
+ - name: "pull"
+ in: "query"
+ description: "Attempt to pull the image even if an older image exists locally."
+ type: "string"
+ - name: "rm"
+ in: "query"
+ description: "Remove intermediate containers after a successful build."
+ type: "boolean"
+ default: true
+ - name: "forcerm"
+ in: "query"
+ description: "Always remove intermediate containers, even upon failure."
+ type: "boolean"
+ default: false
+ - name: "memory"
+ in: "query"
+ description: "Set memory limit for build."
+ type: "integer"
+ - name: "memswap"
+ in: "query"
+ description: "Total memory (memory + swap). 
Set as `-1` to disable swap."
+ type: "integer"
+ - name: "cpushares"
+ in: "query"
+ description: "CPU shares (relative weight)."
+ type: "integer"
+ - name: "cpusetcpus"
+ in: "query"
+ description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)."
+ type: "string"
+ - name: "cpuperiod"
+ in: "query"
+ description: "The length of a CPU period in microseconds."
+ type: "integer"
+ - name: "cpuquota"
+ in: "query"
+ description: "Microseconds of CPU time that the container can get in a CPU period."
+ type: "integer"
+ - name: "buildargs"
+ in: "query"
+ description: >
+ JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker
+ uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
+ instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
+ passing secret values.
+
+
+ For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in the
+ query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
+
+
+ [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
+ type: "string"
+ - name: "shmsize"
+ in: "query"
+ description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
+ type: "integer"
+ - name: "squash"
+ in: "query"
+ description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*"
+ type: "boolean"
+ - name: "labels"
+ in: "query"
+ description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs."
+ type: "string"
+ - name: "networkmode"
+ in: "query"
+ description: |
+ Sets the networking mode for the run commands during build. Supported
+ standard values are: `bridge`, `host`, `none`, and `container:<name|id>`.
+ Any other value is taken as a custom network's name or ID to which this
+ container should connect.
+ type: "string"
+ - name: "Content-type"
+ in: "header"
+ type: "string"
+ enum:
+ - "application/x-tar"
+ default: "application/x-tar"
+ - name: "X-Registry-Config"
+ in: "header"
+ description: |
+ This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to.
+
+ The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example:
+
+ ```
+ {
+ "docker.example.com": {
+ "username": "janedoe",
+ "password": "hunter2"
+ },
+ "https://index.docker.io/v1/": {
+ "username": "mobydock",
+ "password": "conta1n3rize14"
+ }
+ }
+ ```
+
+ Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. 
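Constructing that header is just marshalling the registry-to-credentials map and encoding it; a minimal Go sketch following the "base64-encoded JSON object" wording above (the credentials are the placeholder values from the example):

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

func main() {
	// Per-registry credentials, keyed by registry URL (placeholders).
	cfg := map[string]map[string]string{
		"docker.example.com": {
			"username": "janedoe",
			"password": "hunter2",
		},
	}
	raw, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Base64-encode the JSON object, as described above.
	fmt.Println("X-Registry-Config:", base64.StdEncoding.EncodeToString(raw))
}
```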
+ type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + - name: "target" + in: "query" + description: "Target build stage" + type: "string" + default: "" + - name: "outputs" + in: "query" + description: "BuildKit output configuration" + type: "string" + default: "" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /build/prune: + post: + summary: "Delete builder cache" + produces: + - "application/json" + operationId: "BuildPrune" + parameters: + - name: "keep-storage" + in: "query" + description: "Amount of disk space in bytes to keep for cache" + type: "integer" + format: "int64" + - name: "all" + in: "query" + type: "boolean" + description: "Remove all types of build cache" + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the list of build cache objects. + + Available filters: + + - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h') + - `id=` + - `parent=` + - `type=` + - `description=` + - `inuse` + - `shared` + - `private` + responses: + 200: + description: "No error" + schema: + type: "object" + title: "BuildPruneResponse" + properties: + CachesDeleted: + type: "array" + items: + description: "ID of build cache object" + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "ImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 404: + description: "repository does not exist or no read access" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "message" + in: "query" + description: "Set commit message for imported image." 
+ type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" + tags: ["Image"] + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." + operationId: "ImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + Data: {} + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: ["Image"] + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "ImageHistory" + produces: ["application/json"] + responses: + 200: + description: "List of image layers" + schema: + type: "array" + items: + type: "object" + x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" + required: [Id, Created, CreatedBy, Tags, Size, Comment] + properties: + Id: + type: "string" + x-nullable: false + Created: + type: "integer" + format: "int64" + x-nullable: false + CreatedBy: + type: "string" + x-nullable: false + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + x-nullable: false + Comment: + type: "string" + x-nullable: false + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must + already have a tag which references the registry. For example, + `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "ImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + required: true + tags: ["Image"] + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." 
+ operationId: "ImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." + type: "string" + tags: ["Image"] + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were + referenced by that image. + + Images can't be removed if they have descendant images, are being + used by a running container or are being used by a build. + operationId: "ImageDelete" + produces: ["application/json"] + responses: + 200: + description: "The image was deleted successfully" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: ["Image"] + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "ImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + title: "ImageSearchResponseItem" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. Available filters: + + - `is-automated=(true|false)` + - `is-official=(true|false)` + - `stars=` Matches images that has at least 'number' stars. 
+ type: "string" + tags: ["Image"] + /images/prune: + post: + summary: "Delete unused images" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). Available filters: + + - `dangling=` When set to `true` (or `1`), prune only + unused *and* untagged images. When set to `false` + (or `0`), all unused images are pruned. + - `until=` Prune images created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. + - `label` (`label=`, `label==`, `label!=`, or `label!==`) Prune images with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ImagePruneResponse" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponseItem" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Image"] + /auth: + post: + summary: "Check auth configuration" + description: | + Validate credentials for a registry and, if available, get an identity + token for accessing the registry without password. + operationId: "SystemAuth" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + title: "SystemAuthResponse" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["System"] + /info: + get: + summary: "Get system information" + operationId: "SystemInfo" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/SystemInfo" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "SystemVersion" + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/SystemVersion" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
+ operationId: "SystemPing" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + Builder-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["System"] + /commit: + post: + summary: "Create a new image from a container" + operationId: "ImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/ContainerConfig" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: ["Image"] + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, `update`, and `prune` + + Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, `untag`, and `prune` + + Volumes report these events: `create`, `mount`, `unmount`, `destroy`, and `prune` + + Networks report these events: `create`, `connect`, `disconnect`, `destroy`, `update`, `remove`, and `prune` + + The Docker daemon reports these events: `reload` + + Services report these events: `create`, `update`, and `remove` + + Nodes report these events: `create`, `update`, and `remove` + + Secrets report these events: `create`, `update`, and `remove` + + Configs report these events: `create`, `update`, and `remove` + + The Builder reports `prune` events + + operationId: "SystemEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "SystemEventsResponse" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters:
+
+ - `config=<string>` config name or ID
+ - `container=<string>` container name or ID
+ - `daemon=<string>` daemon name or ID
+ - `event=<string>` event type
+ - `image=<string>` image name or ID
+ - `label=<string>` image or container label
+ - `network=<string>` network name or ID
+ - `node=<string>` node ID
+ - `plugin=<string>` plugin name or ID
+ - `scope=<string>` local or swarm
+ - `secret=<string>` secret name or ID
+ - `service=<string>` service name or ID
+ - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`
+ - `volume=<string>` volume name
+ type: "string"
+ tags: ["System"]
+ /system/df:
+ get:
+ summary: "Get data usage information"
+ operationId: "SystemDataUsage"
+ responses:
+ 200:
+ description: "no error"
+ schema:
+ type: "object"
+ title: "SystemDataUsageResponse"
+ properties:
+ LayersSize:
+ type: "integer"
+ format: "int64"
+ Images:
+ type: "array"
+ items:
+ $ref: "#/definitions/ImageSummary"
+ Containers:
+ type: "array"
+ items:
+ $ref: "#/definitions/ContainerSummary"
+ Volumes:
+ type: "array"
+ items:
+ $ref: "#/definitions/Volume"
+ BuildCache:
+ type: "array"
+ items:
+ $ref: "#/definitions/BuildCache"
+ example:
+ LayersSize: 1092588
+ Images:
+ -
+ Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ ParentId: ""
+ RepoTags:
+ - "busybox:latest"
+ RepoDigests:
+ - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6"
+ Created: 1466724217
+ Size: 1092588
+ SharedSize: 0
+ VirtualSize: 1092588
+ Labels: {}
+ Containers: 1
+ Containers:
+ -
+ Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148"
+ Names:
+ - "/top"
+ Image: "busybox"
+ ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749"
+ Command: "top"
+ Created: 1472592424
+ Ports: []
+ SizeRootFs: 1092588
+ Labels: {}
+ State: "exited"
+ Status: "Exited (0) 56 minutes ago"
+ HostConfig:
+ NetworkMode: "default"
+ NetworkSettings:
+ Networks:
+ bridge:
+ IPAMConfig: null
+ Links: null
+ Aliases: null
+ NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92"
+ EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a"
+ Gateway: "172.18.0.1"
+ IPAddress: "172.18.0.2"
+ IPPrefixLen: 16
+ IPv6Gateway: ""
+ GlobalIPv6Address: ""
+ GlobalIPv6PrefixLen: 0
+ MacAddress: "02:42:ac:12:00:02"
+ Mounts: []
+ Volumes:
+ -
+ Name: "my-volume"
+ Driver: "local"
+ Mountpoint: "/var/lib/docker/volumes/my-volume/_data"
+ Labels: null
+ Scope: "local"
+ Options: null
+ UsageData:
+ Size: 10920104
+ RefCount: 2
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ tags: ["System"]
+ /images/{name}/get:
+ get:
+ summary: "Export an image"
+ description: |
+ Get a tarball containing all images and metadata for a repository.
+
+ If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced.
+ + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. + + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "ImageGet" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: ["Image"] + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image + repositories. + + For each value of the `names` parameter: if it is a specific name and + tag (e.g. `ubuntu:latest`), then only that image (and its parents) are + returned; if it is an image ID, similarly only that image (and its parents) + are returned and there would be no names referenced in the 'repositories' + file for this image ID. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageGetAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: ["Image"] + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see the [export image endpoint](#operation/ImageGet). + operationId: "ImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: ["Image"] + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." 
+ operationId: "ContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: | + Override the key sequence for detaching a container. Format is + a single character `[a-Z]` or `ctrl-` where `` + is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + Env: + description: | + A list of environment variables in the form `["VAR=value", ...]`. + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: | + The user, and optionally, group to run the exec process inside + the container. Format is one of: `user`, `user:group`, `uid`, + or `uid:gid`. + WorkingDir: + type: "string" + description: | + The working directory for the exec process inside the container. + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: ["Exec"] + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: | + Starts a previously set up exec instance. If detach is true, this endpoint + returns immediately after starting the command. Otherwise, it sets up an + interactive session with the command. + operationId: "ExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: | + Resize the TTY session used by an exec instance. This endpoint only works + if `tty` was specified as part of creating and starting the exec instance. 
+ operationId: "ExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: ["Exec"] + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "ExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "ExecInspectResponse" + properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." + examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: ["Exec"] + + /volumes: + get: + summary: "List volumes" + operationId: "VolumeList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + title: "VolumeListResponse" + description: "Volume list response" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: | + Warnings that occurred when fetching the list of volumes. + items: + type: "string" + + examples: + application/json: + Volumes: + - CreatedAt: "2017-07-19T12:00:26Z" + Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches volumes based on their driver. 
+ - `label=<key>` or `label=<key>:<value>` Matches volumes based on + the presence of a `label` alone or a `label` and a value. + - `name=<volume-name>` Matches all or part of a volume name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumeCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + description: "Volume configuration" + title: "VolumeConfig" + properties: + Name: + description: | + The new volume's name. If not specified, Docker generates a name. + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: | + A mapping of driver options and values. These options are + passed directly to the driver and are driver specific. + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumeInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumeDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + produces: + - "application/json" + operationId: "VolumePrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
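For the volume endpoints above, creating a named volume is a single JSON `POST`. A minimal Go sketch follows (socket path assumed as before; the volume name `tardis` mirrors the spec's own example). The 201 response echoes the full `Volume` object, including the daemon-assigned `Mountpoint`.

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	body, _ := json.Marshal(map[string]interface{}{
		"Name":   "tardis",
		"Driver": "local",
		"Labels": map[string]string{"com.example.some-label": "some-value"},
	})
	resp, err := client.Post("http://localhost/volumes/create",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// A 201 response echoes the created Volume, including its Mountpoint.
	var vol struct{ Name, Mountpoint string }
	if err := json.NewDecoder(resp.Body).Decode(&vol); err != nil {
		panic(err)
	}
	fmt.Println(vol.Name, vol.Mountpoint)
}
```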
+ type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "VolumePruneResponse" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Volume"] + /networks: + get: + summary: "List networks" + description: | + Returns a list of networks. For details on the format, see the + [network inspect endpoint](#operation/NetworkInspect). + + Note that it uses a different, smaller representation of a network than + inspecting a single network. For example, the list of containers attached + to the network is not propagated in API versions 1.28 and up. + operationId: "NetworkList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + EnableIPv6: false + Internal: false + Attachable: false + Ingress: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process + on the networks list. + + Available filters: + + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `scope=["swarm"|"global"|"local"]` Filters networks by scope (`swarm`, `global`, or `local`). + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. 
+ type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworkInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "verbose" + in: "query" + description: "Detailed inspect output for troubleshooting" + type: "boolean" + default: false + - name: "scope" + in: "query" + description: "Filter the network by scope (swarm, global, or local)" + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "NetworkDelete" + responses: + 204: + description: "No error" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworkCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + title: "NetworkCreateResponse" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 403: + description: "operation not supported for pre-defined networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: | + Check for networks with duplicate names. Since Network is + primarily keyed based on a random ID and not on the name, and + network name is strictly a user-friendly alias to the network + which is uniquely identified using ID, there is no guaranteed + way to check for duplicates. CheckDuplicate is there to provide + a best effort checking of any networks which has the same name + but it is not guaranteed to catch all name collisions. + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + Attachable: + description: | + Globally scoped network is manually attachable by regular + containers from workers in swarm mode. + type: "boolean" + Ingress: + description: | + Ingress network is the network which provides the routing-mesh + in swarm mode. + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." 
+ type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Attachable: false + Ingress: false + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworkConnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworkDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: | + The ID or name of the container to disconnect from the network. + Force: + type: "boolean" + description: | + Force the container to disconnect from the network. + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + produces: + - "application/json" + operationId: "NetworkPrune" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the prune list, encoded as JSON (a `map[string][]string`). + + Available filters: + - `until=` Prune networks created before this timestamp. The `` can be Unix timestamps, date formatted timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed relative to the daemon machine’s time. 
+ - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune networks with (or without, in case `label!=...` is used) the specified labels. + type: "string" + responses: + 200: + description: "No error" + schema: + type: "object" + title: "NetworkPruneResponse" + properties: + NetworksDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Network"] + /plugins: + get: + summary: "List plugins" + operationId: "PluginList" + description: "Returns information about installed plugins." + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the plugin list. + + Available filters: + + - `capability=<capability name>` + - `enable=<true>|<false>` + tags: ["Plugin"] + + /plugins/privileges: + get: + summary: "Get plugin privileges" + operationId: "GetPluginPrivileges" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: | + Describes a permission the user has to accept upon installing + the plugin. + type: "object" + title: "PluginPrivilegeItem" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: + - "Plugin" + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PluginPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be + enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginsEnable). + produces: + - "application/json" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "remote" + in: "query" + description: | + Remote reference for plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "name" + in: "query" + description: | + Local name for the pulled plugin. + + The `:latest` tag is optional, and is used as the default if omitted. + required: false + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: | + Describes a permission accepted by the user upon installing the + plugin.
+ type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/{name}/json: + get: + summary: "Inspect a plugin" + operationId: "PluginInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}: + delete: + summary: "Remove a plugin" + operationId: "PluginDelete" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "force" + in: "query" + description: | + Disable the plugin before removing. This may result in issues if the + plugin is in use by a container. + type: "boolean" + default: false + tags: ["Plugin"] + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PluginEnable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "timeout" + in: "query" + description: "Set the HTTP client timeout (in seconds)" + type: "integer" + default: 0 + tags: ["Plugin"] + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PluginDisable" + responses: + 200: + description: "no error" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + tags: ["Plugin"] + /plugins/{name}/upgrade: + post: + summary: "Upgrade a plugin" + operationId: "PluginUpgrade" + responses: + 204: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "remote" + in: "query" + description: | + Remote reference to upgrade to. + + The `:latest` tag is optional, and is used as the default if omitted. 
+ required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration to use when pulling a plugin + from a registry. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + description: | + Describes a permission accepted by the user upon installing the + plugin. + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + tags: ["Plugin"] + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PluginCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: ["Plugin"] + /plugins/{name}/push: + post: + summary: "Push a plugin" + operationId: "PluginPush" + description: | + Push a plugin to the registry. + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + responses: + 200: + description: "no error" + 404: + description: "plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PluginSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: | + The name of the plugin. The `:latest` tag is optional, and is the + default if omitted. + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugin"] + /nodes: + get: + summary: "List nodes" + operationId: "NodeList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=<node id>` + - `label=<engine label>` + - `membership=`(`accepted`|`pending`)` + - `name=<node name>` + - `node.label=<node label>` + - `role=`(`manager`|`worker`)` + type: "string" + tags: ["Node"] + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "NodeInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + tags: ["Node"] + delete: + summary: "Delete a node" + operationId: "NodeDelete" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: ["Node"] + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "NodeUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: | + The version number of the node object being updated. This is required + to avoid conflicting writes.
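The `version` query parameter implements optimistic concurrency for all swarm objects: read the object, reuse its `Version.Index` in the update, and retry if the daemon rejects a stale version. A non-normative Go sketch of draining a node (the node ID is a placeholder; socket path assumed as before):

```go
package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"net"
	"net/http"
)

func main() {
	client := &http.Client{Transport: &http.Transport{
		DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
			return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
		},
	}}

	const nodeID = "24ifsmvkjbyhk" // placeholder node ID

	// Read the current object to learn its version and spec.
	resp, err := client.Get("http://localhost/nodes/" + nodeID)
	if err != nil {
		panic(err)
	}
	var node struct {
		Version struct{ Index uint64 }
		Spec    map[string]interface{}
	}
	if err := json.NewDecoder(resp.Body).Decode(&node); err != nil {
		panic(err)
	}
	resp.Body.Close()

	// Modify the spec and write it back, quoting the version we read.
	node.Spec["Availability"] = "drain"
	body, _ := json.Marshal(node.Spec)
	endpoint := fmt.Sprintf("http://localhost/nodes/%s/update?version=%d",
		nodeID, node.Version.Index)
	resp, err = client.Post(endpoint, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```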
+ type: "integer" + format: "int64" + required: true + tags: ["Node"] + /swarm: + get: + summary: "Inspect swarm" + operationId: "SwarmInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Swarm" + 404: + description: "no such swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "SwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication, as well + as determining the networking interface used for the VXLAN + Tunnel Endpoint (VTEP). This can either be an address/port + combination in the form `192.168.1.1:4567`, or an interface + followed by a port number, like `eth0:4567`. If the port number + is omitted, the default swarm listening port is used. + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same address + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global + scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created + from the default subnet pool. 
+ type: "integer" + format: "uint32" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + EncryptionConfig: + AutoLockManagers: false + tags: ["Swarm"] + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "SwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: | + Listen address used for inter-manager communication if the node + gets promoted to manager, as well as determining the networking + interface used for the VXLAN Tunnel Endpoint (VTEP). + type: "string" + AdvertiseAddr: + description: | + Externally reachable address advertised to other nodes. This + can either be an address/port combination in the form + `192.168.1.1:4567`, or an interface followed by a port number, + like `eth0:4567`. If the port number is omitted, the port + number from the listen address is used. If `AdvertiseAddr` is + not specified, it will be automatically detected when possible. + type: "string" + DataPathAddr: + description: | + Address or interface to use for data path traffic (format: + ``), for example, `192.168.1.1`, or an interface, + like `eth0`. If `DataPathAddr` is unspecified, the same addres + as `AdvertiseAddr` is used. + + The `DataPathAddr` specifies the address that global scope + network drivers will publish towards other nodes in order to + reach the containers running on this node. Using this parameter + it is possible to separate the container data traffic from the + management traffic of the cluster. + + type: "string" + RemoteAddrs: + description: | + Addresses of manager nodes already participating in the swarm. + type: "array" + items: + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: ["Swarm"] + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "SwarmLeave" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: | + Force leave swarm, even if this is the last manager or that it will + break the cluster. 
+ in: "query" + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/update: + post: + summary: "Update a swarm" + operationId: "SwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: | + The version number of the swarm object being updated. This is + required to avoid conflicting writes. + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + - name: "rotateManagerUnlockKey" + in: "query" + description: "Rotate the manager unlock key." + type: "boolean" + default: false + tags: ["Swarm"] + /swarm/unlockkey: + get: + summary: "Get the unlock key" + operationId: "SwarmUnlockkey" + consumes: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + title: "UnlockKeyResponse" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /swarm/unlock: + post: + summary: "Unlock a locked manager" + operationId: "SwarmUnlock" + consumes: + - "application/json" + produces: + - "application/json" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + UnlockKey: + description: "The swarm's unlock key." + type: "string" + example: + UnlockKey: "SWMKEY-1-7c37Cc8654o6p38HnroywCi19pllOnGtbdZEgtKxZu8" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Swarm"] + /services: + get: + summary: "List services" + operationId: "ServiceList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the services list. + + Available filters: + + - `id=` + - `label=` + - `mode=["replicated"|"global"]` + - `name=` + - name: "status" + in: "query" + type: "boolean" + description: | + Include service status, with count of running and desired tasks. 
+ tags: ["Service"] + /services/create: + post: + summary: "Create a service" + operationId: "ServiceCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + title: "ServiceCreateResponse" + properties: + ID: + description: "The ID of the created service." + type: "string" + Warning: + description: "Optional warning message" + type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "network is not eligible for services" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + Hosts: ["10.10.10.10 host1", "ABCD:EF01:2345:6789:ABCD:EF01:2345:6789 host2"] + User: "33" + DNSConfig: + Nameservers: ["8.8.8.8"] + Search: ["example.org"] + Options: ["timeout:3"] + Secrets: + - + File: + Name: "www.example.org.key" + UID: "33" + GID: "33" + Mode: 384 + SecretID: "fpjqlhnwb19zds35k8wn80lq9" + SecretName: "example_org_domain_key" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + tags: ["Service"] + /services/{id}: + get: + summary: "Inspect a service" + operationId: "ServiceInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "insertDefaults" + in: "query" + description: "Fill empty fields with default values." 
+ type: "boolean" + default: false + tags: ["Service"] + delete: + summary: "Delete a service" + operationId: "ServiceDelete" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: ["Service"] + /services/{id}/update: + post: + summary: "Update a service" + operationId: "ServiceUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ServiceUpdateResponse" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 2 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + RollbackConfig: + Parallelism: 1 + Delay: 1000000000 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: | + The version number of the service object being updated. This is + required to avoid conflicting writes. + This version number should be the value as currently set on the + service *before* the update. You can find the current version by + calling `GET /services/{id}` + required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + description: | + If the `X-Registry-Auth` header is not specified, this parameter + indicates where to find registry authorization credentials. + type: "string" + enum: ["spec", "previous-spec"] + default: "spec" + - name: "rollback" + in: "query" + description: | + Set to this parameter to `previous` to cause a server-side rollback + to the previous service spec. The supplied spec will be ignored in + this case. + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: | + A base64url-encoded auth configuration for pulling from private + registries. + + Refer to the [authentication section](#section/Authentication) for + details. + type: "string" + + tags: ["Service"] + /services/{id}/logs: + get: + summary: "Get service logs" + description: | + Get `stdout` and `stderr` logs from a service. See also + [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. 
+ operationId: "ServiceLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such service: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the service" + type: "string" + - name: "details" + in: "query" + description: "Show service context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Service"] + /tasks: + get: + summary: "List tasks" + operationId: "TaskList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + 
Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the tasks list. + + Available filters: + + - `desired-state=(running | shutdown | accepted)` + - `id=` + - `label=key` or `label="key=value"` + - `name=` + - `node=` + - `service=` + tags: ["Task"] + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "TaskInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: ["Task"] + /tasks/{id}/logs: + get: + summary: "Get task logs" + description: | + Get `stdout` and `stderr` logs from a task. + See also [`/containers/{id}/logs`](#operation/ContainerLogs). + + **Note**: This endpoint works only for services with the `local`, + `json-file` or `journald` logging drivers. + operationId: "TaskLogs" + responses: + 200: + description: "logs returned as a stream in response body" + schema: + type: "string" + format: "binary" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such task: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID of the task" + type: "string" + - name: "details" + in: "query" + description: "Show task context and extra details provided to logs." + type: "boolean" + default: false + - name: "follow" + in: "query" + description: "Keep connection after returning logs." 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: | + Only return this number of log lines from the end of the logs. + Specify as an integer or `all` to output all log lines. + type: "string" + default: "all" + tags: ["Task"] + /secrets: + get: + summary: "List secrets" + operationId: "SecretList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Secret" + example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + - ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to + process on the secrets list. 
+ + Available filters: + + - `id=<secret id>` + - `label=<key> or label=<key>=value` + - `name=<secret name>` + - `names=<secret name>` + tags: ["Secret"] + /secrets/create: + post: + summary: "Create a secret" + operationId: "SecretCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 409: + description: "name conflicts with an existing object" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + schema: + allOf: + - $ref: "#/definitions/SecretSpec" + - type: "object" + example: + Name: "app-key.crt" + Labels: + foo: "bar" + Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + tags: ["Secret"] + /secrets/{id}: + get: + summary: "Inspect a secret" + operationId: "SecretInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Secret" + examples: + application/json: + ID: "ktnbjxoalbkvbvedmg1urrz8h" + Version: + Index: 11 + CreatedAt: "2016-11-05T01:20:17.327670065Z" + UpdatedAt: "2016-11-05T01:20:17.327670065Z" + Spec: + Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + delete: + summary: "Delete a secret" + operationId: "SecretDelete" + produces: + - "application/json" + responses: + 204: + description: "no error" + 404: + description: "secret not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + type: "string" + description: "ID of the secret" + tags: ["Secret"] + /secrets/{id}/update: + post: + summary: "Update a Secret" + operationId: "SecretUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such secret" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + 503: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID or name of the secret" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/SecretSpec" + description: | + The spec of the secret to update. Currently, only the Labels field + can be updated. All other fields must remain unchanged from the + [SecretInspect endpoint](#operation/SecretInspect) response values.
+        - name: "version"
+          in: "query"
+          description: |
+            The version number of the secret object being updated. This is
+            required to avoid conflicting writes.
+          type: "integer"
+          format: "int64"
+          required: true
+      tags: ["Secret"]
+  /configs:
+    get:
+      summary: "List configs"
+      operationId: "ConfigList"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            type: "array"
+            items:
+              $ref: "#/definitions/Config"
+          example:
+            - ID: "ktnbjxoalbkvbvedmg1urrz8h"
+              Version:
+                Index: 11
+              CreatedAt: "2016-11-05T01:20:17.327670065Z"
+              UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+              Spec:
+                Name: "server.conf"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "filters"
+          in: "query"
+          type: "string"
+          description: |
+            A JSON encoded value of the filters (a `map[string][]string`) to
+            process on the configs list.
+
+            Available filters:
+
+            - `id=<config id>`
+            - `label=<key> or label=<key>=value`
+            - `name=<config name>`
+            - `names=<config name>`
+      tags: ["Config"]
+  /configs/create:
+    post:
+      summary: "Create a config"
+      operationId: "ConfigCreate"
+      consumes:
+        - "application/json"
+      produces:
+        - "application/json"
+      responses:
+        201:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/IdResponse"
+        409:
+          description: "name conflicts with an existing object"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "body"
+          in: "body"
+          schema:
+            allOf:
+              - $ref: "#/definitions/ConfigSpec"
+              - type: "object"
+                example:
+                  Name: "server.conf"
+                  Labels:
+                    foo: "bar"
+                  Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg=="
+      tags: ["Config"]
+  /configs/{id}:
+    get:
+      summary: "Inspect a config"
+      operationId: "ConfigInspect"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "no error"
+          schema:
+            $ref: "#/definitions/Config"
+          examples:
+            application/json:
+              ID: "ktnbjxoalbkvbvedmg1urrz8h"
+              Version:
+                Index: 11
+              CreatedAt: "2016-11-05T01:20:17.327670065Z"
+              UpdatedAt: "2016-11-05T01:20:17.327670065Z"
+              Spec:
+                Name: "app-dev.crt"
+        404:
+          description: "config not found"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          type: "string"
+          description: "ID of the config"
+      tags: ["Config"]
+    delete:
+      summary: "Delete a config"
+      operationId: "ConfigDelete"
+      produces:
+        - "application/json"
+      responses:
+        204:
+          description: "no error"
+        404:
+          description: "config not found"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          required: true
+          type: "string"
+          description: "ID of the config"
+      tags: ["Config"]
+  /configs/{id}/update:
+    post:
+      summary: "Update a config"
+      operationId: "ConfigUpdate"
+      responses:
+        200:
+          description: "no error"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        404:
+          description: "no such config"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        503:
+          description: "node is not part of a swarm"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "id"
+          in: "path"
+          description: "The ID or name of the config"
+          type: "string"
+          required: true
+        - name: "body"
+          in: "body"
+          schema:
+            $ref: "#/definitions/ConfigSpec"
+          description: |
+            The spec of the config to update. Currently, only the Labels field
+            can be updated. All other fields must remain unchanged from the
+            [ConfigInspect endpoint](#operation/ConfigInspect) response values.
+        - name: "version"
+          in: "query"
+          description: |
+            The version number of the config object being updated. This is
+            required to avoid conflicting writes.
+          type: "integer"
+          format: "int64"
+          required: true
+      tags: ["Config"]
+  /distribution/{name}/json:
+    get:
+      summary: "Get image information from the registry"
+      description: |
+        Return image digest and platform information by contacting the registry.
+      operationId: "DistributionInspect"
+      produces:
+        - "application/json"
+      responses:
+        200:
+          description: "descriptor and platform information"
+          schema:
+            type: "object"
+            x-go-name: DistributionInspect
+            title: "DistributionInspectResponse"
+            required: [Descriptor, Platforms]
+            properties:
+              Descriptor:
+                type: "object"
+                description: |
+                  A descriptor struct containing digest, media type, and size.
+                properties:
+                  MediaType:
+                    type: "string"
+                  Size:
+                    type: "integer"
+                    format: "int64"
+                  Digest:
+                    type: "string"
+                  URLs:
+                    type: "array"
+                    items:
+                      type: "string"
+              Platforms:
+                type: "array"
+                description: |
+                  An array containing all platforms supported by the image.
+                items:
+                  type: "object"
+                  properties:
+                    Architecture:
+                      type: "string"
+                    OS:
+                      type: "string"
+                    OSVersion:
+                      type: "string"
+                    OSFeatures:
+                      type: "array"
+                      items:
+                        type: "string"
+                    Variant:
+                      type: "string"
+                    Features:
+                      type: "array"
+                      items:
+                        type: "string"
+          examples:
+            application/json:
+              Descriptor:
+                MediaType: "application/vnd.docker.distribution.manifest.v2+json"
+                Digest: "sha256:c0537ff6a5218ef531ece93d4984efc99bbf3f7497c0a7726c88e2bb7584dc96"
+                Size: 3987495
+                URLs:
+                  - ""
+              Platforms:
+                - Architecture: "amd64"
+                  OS: "linux"
+                  OSVersion: ""
+                  OSFeatures:
+                    - ""
+                  Variant: ""
+                  Features:
+                    - ""
+        401:
+          description: "Failed authentication or no image found"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+          examples:
+            application/json:
+              message: "No such image: someimage (tag: latest)"
+        500:
+          description: "Server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      parameters:
+        - name: "name"
+          in: "path"
+          description: "Image name or ID"
+          type: "string"
+          required: true
+      tags: ["Distribution"]
+  /session:
+    post:
+      summary: "Initialize interactive session"
+      description: |
+        Start a new interactive session with a server. A session allows the
+        server to call back to the client for advanced capabilities.
+
+        ### Hijacking
+
+        This endpoint hijacks the HTTP connection and upgrades it to an
+        HTTP/2 transport that allows the client to expose gRPC services on
+        that connection.
+
+        For example, the client sends this request to upgrade the connection:
+
+        ```
+        POST /session HTTP/1.1
+        Upgrade: h2c
+        Connection: Upgrade
+        ```
+
+        The Docker daemon responds with a `101 UPGRADED` response followed by
+        the raw stream:
+
+        ```
+        HTTP/1.1 101 UPGRADED
+        Connection: Upgrade
+        Upgrade: h2c
+        ```
+      operationId: "Session"
+      produces:
+        - "application/vnd.docker.raw-stream"
+      responses:
+        101:
+          description: "no error, hijacking successful"
+        400:
+          description: "bad parameter"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+        500:
+          description: "server error"
+          schema:
+            $ref: "#/definitions/ErrorResponse"
+      tags: ["Session"]
diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go
new file mode 100644
index 0000000000..ddf15bb182
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/auth.go
@@ -0,0 +1,22 @@
+package types // import "github.com/docker/docker/api/types"
+
+// AuthConfig contains authorization information for connecting to a Registry
+type AuthConfig struct {
+	Username string `json:"username,omitempty"`
+	Password string `json:"password,omitempty"`
+	Auth     string `json:"auth,omitempty"`
+
+	// Email is an optional value associated with the username.
+	// This field is deprecated and will be removed in a later
+	// version of docker.
+	Email string `json:"email,omitempty"`
+
+	ServerAddress string `json:"serveraddress,omitempty"`
+
+	// IdentityToken is used to authenticate the user and get
+	// an access token for the registry.
+	IdentityToken string `json:"identitytoken,omitempty"`
+
+	// RegistryToken is a bearer token to be sent to a registry
+	RegistryToken string `json:"registrytoken,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
new file mode 100644
index 0000000000..bf3463b90e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go
@@ -0,0 +1,23 @@
+package blkiodev // import "github.com/docker/docker/api/types/blkiodev"
+
+import "fmt"
+
+// WeightDevice is a structure that holds device:weight pair
+type WeightDevice struct {
+	Path   string
+	Weight uint16
+}
+
+func (w *WeightDevice) String() string {
+	return fmt.Sprintf("%s:%d", w.Path, w.Weight)
+}
+
+// ThrottleDevice is a structure that holds device:rate_per_second pair
+type ThrottleDevice struct {
+	Path string
+	Rate uint64
+}
+
+func (t *ThrottleDevice) String() string {
+	return fmt.Sprintf("%s:%d", t.Path, t.Rate)
+}
diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go
new file mode 100644
index 0000000000..9c464b73e2
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/client.go
@@ -0,0 +1,419 @@
+package types // import "github.com/docker/docker/api/types"
+
+import (
+	"bufio"
+	"io"
+	"net"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/filters"
+	units "github.com/docker/go-units"
+)
+
+// CheckpointCreateOptions holds parameters to create a checkpoint from a container
+type CheckpointCreateOptions struct {
+	CheckpointID  string
+	CheckpointDir string
+	Exit          bool
+}
+
+// CheckpointListOptions holds parameters to list checkpoints for a container
+type CheckpointListOptions struct {
+	CheckpointDir string
+}
+
+// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
+type CheckpointDeleteOptions struct {
+	CheckpointID  string
+	CheckpointDir string
+}
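+
+// Example: the Checkpoint*Options structs above are consumed by the matching
+// methods on the Docker API client. A minimal sketch (not part of the
+// upstream file), assuming a configured *client.Client named cli and a
+// context.Context named ctx; the container and checkpoint names are
+// hypothetical:
+//
+//	err := cli.CheckpointCreate(ctx, "my-container", types.CheckpointCreateOptions{
+//		CheckpointID: "checkpoint1", // hypothetical checkpoint name
+//		Exit:         false,         // keep the container running after checkpointing
+//	})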
+
+// ContainerAttachOptions holds parameters to attach to a container.
+type ContainerAttachOptions struct {
+	Stream     bool
+	Stdin      bool
+	Stdout     bool
+	Stderr     bool
+	DetachKeys string
+	Logs       bool
+}
+
+// ContainerCommitOptions holds parameters to commit changes into a container.
+type ContainerCommitOptions struct {
+	Reference string
+	Comment   string
+	Author    string
+	Changes   []string
+	Pause     bool
+	Config    *container.Config
+}
+
+// ContainerExecInspect holds information returned by exec inspect.
+type ContainerExecInspect struct {
+	ExecID      string `json:"ID"`
+	ContainerID string
+	Running     bool
+	ExitCode    int
+	Pid         int
+}
+
+// ContainerListOptions holds parameters to list containers with.
+type ContainerListOptions struct {
+	Quiet   bool
+	Size    bool
+	All     bool
+	Latest  bool
+	Since   string
+	Before  string
+	Limit   int
+	Filters filters.Args
+}
+
+// ContainerLogsOptions holds parameters to filter logs with.
+type ContainerLogsOptions struct {
+	ShowStdout bool
+	ShowStderr bool
+	Since      string
+	Until      string
+	Timestamps bool
+	Follow     bool
+	Tail       string
+	Details    bool
+}
+
+// ContainerRemoveOptions holds parameters to remove containers.
+type ContainerRemoveOptions struct {
+	RemoveVolumes bool
+	RemoveLinks   bool
+	Force         bool
+}
+
+// ContainerStartOptions holds parameters to start containers.
+type ContainerStartOptions struct {
+	CheckpointID  string
+	CheckpointDir string
+}
+
+// CopyToContainerOptions holds information
+// about files to copy into a container
+type CopyToContainerOptions struct {
+	AllowOverwriteDirWithFile bool
+	CopyUIDGID                bool
+}
+
+// EventsOptions holds parameters to filter events with.
+type EventsOptions struct {
+	Since   string
+	Until   string
+	Filters filters.Args
+}
+
+// NetworkListOptions holds parameters to filter the list of networks with.
+type NetworkListOptions struct {
+	Filters filters.Args
+}
+
+// HijackedResponse holds connection information for a hijacked request.
+type HijackedResponse struct {
+	Conn   net.Conn
+	Reader *bufio.Reader
+}
+
+// Close closes the hijacked connection and reader.
+func (h *HijackedResponse) Close() {
+	h.Conn.Close()
+}
+
+// CloseWriter is an interface implemented by connections that can close
+// their write side, preventing further writes to the stream.
+type CloseWriter interface {
+	CloseWrite() error
+}
+
+// CloseWrite closes the write side of the hijacked connection.
+func (h *HijackedResponse) CloseWrite() error {
+	if conn, ok := h.Conn.(CloseWriter); ok {
+		return conn.CloseWrite()
+	}
+	return nil
+}
+
+// ImageBuildOptions holds the information
+// necessary to build images.
+type ImageBuildOptions struct {
+	Tags           []string
+	SuppressOutput bool
+	RemoteContext  string
+	NoCache        bool
+	Remove         bool
+	ForceRemove    bool
+	PullParent     bool
+	Isolation      container.Isolation
+	CPUSetCPUs     string
+	CPUSetMems     string
+	CPUShares      int64
+	CPUQuota       int64
+	CPUPeriod      int64
+	Memory         int64
+	MemorySwap     int64
+	CgroupParent   string
+	NetworkMode    string
+	ShmSize        int64
+	Dockerfile     string
+	Ulimits        []*units.Ulimit
+	// BuildArgs needs to be a *string instead of just a string so that
+	// we can tell the difference between "" (empty string) and no value
+	// at all (nil). See the parsing of buildArgs in
+	// api/server/router/build/build_routes.go for even more info.
+	BuildArgs   map[string]*string
+	AuthConfigs map[string]AuthConfig
+	Context     io.Reader
+	Labels      map[string]string
+	// Squash squashes the resulting image's layers to the parent; this
+	// preserves the original image and creates a new one from the parent with
+	// all the changes applied to a single layer
+	Squash bool
+	// CacheFrom specifies images that are used for matching cache. Images
+	// specified here do not need to have a valid parent chain to match cache.
+	CacheFrom   []string
+	SecurityOpt []string
+	ExtraHosts  []string // List of extra hosts
+	Target      string
+	SessionID   string
+	Platform    string
+	// Version specifies the version of the underlying builder to use
+	Version BuilderVersion
+	// BuildID is an optional identifier that can be passed together with the
+	// build request. The same identifier can be used to gracefully cancel the
+	// build with the cancel request.
+	BuildID string
+	// Outputs defines configurations for exporting build results. Only supported
+	// in BuildKit mode
+	Outputs []ImageBuildOutput
+}
+
+// ImageBuildOutput defines configuration for exporting a build result
+type ImageBuildOutput struct {
+	Type  string
+	Attrs map[string]string
+}
+
+// BuilderVersion sets the version of the underlying builder to use
+type BuilderVersion string
+
+const (
+	// BuilderV1 is the first generation builder in docker daemon
+	BuilderV1 BuilderVersion = "1"
+	// BuilderBuildKit is builder based on moby/buildkit project
+	BuilderBuildKit BuilderVersion = "2"
+)
+
+// ImageBuildResponse holds information
+// returned by a server after building
+// an image.
+type ImageBuildResponse struct {
+	Body   io.ReadCloser
+	OSType string
+}
+
+// ImageCreateOptions holds information to create images.
+type ImageCreateOptions struct {
+	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
+	Platform     string // Platform is the target platform of the image if it needs to be pulled from the registry.
+}
+
+// ImageImportSource holds source information for ImageImport
+type ImageImportSource struct {
+	Source     io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
+	SourceName string    // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
+}
+
+// ImageImportOptions holds information to import images from the client host.
+type ImageImportOptions struct {
+	Tag      string   // Tag is the name to tag this image with. This attribute is deprecated.
+	Message  string   // Message is the message to tag the image with
+	Changes  []string // Changes are the raw changes to apply to this image
+	Platform string   // Platform is the target platform of the image
+}
+
+// ImageListOptions holds parameters to filter the list of images with.
+type ImageListOptions struct {
+	All     bool
+	Filters filters.Args
+}
+
+// ImageLoadResponse returns information to the client about a load process.
+type ImageLoadResponse struct {
+	// Body must be closed to avoid a resource leak
+	Body io.ReadCloser
+	JSON bool
+}
+
+// ImagePullOptions holds information to pull images.
+type ImagePullOptions struct {
+	All           bool
+	RegistryAuth  string // RegistryAuth is the base64 encoded credentials for the registry
+	PrivilegeFunc RequestPrivilegeFunc
+	Platform      string
+}
+
+// RequestPrivilegeFunc is a function interface that
+// clients can supply to retry operations after
+// getting an authorization error.
+// This function returns the registry authentication
+// header value in base64 format, or an error
+// if the privilege request fails.
+type RequestPrivilegeFunc func() (string, error)
+
+// ImagePushOptions holds information to push images.
+type ImagePushOptions ImagePullOptions
+
+// ImageRemoveOptions holds parameters to remove images.
+type ImageRemoveOptions struct {
+	Force         bool
+	PruneChildren bool
+}
+
+// ImageSearchOptions holds parameters to search images with.
+type ImageSearchOptions struct {
+	RegistryAuth  string
+	PrivilegeFunc RequestPrivilegeFunc
+	Filters       filters.Args
+	Limit         int
+}
+
+// ResizeOptions holds parameters to resize a tty.
+// It can be used to resize container ttys and
+// exec process ttys too.
+type ResizeOptions struct {
+	Height uint
+	Width  uint
+}
+
+// NodeListOptions holds parameters to list nodes with.
+type NodeListOptions struct {
+	Filters filters.Args
+}
+
+// NodeRemoveOptions holds parameters to remove nodes with.
+type NodeRemoveOptions struct {
+	Force bool
+}
+
+// ServiceCreateOptions contains the options to use when creating a service.
+type ServiceCreateOptions struct {
+	// EncodedRegistryAuth is the encoded registry authorization credentials to
+	// use when creating the service.
+	//
+	// This field follows the format of the X-Registry-Auth header.
+	EncodedRegistryAuth string
+
+	// QueryRegistry indicates whether the service create requires
+	// contacting a registry. A registry may be contacted to retrieve
+	// the image digest and manifest, which in turn can be used to update
+	// platform or other information about the service.
+	QueryRegistry bool
+}
+
+// ServiceCreateResponse contains the information returned to a client
+// on the creation of a new service.
+type ServiceCreateResponse struct {
+	// ID is the ID of the created service.
+	ID string
+	// Warnings is a set of non-fatal warning messages to pass on to the user.
+	Warnings []string `json:",omitempty"`
+}
+
+// Values for RegistryAuthFrom in ServiceUpdateOptions
+const (
+	RegistryAuthFromSpec         = "spec"
+	RegistryAuthFromPreviousSpec = "previous-spec"
+)
+
+// ServiceUpdateOptions contains the options to be used for updating services.
+type ServiceUpdateOptions struct {
+	// EncodedRegistryAuth is the encoded registry authorization credentials to
+	// use when updating the service.
+	//
+	// This field follows the format of the X-Registry-Auth header.
+	EncodedRegistryAuth string
+
+	// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
+	// into this field. While it does open API users up to racy writes, most
+	// users may not need that level of consistency in practice.
+
+	// RegistryAuthFrom specifies where to find the registry authorization
+	// credentials if they are not given in EncodedRegistryAuth. Valid
+	// values are "spec" and "previous-spec".
+	RegistryAuthFrom string
+
+	// Rollback indicates whether a server-side rollback should be
+	// performed. When this is set, the provided spec will be ignored.
+	// The valid values are "previous" and "none". An empty value is the
+	// same as "none".
+	Rollback string
+
+	// QueryRegistry indicates whether the service update requires
+	// contacting a registry. A registry may be contacted to retrieve
+	// the image digest and manifest, which in turn can be used to update
+	// platform or other information about the service.
+	QueryRegistry bool
+}
+
+// ServiceListOptions holds parameters to list services with.
+type ServiceListOptions struct {
+	Filters filters.Args
+
+	// Status indicates whether the server should include the service task
+	// count of running and desired tasks.
+	Status bool
+}
+
+// ServiceInspectOptions holds parameters related to the "service inspect"
+// operation.
+type ServiceInspectOptions struct {
+	InsertDefaults bool
+}
+
+// TaskListOptions holds parameters to list tasks with.
+type TaskListOptions struct {
+	Filters filters.Args
+}
+
+// PluginRemoveOptions holds parameters to remove plugins.
+type PluginRemoveOptions struct {
+	Force bool
+}
+
+// PluginEnableOptions holds parameters to enable plugins.
+type PluginEnableOptions struct {
+	Timeout int
+}
+
+// PluginDisableOptions holds parameters to disable plugins.
+type PluginDisableOptions struct {
+	Force bool
+}
+
+// PluginInstallOptions holds parameters to install a plugin.
+type PluginInstallOptions struct {
+	Disabled              bool
+	AcceptAllPermissions  bool
+	RegistryAuth          string // RegistryAuth is the base64 encoded credentials for the registry
+	RemoteRef             string // RemoteRef is the plugin name on the registry
+	PrivilegeFunc         RequestPrivilegeFunc
+	AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
+	Args                  []string
+}
+
+// SwarmUnlockKeyResponse contains the response for Engine API:
+// GET /swarm/unlockkey
+type SwarmUnlockKeyResponse struct {
+	// UnlockKey is the unlock key in ASCII-armored format.
+	UnlockKey string
+}
+
+// PluginCreateOptions holds all options for plugin create.
+type PluginCreateOptions struct {
+	RepoName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go
new file mode 100644
index 0000000000..3dd133a3a5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/configs.go
@@ -0,0 +1,66 @@
+package types // import "github.com/docker/docker/api/types"
+
+import (
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/network"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// configs holds structs used for internal communication between the
+// frontend (such as an http server) and the backend (such as the
+// docker daemon).
+
+// ContainerCreateConfig is the parameter set to ContainerCreate()
+type ContainerCreateConfig struct {
+	Name             string
+	Config           *container.Config
+	HostConfig       *container.HostConfig
+	NetworkingConfig *network.NetworkingConfig
+	Platform         *specs.Platform
+	AdjustCPUShares  bool
+}
+
+// ContainerRmConfig holds arguments for the container remove
+// operation. This struct is used to tell the backend what operations
+// to perform.
+type ContainerRmConfig struct {
+	ForceRemove, RemoveVolume, RemoveLink bool
+}
+
+// ExecConfig is a small subset of the Config struct that holds the configuration
+// for the exec feature of docker.
+type ExecConfig struct {
+	User         string   // User that will run the command
+	Privileged   bool     // Is the container in privileged mode
+	Tty          bool     // Attach standard streams to a tty.
+	AttachStdin  bool     // Attach the standard input, makes possible user interaction
+	AttachStderr bool     // Attach the standard error
+	AttachStdout bool     // Attach the standard output
+	Detach       bool     // Execute in detach mode
+	DetachKeys   string   // Escape keys for detach
+	Env          []string // Environment variables
+	WorkingDir   string   // Working directory
+	Cmd          []string // Execution commands and args
+}
+
+// PluginRmConfig holds arguments for plugin remove.
+type PluginRmConfig struct {
+	ForceRemove bool
+}
+
+// PluginEnableConfig holds arguments for plugin enable
+type PluginEnableConfig struct {
+	Timeout int
+}
+
+// PluginDisableConfig holds arguments for plugin disable.
+type PluginDisableConfig struct {
+	ForceDisable bool
+}
+
+// NetworkListConfig stores the options available for listing networks
+type NetworkListConfig struct {
+	// TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here
+	Detailed bool
+	Verbose  bool
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go
new file mode 100644
index 0000000000..f767195b94
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/config.go
@@ -0,0 +1,69 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+import (
+	"time"
+
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/go-connections/nat"
+)
+
+// MinimumDuration puts a minimum on user configured duration.
+// This is to prevent API errors on time units. For example, the API may
+// set 3 as a healthcheck interval with the intention of 3 seconds, but
+// Docker interprets it as 3 nanoseconds.
+const MinimumDuration = 1 * time.Millisecond
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	Test []string `json:",omitempty"`
+
+	// Zero means to inherit. Durations are expressed as integer nanoseconds.
+	Interval    time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
+	Timeout     time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
+	StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before retries start to count down.
+
+	// Retries is the number of consecutive failures needed to consider a container as unhealthy.
+	// Zero means inherit.
+	Retries int `json:",omitempty"`
+}
+
+// Config contains the configuration data about a container.
+// It should hold only portable information about the container.
+// Here, "portable" means "independent from the host we are running on".
+// Non-portable information *should* appear in HostConfig.
+// All fields added to this struct must be marked `omitempty` to keep getting
+// predictable hashes from the old `v1Compatibility` configuration.
+type Config struct {
+	Hostname     string              // Hostname
+	Domainname   string              // Domainname
+	User         string              // User that will run the command(s) inside the container; also supports user:group
+	AttachStdin  bool                // Attach the standard input, makes possible user interaction
+	AttachStdout bool                // Attach the standard output
+	AttachStderr bool                // Attach the standard error
+	ExposedPorts nat.PortSet         `json:",omitempty"` // List of exposed ports
+	Tty          bool                // Attach standard streams to a tty, including stdin if it is not closed.
+	OpenStdin    bool                // Open stdin
+	StdinOnce    bool                // If true, close stdin after the first attached client disconnects.
+	Env             []string            // List of environment variables to set in the container
+	Cmd             strslice.StrSlice   // Command to run when starting the container
+	Healthcheck     *HealthConfig       `json:",omitempty"` // Healthcheck describes how to check the container is healthy
+	ArgsEscaped     bool                `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific).
+	Image           string              // Name of the image as it was passed by the operator (e.g. could be symbolic)
+	Volumes         map[string]struct{} // List of volumes (mounts) used for the container
+	WorkingDir      string              // Current directory (PWD) in which the command will be launched
+	Entrypoint      strslice.StrSlice   // Entrypoint to run when starting the container
+	NetworkDisabled bool                `json:",omitempty"` // Is network disabled
+	MacAddress      string              `json:",omitempty"` // MAC address of the container
+	OnBuild         []string            // ONBUILD metadata that was defined in the image's Dockerfile
+	Labels          map[string]string   // List of labels set on this container
+	StopSignal      string              `json:",omitempty"` // Signal to stop a container
+	StopTimeout     *int                `json:",omitempty"` // Timeout (in seconds) to stop a container
+	Shell           strslice.StrSlice   `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go
new file mode 100644
index 0000000000..16dd5019ee
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go
@@ -0,0 +1,20 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerChangeResponseItem change item in response to ContainerChanges operation
+// swagger:model ContainerChangeResponseItem
+type ContainerChangeResponseItem struct {
+
+	// Kind of change
+	// Required: true
+	Kind uint8 `json:"Kind"`
+
+	// Path to file that has changed
+	// Required: true
+	Path string `json:"Path"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go
new file mode 100644
index 0000000000..d0c852f84d
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_create.go
@@ -0,0 +1,20 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerCreateCreatedBody OK response to ContainerCreate operation
+// swagger:model ContainerCreateCreatedBody
+type ContainerCreateCreatedBody struct {
+
+	// The ID of the created container
+	// Required: true
+	ID string `json:"Id"`
+
+	// Warnings encountered when creating the container
+	// Required: true
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go
new file mode 100644
index 0000000000..63381da367
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_top.go
@@ -0,0 +1,22 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerTopOKBody OK response to ContainerTop operation
+// swagger:model ContainerTopOKBody
+type ContainerTopOKBody struct {
+
+	// Each process running in the container, where each process
+	// is an array of values corresponding to the titles.
+	//
+	// Required: true
+	Processes [][]string `json:"Processes"`
+
+	// The ps column titles
+	// Required: true
+	Titles []string `json:"Titles"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go
new file mode 100644
index 0000000000..c10f175ea8
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_update.go
@@ -0,0 +1,16 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerUpdateOKBody OK response to ContainerUpdate operation
+// swagger:model ContainerUpdateOKBody
+type ContainerUpdateOKBody struct {
+
+	// warnings
+	// Required: true
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go
new file mode 100644
index 0000000000..49e05ae669
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go
@@ -0,0 +1,28 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// ----------------------------------------------------------------------------
+// Code generated by `swagger generate operation`. DO NOT EDIT.
+//
+// See hack/generate-swagger-api.sh
+// ----------------------------------------------------------------------------
+
+// ContainerWaitOKBodyError container waiting error, if any
+// swagger:model ContainerWaitOKBodyError
+type ContainerWaitOKBodyError struct {
+
+	// Details of an error
+	Message string `json:"Message,omitempty"`
+}
+
+// ContainerWaitOKBody OK response to ContainerWait operation
+// swagger:model ContainerWaitOKBody
+type ContainerWaitOKBody struct {
+
+	// error
+	// Required: true
+	Error *ContainerWaitOKBodyError `json:"Error"`
+
+	// Exit code of the container
+	// Required: true
+	StatusCode int64 `json:"StatusCode"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go
new file mode 100644
index 0000000000..2d1cbaa9ab
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/host_config.go
@@ -0,0 +1,447 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+import (
+	"strings"
+
+	"github.com/docker/docker/api/types/blkiodev"
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/go-connections/nat"
+	units "github.com/docker/go-units"
+)
+
+// CgroupnsMode represents the cgroup namespace mode of the container
+type CgroupnsMode string
+
+// IsPrivate indicates whether the container uses its own private cgroup namespace
+func (c CgroupnsMode) IsPrivate() bool {
+	return c == "private"
+}
+
+// IsHost indicates whether the container shares the host's cgroup namespace
+func (c CgroupnsMode) IsHost() bool {
+	return c == "host"
+}
+
+// IsEmpty indicates whether the container cgroup namespace mode is unset
+func (c CgroupnsMode) IsEmpty() bool {
+	return c == ""
+}
+
+// Valid indicates whether the cgroup namespace mode is valid
+func (c CgroupnsMode) Valid() bool {
+	return c.IsEmpty() || c.IsPrivate() || c.IsHost()
+}
+
+// Isolation represents the isolation technology of a container. The supported
+// values are platform-specific.
+type Isolation string
+
+// IsDefault indicates the default isolation technology of a container. On Linux this
+// is the native driver. On Windows, this is a Windows Server Container.
+func (i Isolation) IsDefault() bool {
+	return strings.ToLower(string(i)) == "default" || string(i) == ""
+}
+
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+	return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+	return strings.ToLower(string(i)) == "process"
+}
+
+const (
+	// IsolationEmpty is unspecified (same behavior as default)
+	IsolationEmpty = Isolation("")
+	// IsolationDefault is the default isolation mode on current daemon
+	IsolationDefault = Isolation("default")
+	// IsolationProcess is process isolation mode
+	IsolationProcess = Isolation("process")
+	// IsolationHyperV is HyperV isolation mode
+	IsolationHyperV = Isolation("hyperv")
+)
+
+// IpcMode represents the container ipc stack.
+type IpcMode string
+
+// IsPrivate indicates whether the container uses its own private ipc namespace which cannot be shared.
+func (n IpcMode) IsPrivate() bool {
+	return n == "private"
+}
+
+// IsHost indicates whether the container shares the host's ipc namespace.
+func (n IpcMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsShareable indicates whether the container's ipc namespace can be shared with another container.
+func (n IpcMode) IsShareable() bool {
+	return n == "shareable"
+}
+
+// IsContainer indicates whether the container uses another container's ipc namespace.
+func (n IpcMode) IsContainer() bool {
+	parts := strings.SplitN(string(n), ":", 2)
+	return len(parts) > 1 && parts[0] == "container"
+}
+
+// IsNone indicates whether container IpcMode is set to "none".
+func (n IpcMode) IsNone() bool {
+	return n == "none"
+}
+
+// IsEmpty indicates whether container IpcMode is empty
+func (n IpcMode) IsEmpty() bool {
+	return n == ""
+}
+
+// Valid indicates whether the ipc mode is valid.
+func (n IpcMode) Valid() bool {
+	return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer()
+}
+
+// Container returns the name of the container whose ipc namespace is going to be used.
+func (n IpcMode) Container() string {
+	parts := strings.SplitN(string(n), ":", 2)
+	if len(parts) > 1 && parts[0] == "container" {
+		return parts[1]
+	}
+	return ""
+}
+
+// NetworkMode represents the container network stack.
+type NetworkMode string
+
+// IsNone indicates whether container isn't using a network stack.
+func (n NetworkMode) IsNone() bool {
+	return n == "none"
+}
+
+// IsDefault indicates whether container uses the default network stack.
+func (n NetworkMode) IsDefault() bool {
+	return n == "default"
+}
+
+// IsPrivate indicates whether container uses its private network stack.
+func (n NetworkMode) IsPrivate() bool {
+	return !(n.IsHost() || n.IsContainer())
+}
+
+// IsContainer indicates whether container uses a container network stack.
+func (n NetworkMode) IsContainer() bool {
+	parts := strings.SplitN(string(n), ":", 2)
+	return len(parts) > 1 && parts[0] == "container"
+}
+
+// ConnectedContainer is the id of the container whose network this container is connected to.
+func (n NetworkMode) ConnectedContainer() string {
+	parts := strings.SplitN(string(n), ":", 2)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	return ""
+}
+
+// UserDefined indicates user-created network
+func (n NetworkMode) UserDefined() string {
+	if n.IsUserDefined() {
+		return string(n)
+	}
+	return ""
+}
+
+// UsernsMode represents userns mode in the container.
+type UsernsMode string
+
+// IsHost indicates whether the container uses the host's userns.
+func (n UsernsMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsPrivate indicates whether the container uses a private userns.
+func (n UsernsMode) IsPrivate() bool {
+	return !(n.IsHost())
+}
+
+// Valid indicates whether the userns is valid.
+func (n UsernsMode) Valid() bool {
+	parts := strings.Split(string(n), ":")
+	switch mode := parts[0]; mode {
+	case "", "host":
+	default:
+		return false
+	}
+	return true
+}
+
+// CgroupSpec represents the cgroup to use for the container.
+type CgroupSpec string
+
+// IsContainer indicates whether the container is using another container cgroup
+func (c CgroupSpec) IsContainer() bool {
+	parts := strings.SplitN(string(c), ":", 2)
+	return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the cgroup spec is valid.
+func (c CgroupSpec) Valid() bool {
+	return c.IsContainer() || c == ""
+}
+
+// Container returns the name of the container whose cgroup will be used.
+func (c CgroupSpec) Container() string {
+	parts := strings.SplitN(string(c), ":", 2)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	return ""
+}
+
+// UTSMode represents the UTS namespace of the container.
+type UTSMode string
+
+// IsPrivate indicates whether the container uses its private UTS namespace.
+func (n UTSMode) IsPrivate() bool {
+	return !(n.IsHost())
+}
+
+// IsHost indicates whether the container uses the host's UTS namespace.
+func (n UTSMode) IsHost() bool {
+	return n == "host"
+}
+
+// Valid indicates whether the UTS namespace is valid.
+func (n UTSMode) Valid() bool {
+	parts := strings.Split(string(n), ":")
+	switch mode := parts[0]; mode {
+	case "", "host":
+	default:
+		return false
+	}
+	return true
+}
+
+// PidMode represents the pid namespace of the container.
+type PidMode string
+
+// IsPrivate indicates whether the container uses its own new pid namespace.
+func (n PidMode) IsPrivate() bool {
+	return !(n.IsHost() || n.IsContainer())
+}
+
+// IsHost indicates whether the container uses the host's pid namespace.
+func (n PidMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsContainer indicates whether the container uses a container's pid namespace.
+func (n PidMode) IsContainer() bool {
+	parts := strings.SplitN(string(n), ":", 2)
+	return len(parts) > 1 && parts[0] == "container"
+}
+
+// Valid indicates whether the pid namespace is valid.
+func (n PidMode) Valid() bool {
+	parts := strings.Split(string(n), ":")
+	switch mode := parts[0]; mode {
+	case "", "host":
+	case "container":
+		if len(parts) != 2 || parts[1] == "" {
+			return false
+		}
+	default:
+		return false
+	}
+	return true
+}
+
+// Container returns the name of the container whose pid namespace is going to be used.
+func (n PidMode) Container() string {
+	parts := strings.SplitN(string(n), ":", 2)
+	if len(parts) > 1 {
+		return parts[1]
+	}
+	return ""
+}
+
+// DeviceRequest represents a request for devices from a device driver.
+// Used by GPU device drivers.
+type DeviceRequest struct {
+	Driver       string            // Name of device driver
+	Count        int               // Number of devices to request (-1 = All)
+	DeviceIDs    []string          // List of device IDs as recognizable by the device driver
+	Capabilities [][]string        // An OR list of AND lists of device capabilities (e.g. "gpu")
+	Options      map[string]string // Options to pass onto the device driver
+}
+
+// DeviceMapping represents the device mapping between the host and the container.
+type DeviceMapping struct {
+	PathOnHost        string
+	PathInContainer   string
+	CgroupPermissions string
+}
+
+// RestartPolicy represents the restart policies of the container.
+type RestartPolicy struct {
+	Name              string
+	MaximumRetryCount int
+}
+
+// IsNone indicates whether the container has the "no" restart policy.
+// This means the container will not automatically restart when exiting.
+func (rp *RestartPolicy) IsNone() bool {
+	return rp.Name == "no" || rp.Name == ""
+}
+
+// IsAlways indicates whether the container has the "always" restart policy.
+// This means the container will automatically restart regardless of the exit status.
+func (rp *RestartPolicy) IsAlways() bool {
+	return rp.Name == "always"
+}
+
+// IsOnFailure indicates whether the container has the "on-failure" restart policy.
+// This means the container will automatically restart if it exits with a non-zero exit status.
+func (rp *RestartPolicy) IsOnFailure() bool {
+	return rp.Name == "on-failure"
+}
+
+// IsUnlessStopped indicates whether the container has the
+// "unless-stopped" restart policy. This means the container will
+// automatically restart unless the user has put it into a stopped state.
+func (rp *RestartPolicy) IsUnlessStopped() bool {
+	return rp.Name == "unless-stopped"
+}
+
+// IsSame compares two RestartPolicy values to see if they are the same
+func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
+	return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
+}
+
+// LogMode is a type to define the available modes for logging
+// These modes affect how logs are handled when log messages start piling up.
+type LogMode string
+
+// Available logging modes
+const (
+	LogModeUnset            = ""
+	LogModeBlocking LogMode = "blocking"
+	LogModeNonBlock LogMode = "non-blocking"
+)
+
+// LogConfig represents the logging configuration of the container.
+type LogConfig struct {
+	Type   string
+	Config map[string]string
+}
+
+// Resources contains container's resources (cgroups config, ulimits...)
+type Resources struct {
+	// Applicable to all platforms
+	CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
+	Memory    int64 // Memory limit (in bytes)
+	NanoCPUs  int64 `json:"NanoCpus"` // CPU quota in units of 10^-9 CPUs.
+
+	// Applicable to UNIX platforms
+	CgroupParent         string // Parent cgroup.
+	BlkioWeight          uint16 // Block IO weight (relative weight vs. other containers)
+	BlkioWeightDevice    []*blkiodev.WeightDevice
+	BlkioDeviceReadBps   []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteBps  []*blkiodev.ThrottleDevice
+	BlkioDeviceReadIOps  []*blkiodev.ThrottleDevice
+	BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
+	CPUPeriod            int64           `json:"CpuPeriod"`          // CPU CFS (Completely Fair Scheduler) period
+	CPUQuota             int64           `json:"CpuQuota"`           // CPU CFS (Completely Fair Scheduler) quota
+	CPURealtimePeriod    int64           `json:"CpuRealtimePeriod"`  // CPU real-time period
+	CPURealtimeRuntime   int64           `json:"CpuRealtimeRuntime"` // CPU real-time runtime
+	CpusetCpus           string          // CpusetCpus 0-2, 0,1
+	CpusetMems           string          // CpusetMems 0-2, 0,1
+	Devices              []DeviceMapping // List of devices to map inside the container
+	DeviceCgroupRules    []string        // List of rules to be added to the device cgroup
+	DeviceRequests       []DeviceRequest // List of device requests for device drivers
+	KernelMemory         int64           // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes
+	KernelMemoryTCP      int64           // Hard limit for kernel TCP buffer memory (in bytes)
+	MemoryReservation    int64           // Memory soft limit (in bytes)
+	MemorySwap           int64           // Total memory usage (memory + swap); set `-1` to enable unlimited swap
+	MemorySwappiness     *int64          // Tuning container memory swappiness behaviour
+	OomKillDisable       *bool           // Whether to disable OOM Killer or not
+	PidsLimit            *int64          // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change.
+	Ulimits              []*units.Ulimit // List of ulimits to be set in the container
+
+	// Applicable to Windows
+	CPUCount           int64  `json:"CpuCount"`   // CPU count
+	CPUPercent         int64  `json:"CpuPercent"` // CPU percent
+	IOMaximumIOps      uint64 // Maximum IOps for the container system drive
+	IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
+}
+
+// UpdateConfig holds the mutable attributes of a Container.
+// Those attributes can be updated at runtime.
+type UpdateConfig struct {
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+	RestartPolicy RestartPolicy
+}
+
+// HostConfig the non-portable Config structure of a container.
+// Here, "non-portable" means "dependent on the host we are running on".
+// Portable information *should* appear in Config.
+type HostConfig struct {
+	// Applicable to all platforms
+	Binds           []string      // List of volume bindings for this container
+	ContainerIDFile string        // File (path) where the containerId is written
+	LogConfig       LogConfig     // Configuration of the logs for this container
+	NetworkMode     NetworkMode   // Network mode to use for the container
+	PortBindings    nat.PortMap   // Port mapping between the exposed port (container) and the host
+	RestartPolicy   RestartPolicy // Restart policy to be used for the container
+	AutoRemove      bool          // Automatically remove container when it exits
+	VolumeDriver    string        // Name of the volume driver used to mount volumes
+	VolumesFrom     []string      // List of volumes to take from other containers
+
+	// Applicable to UNIX platforms
+	CapAdd          strslice.StrSlice // List of kernel capabilities to add to the container
+	CapDrop         strslice.StrSlice // List of kernel capabilities to remove from the container
+	CgroupnsMode    CgroupnsMode      // Cgroup namespace mode to use for the container
+	DNS             []string          `json:"Dns"`        // List of DNS servers to look up
+	DNSOptions      []string          `json:"DnsOptions"` // List of DNS options to look for
+	DNSSearch       []string          `json:"DnsSearch"`  // List of DNS search domains to look for
+	ExtraHosts      []string          // List of extra hosts
+	GroupAdd        []string          // List of additional groups that the container process will run as
+	IpcMode         IpcMode           // IPC namespace to use for the container
+	Cgroup          CgroupSpec        // Cgroup to use for the container
+	Links           []string          // List of links (in the name:alias form)
+	OomScoreAdj     int               // Container preference for OOM-killing
+	PidMode         PidMode           // PID namespace to use for the container
+	Privileged      bool              // Is the container in privileged mode
+	PublishAllPorts bool              // Should docker publish all exposed ports for the container
+	ReadonlyRootfs  bool              // Is the container root filesystem mounted read-only
+	SecurityOpt     []string          // List of string values to customize labels for MLS systems, such as SELinux.
+	StorageOpt      map[string]string `json:",omitempty"` // Storage driver options per container.
+	Tmpfs           map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
+	UTSMode         UTSMode           // UTS namespace to use for the container
+	UsernsMode      UsernsMode        // The user namespace to use for the container
+	ShmSize         int64             // Total shm memory usage
+	Sysctls         map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
+	Runtime         string            `json:",omitempty"` // Runtime to use with this container
+
+	// Applicable to Windows
+	ConsoleSize [2]uint   // Initial console size (height,width)
+	Isolation   Isolation // Isolation technology of the container (e.g. default, hyperv)
+
+	// Contains container's resources (cgroups, ulimits)
+	Resources
+
+	// Mounts specs used by the container
+	Mounts []mount.Mount `json:",omitempty"`
+
+	// MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths)
+	MaskedPaths []string
+
+	// ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths)
+	ReadonlyPaths []string
+
+	// Run a custom init inside the container, if null, use the daemon's configured settings
+	Init *bool `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
new file mode 100644
index 0000000000..cf6fdf4402
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go
@@ -0,0 +1,41 @@
+// +build !windows
+
+package container // import "github.com/docker/docker/api/types/container"
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string {
+	if n.IsBridge() {
+		return "bridge"
+	} else if n.IsHost() {
+		return "host"
+	} else if n.IsContainer() {
+		return "container"
+	} else if n.IsNone() {
+		return "none"
+	} else if n.IsDefault() {
+		return "default"
+	} else if n.IsUserDefined() {
+		return n.UserDefined()
+	}
+	return ""
+}
+
+// IsBridge indicates whether container uses the bridge network stack
+func (n NetworkMode) IsBridge() bool {
+	return n == "bridge"
+}
+
+// IsHost indicates whether container uses the host network stack.
+func (n NetworkMode) IsHost() bool {
+	return n == "host"
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
+}
diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
new file mode 100644
index 0000000000..99f803a5bb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go
@@ -0,0 +1,40 @@
+package container // import "github.com/docker/docker/api/types/container"
+
+// IsBridge indicates whether container uses the bridge network stack;
+// on Windows it is given the name NAT
+func (n NetworkMode) IsBridge() bool {
+	return n == "nat"
+}
+
+// IsHost indicates whether container uses the host network stack.
+// Returns false as this is not supported on Windows.
+func (n NetworkMode) IsHost() bool {
+	return false
+}
+
+// IsUserDefined indicates user-created network
+func (n NetworkMode) IsUserDefined() bool {
+	return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
+}
+
+// IsValid indicates if an isolation technology is valid
+func (i Isolation) IsValid() bool {
+	return i.IsDefault() || i.IsHyperV() || i.IsProcess()
+}
+
+// NetworkName returns the name of the network stack.
+func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 0000000000..cd8311f99c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 0000000000..dc942d9d9e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. 
+	// Required: true
+	Message string `json:"message"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go
new file mode 100644
index 0000000000..f84f034cd5
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go
@@ -0,0 +1,6 @@
+package types
+
+// Error returns the error message
+func (e ErrorResponse) Error() string {
+	return e.Message
+}
diff --git a/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/docker/docker/api/types/events/events.go
new file mode 100644
index 0000000000..aa8fba8154
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/events/events.go
@@ -0,0 +1,54 @@
+package events // import "github.com/docker/docker/api/types/events"
+
+const (
+	// BuilderEventType is the event type that the builder generates
+	BuilderEventType = "builder"
+	// ContainerEventType is the event type that containers generate
+	ContainerEventType = "container"
+	// DaemonEventType is the event type that the daemon generates
+	DaemonEventType = "daemon"
+	// ImageEventType is the event type that images generate
+	ImageEventType = "image"
+	// NetworkEventType is the event type that networks generate
+	NetworkEventType = "network"
+	// PluginEventType is the event type that plugins generate
+	PluginEventType = "plugin"
+	// VolumeEventType is the event type that volumes generate
+	VolumeEventType = "volume"
+	// ServiceEventType is the event type that services generate
+	ServiceEventType = "service"
+	// NodeEventType is the event type that nodes generate
+	NodeEventType = "node"
+	// SecretEventType is the event type that secrets generate
+	SecretEventType = "secret"
+	// ConfigEventType is the event type that configs generate
+	ConfigEventType = "config"
+)
+
+// Actor describes something that generates events,
+// like a container, or a network, or a volume.
+// It has a defined name and a set of attributes.
+// The container attributes are its labels, other actors
+// can generate these attributes from other properties.
+type Actor struct {
+	ID         string
+	Attributes map[string]string
+}
+
+// Message represents the information an event contains
+type Message struct {
+	// Deprecated information from JSONMessage.
+	// With data only in container events.
+	Status string `json:"status,omitempty"`
+	ID     string `json:"id,omitempty"`
+	From   string `json:"from,omitempty"`
+
+	Type   string
+	Action string
+	Actor  Actor
+	// Engine events are local scope. Cluster events are swarm scope.
+	Scope string `json:"scope,omitempty"`
+
+	Time     int64 `json:"time,omitempty"`
+	TimeNano int64 `json:"timeNano,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go
new file mode 100644
index 0000000000..4bc91cffd6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/filters/parse.go
@@ -0,0 +1,324 @@
+/*Package filters provides tools for encoding a mapping of keys to a set of
+multiple values.
+*/
+package filters // import "github.com/docker/docker/api/types/filters"
+
+import (
+	"encoding/json"
+	"regexp"
+	"strings"
+
+	"github.com/docker/docker/api/types/versions"
+)
+
+// Args stores a mapping of keys to a set of multiple values.
+type Args struct {
+ fields map[string]map[string]bool
+}
+
+// KeyValuePair is used to initialize a new Args
+type KeyValuePair struct {
+ Key string
+ Value string
+}
+
+// Arg creates a new KeyValuePair for initializing Args
+func Arg(key, value string) KeyValuePair {
+ return KeyValuePair{Key: key, Value: value}
+}
+
+// NewArgs returns a new Args populated with the initial args
+func NewArgs(initialArgs ...KeyValuePair) Args {
+ args := Args{fields: map[string]map[string]bool{}}
+ for _, arg := range initialArgs {
+ args.Add(arg.Key, arg.Value)
+ }
+ return args
+}
+
+// Keys returns all the keys in the list of Args
+func (args Args) Keys() []string {
+ keys := make([]string, 0, len(args.fields))
+ for k := range args.fields {
+ keys = append(keys, k)
+ }
+ return keys
+}
+
+// MarshalJSON returns a JSON byte representation of the Args
+func (args Args) MarshalJSON() ([]byte, error) {
+ if len(args.fields) == 0 {
+ return []byte{}, nil
+ }
+ return json.Marshal(args.fields)
+}
+
+// ToJSON returns the Args as a JSON encoded string
+func ToJSON(a Args) (string, error) {
+ if a.Len() == 0 {
+ return "", nil
+ }
+ buf, err := json.Marshal(a)
+ return string(buf), err
+}
+
+// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
+// then the encoded format will use an older legacy format where the values are a
+// list of strings, instead of a set.
+//
+// Deprecated: do not use in any new code; use ToJSON instead
+func ToParamWithVersion(version string, a Args) (string, error) {
+ if a.Len() == 0 {
+ return "", nil
+ }
+
+ if version != "" && versions.LessThan(version, "1.22") {
+ buf, err := json.Marshal(convertArgsToSlice(a.fields))
+ return string(buf), err
+ }
+
+ return ToJSON(a)
+}
+
+// FromJSON decodes a JSON encoded string into Args
+func FromJSON(p string) (Args, error) {
+ args := NewArgs()
+
+ if p == "" {
+ return args, nil
+ }
+
+ raw := []byte(p)
+ err := json.Unmarshal(raw, &args)
+ if err == nil {
+ return args, nil
+ }
+
+ // Fallback to parsing arguments in the legacy slice format
+ deprecated := map[string][]string{}
+ if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
+ return args, err
+ }
+
+ args.fields = deprecatedArgs(deprecated)
+ return args, nil
+}
+
+// UnmarshalJSON populates the Args from JSON encoded bytes
+func (args Args) UnmarshalJSON(raw []byte) error {
+ if len(raw) == 0 {
+ return nil
+ }
+ return json.Unmarshal(raw, &args.fields)
+}
+
+// Get returns the list of values associated with the key
+func (args Args) Get(key string) []string {
+ values := args.fields[key]
+ if values == nil {
+ return make([]string, 0)
+ }
+ slice := make([]string, 0, len(values))
+ for key := range values {
+ slice = append(slice, key)
+ }
+ return slice
+}
+
+// Add adds a new value to the set of values associated with key
+func (args Args) Add(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ args.fields[key][value] = true
+ } else {
+ args.fields[key] = map[string]bool{value: true}
+ }
+}
+
+// Del removes a value from the set
+func (args Args) Del(key, value string) {
+ if _, ok := args.fields[key]; ok {
+ delete(args.fields[key], value)
+ if len(args.fields[key]) == 0 {
+ delete(args.fields, key)
+ }
+ }
+}
+
+// Len returns the number of keys in the mapping
+func (args Args) Len() int {
+ return len(args.fields)
+}
+
+// MatchKVList returns true if all the pairs in sources exist as key=value
+// pairs in the mapping at key, or if there are no values at key.
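+//
+// For example, with a "label" filter holding "env=prod", sources containing
+// {"env": "prod"} match while {"env": "dev"} does not:
+//
+//    args := NewArgs(Arg("label", "env=prod"))
+//    args.MatchKVList("label", map[string]string{"env": "prod"}) // true
+//    args.MatchKVList("label", map[string]string{"env": "dev"})  // false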
+func (args Args) MatchKVList(key string, sources map[string]string) bool {
+ fieldValues := args.fields[key]
+
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+
+ if len(sources) == 0 {
+ return false
+ }
+
+ for value := range fieldValues {
+ testKV := strings.SplitN(value, "=", 2)
+
+ v, ok := sources[testKV[0]]
+ if !ok {
+ return false
+ }
+ if len(testKV) == 2 && testKV[1] != v {
+ return false
+ }
+ }
+
+ return true
+}
+
+// Match returns true if any of the values at key match the source string
+func (args Args) Match(field, source string) bool {
+ if args.ExactMatch(field, source) {
+ return true
+ }
+
+ fieldValues := args.fields[field]
+ for name2match := range fieldValues {
+ match, err := regexp.MatchString(name2match, source)
+ if err != nil {
+ continue
+ }
+ if match {
+ return true
+ }
+ }
+ return false
+}
+
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+ fieldValues, ok := args.fields[key]
+ // do not filter if there is no filter set or cannot determine filter
+ if !ok || len(fieldValues) == 0 {
+ return true
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+ fieldValues := args.fields[key]
+ // do not filter if there is no filter set or cannot determine filter
+ if len(fieldValues) == 0 {
+ return true
+ }
+ if len(args.fields[key]) != 1 {
+ return false
+ }
+
+ // try to match full name value to avoid O(N) regular expression matching
+ return fieldValues[source]
+}
+
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+ if args.ExactMatch(key, source) {
+ return true
+ }
+
+ fieldValues := args.fields[key]
+ for prefix := range fieldValues {
+ if strings.HasPrefix(source, prefix) {
+ return true
+ }
+ }
+ return false
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+ _, ok := args.fields[field]
+ return ok
+}
+
+type invalidFilter string
+
+func (e invalidFilter) Error() string {
+ return "Invalid filter '" + string(e) + "'"
+}
+
+func (invalidFilter) InvalidParameter() {}
+
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+ for name := range args.fields {
+ if !accepted[name] {
+ return invalidFilter(name)
+ }
+ }
+ return nil
+}
+
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+ if _, ok := args.fields[field]; !ok {
+ return nil
+ }
+ for v := range args.fields[field] {
+ if err := op(v); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Clone returns a copy of args.
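+//
+// The copy is deep: mutating the clone does not affect the original, e.g.
+//
+//    a := NewArgs(Arg("name", "web"))
+//    b := a.Clone()
+//    b.Add("name", "db") // a still contains only "web"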
+func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 0000000000..4d9bf1c62c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 0000000000..7592d2f8b1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/docker/docker/api/types/image/image_history.go new file mode 100644 index 0000000000..e302bb0aeb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -0,0 +1,36 @@ +package image // import "github.com/docker/docker/api/types/image" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. 
+// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// HistoryResponseItem individual image layer information in response to ImageHistory operation +// swagger:model HistoryResponseItem +type HistoryResponseItem struct { + + // comment + // Required: true + Comment string `json:"Comment"` + + // created + // Required: true + Created int64 `json:"Created"` + + // created by + // Required: true + CreatedBy string `json:"CreatedBy"` + + // Id + // Required: true + ID string `json:"Id"` + + // size + // Required: true + Size int64 `json:"Size"` + + // tags + // Required: true + Tags []string `json:"Tags"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 0000000000..b9a65a0d8e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 0000000000..e145b3dcfc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 0000000000..443b8d07a9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,131 @@ +package mount // import "github.com/docker/docker/api/types/mount" + +import ( + "os" +) + +// Type represents the type of a mount. 
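+//
+// The mount type selects which of the option structs below applies. A sketch
+// of a read-only bind mount (the host and container paths are illustrative):
+//
+//    m := mount.Mount{
+//        Type:     mount.TypeBind,
+//        Source:   "/host/config",
+//        Target:   "/etc/app",
+//        ReadOnly: true,
+//    }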
+type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. +type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. 
Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 0000000000..437b184c67 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,126 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
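+ // Note that the slices above are reallocated, so the copy shares no
+ // backing arrays with the original settings.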
+ } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return filter.Validate(acceptedFilters) +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 0000000000..abae48b9ab --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 0000000000..5699010675 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 0000000000..32962dc2eb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 0000000000..c82f204e87 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 0000000000..5c031cf8b5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 0000000000..60d1fb5ad8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 0000000000..d91234744c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // Host IP address that the container's port is mapped to + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 0000000000..f0a2113e40 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 0000000000..53e47084c8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,119 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
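+//
+// In the JSON form, InsecureRegistryCIDRs entries are plain CIDR strings; a
+// minimal decoding sketch (the CIDR value is illustrative):
+//
+//    var cfg ServiceConfig
+//    raw := []byte(`{"InsecureRegistryCIDRs": ["10.0.0.0/8"]}`)
+//    err := json.Unmarshal(raw, &cfg)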
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
+ IsOfficial bool `json:"is_official"`
+ // Name is the name of the repository
+ Name string `json:"name"`
+ // IsAutomated indicates whether the result is automated
+ IsAutomated bool `json:"is_automated"`
+ // Description is a textual description of the repository
+ Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+ // Query contains the query string that generated the search results
+ Query string `json:"query"`
+ // NumResults indicates the number of results the query returned
+ NumResults int `json:"num_results"`
+ // Results is a slice containing the actual results for the search
+ Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+ // Descriptor contains information about the manifest, including
+ // the content addressable digest
+ Descriptor v1.Descriptor
+ // Platforms contains the list of platforms supported by the image,
+ // obtained by parsing the manifest
+ Platforms []v1.Platform
+}
diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go
new file mode 100644
index 0000000000..74ea64b1bb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+ // Optional warning messages
+ Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go
new file mode 100644
index 0000000000..20daebed14
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types is used for API stability in the types and responses returned
+// to consumers of the API stats endpoint.
+package types // import "github.com/docker/docker/api/types"
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+ // Number of periods with throttling active
+ Periods uint64 `json:"periods"`
+ // Number of periods when the container hits its throttling limit.
+ ThrottledPeriods uint64 `json:"throttled_periods"`
+ // Aggregate time the container was throttled for in nanoseconds.
+ ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+ // Total CPU time consumed.
+ // Units: nanoseconds (Linux)
+ // Units: 100's of nanoseconds (Windows)
+ TotalUsage uint64 `json:"total_usage"`
+
+ // Total CPU time consumed per core (Linux). Not used on Windows.
+ // Units: nanoseconds.
+ PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+ // Time spent by tasks of the cgroup in kernel mode (Linux).
+ // Time spent by all container processes in kernel mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+ UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+ // Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows).
+ // Units: nanoseconds (Linux).
+ // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
+ UsageInUsermode uint64 `json:"usage_in_usermode"`
+}
+
+// CPUStats aggregates and wraps all CPU related info of a container
+type CPUStats struct {
+ // CPU Usage. Linux and Windows.
+ CPUUsage CPUUsage `json:"cpu_usage"`
+
+ // System Usage. Linux only.
+ SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
+
+ // Online CPUs. Linux only.
+ OnlineCPUs uint32 `json:"online_cpus,omitempty"`
+
+ // Throttling Data. Linux only.
+ ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
+}
+
+// MemoryStats aggregates all memory stats since container inception on Linux.
+// Windows returns stats for commit and private working set only.
+type MemoryStats struct {
+ // Linux Memory Stats
+
+ // current res_counter usage for memory
+ Usage uint64 `json:"usage,omitempty"`
+ // maximum usage ever recorded.
+ MaxUsage uint64 `json:"max_usage,omitempty"`
+ // TODO(vishh): Export these as stronger types.
+ // all the stats exported via memory.stat.
+ Stats map[string]uint64 `json:"stats,omitempty"`
+ // number of times memory usage hits limits.
+ Failcnt uint64 `json:"failcnt,omitempty"`
+ Limit uint64 `json:"limit,omitempty"`
+
+ // Windows Memory Stats
+ // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
+
+ // committed bytes
+ Commit uint64 `json:"commitbytes,omitempty"`
+ // peak committed bytes
+ CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
+ // private working set
+ PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
+}
+
+// BlkioStatEntry is one small entity to store a piece of Blkio stats
+// Not used on Windows.
+type BlkioStatEntry struct {
+ Major uint64 `json:"major"`
+ Minor uint64 `json:"minor"`
+ Op string `json:"op"`
+ Value uint64 `json:"value"`
+}
+
+// BlkioStats stores all IO service stats for data read and write.
+// This is a Linux specific structure, as the differences between expressing
+// block I/O on Windows and Linux are significant enough that a combined
+// structure would make little sense.
+type BlkioStats struct {
+ // number of bytes transferred to and from the block device
+ IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
+ IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
+ IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
+ IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
+ IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
+ IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
+ IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
+ SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
+}
+
+// StorageStats is the disk I/O stats for read/write on Windows.
+type StorageStats struct {
+ ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
+ ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
+ WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
+ WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
+}
+
+// NetworkStats aggregates the network stats of one container
+type NetworkStats struct {
+ // Bytes received. Windows and Linux.
+ RxBytes uint64 `json:"rx_bytes"`
+ // Packets received. Windows and Linux.
+ RxPackets uint64 `json:"rx_packets"`
+ // Received errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ RxErrors uint64 `json:"rx_errors"`
+ // Incoming packets dropped. Windows and Linux.
+ RxDropped uint64 `json:"rx_dropped"`
+ // Bytes sent. Windows and Linux.
+ TxBytes uint64 `json:"tx_bytes"`
+ // Packets sent. Windows and Linux.
+ TxPackets uint64 `json:"tx_packets"`
+ // Sent errors. Not used on Windows. Note that we don't `omitempty` this
+ // field as it is expected in the >=v1.21 API stats structure.
+ TxErrors uint64 `json:"tx_errors"`
+ // Outgoing packets dropped. Windows and Linux.
+ TxDropped uint64 `json:"tx_dropped"`
+ // Endpoint ID. Not used on Linux.
+ EndpointID string `json:"endpoint_id,omitempty"`
+ // Instance ID. Not used on Linux.
+ InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+ // Current is the number of pids in the cgroup
+ Current uint64 `json:"current,omitempty"`
+ // Limit is the hard limit on the number of pids in the cgroup.
+ // A "Limit" of 0 means that there is no limit.
+ Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct, aggregating all types of stats of one container
+type Stats struct {
+ // Common stats
+ Read time.Time `json:"read"`
+ PreRead time.Time `json:"preread"`
+
+ // Linux specific stats, not populated on Windows.
+ PidsStats PidsStats `json:"pids_stats,omitempty"`
+ BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+ // Windows specific stats, not populated on Linux.
+ NumProcs uint32 `json:"num_procs"`
+ StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+ // Shared stats
+ CPUStats CPUStats `json:"cpu_stats,omitempty"`
+ PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+ MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container's name and ID, and its networks
+type StatsJSON struct {
+ Stats
+
+ Name string `json:"name,omitempty"`
+ ID string `json:"id,omitempty"`
+
+ // Networks request version >=1.21
+ Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 0000000000..82921cebc1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice // import "github.com/docker/docker/api/types/strslice"
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
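+ //
+ // For non-empty input, both JSON forms decode below; the plain
+ // string form becomes a single-element slice, for example:
+ //
+ //    ["sh", "-c", "echo hi"] -> StrSlice{"sh", "-c", "echo hi"}
+ //    "echo hi"               -> StrSlice{"echo hi"}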
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go
new file mode 100644
index 0000000000..ef020f458b
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/common.go
@@ -0,0 +1,40 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "time"
+
+// Version represents the internal object version.
+type Version struct {
+ Index uint64 `json:",omitempty"`
+}
+
+// Meta is a base object inherited by most of the other ones.
+type Meta struct {
+ Version Version `json:",omitempty"`
+ CreatedAt time.Time `json:",omitempty"`
+ UpdatedAt time.Time `json:",omitempty"`
+}
+
+// Annotations represents how to describe an object.
+type Annotations struct {
+ Name string `json:",omitempty"`
+ Labels map[string]string `json:"Labels"`
+}
+
+// Driver represents a driver (network, logging, secrets backend).
+type Driver struct {
+ Name string `json:",omitempty"`
+ Options map[string]string `json:",omitempty"`
+}
+
+// TLSInfo represents the TLS information about what CA certificate is trusted,
+// and who the issuer for a TLS certificate is
+type TLSInfo struct {
+ // TrustRoot is the trusted CA root certificate in PEM format
+ TrustRoot string `json:",omitempty"`
+
+ // CertIssuer is the raw subject bytes of the issuer
+ CertIssuerSubject []byte `json:",omitempty"`
+
+ // CertIssuerPublicKey is the raw public key bytes of the issuer
+ CertIssuerPublicKey []byte `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go
new file mode 100644
index 0000000000..16202ccce6
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/config.go
@@ -0,0 +1,40 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import "os"
+
+// Config represents a config.
+type Config struct {
+ ID string
+ Meta
+ Spec ConfigSpec
+}
+
+// ConfigSpec represents a config specification from a config in swarm
+type ConfigSpec struct {
+ Annotations
+ Data []byte `json:",omitempty"`
+
+ // Templating controls whether and how to evaluate the config payload as
+ // a template. If it is not set, no templating is used.
+ Templating *Driver `json:",omitempty"`
+}
+
+// ConfigReferenceFileTarget is a file target in a config reference
+type ConfigReferenceFileTarget struct {
+ Name string
+ UID string
+ GID string
+ Mode os.FileMode
+}
+
+// ConfigReferenceRuntimeTarget is a target for a config specifying that it
+// isn't mounted into the container but instead has some other purpose.
+type ConfigReferenceRuntimeTarget struct{}
+
+// ConfigReference is a reference to a config in swarm
+type ConfigReference struct {
+ File *ConfigReferenceFileTarget `json:",omitempty"`
+ Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"`
+ ConfigID string
+ ConfigName string
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go
new file mode 100644
index 0000000000..af5e1c0bc2
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/container.go
@@ -0,0 +1,80 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "time"
+
+ "github.com/docker/docker/api/types/container"
+ "github.com/docker/docker/api/types/mount"
+ "github.com/docker/go-units"
+)
+
+// DNSConfig specifies DNS related configurations in the resolver
+// configuration file (resolv.conf). Detailed documentation is available at:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// `nameserver`, `search`, and `options` are supported.
+// TODO: `domain` is not supported yet.
+type DNSConfig struct {
+ // Nameservers specifies the IP addresses of the name servers
+ Nameservers []string `json:",omitempty"`
+ // Search specifies the search list for host-name lookup
+ Search []string `json:",omitempty"`
+ // Options allows certain internal resolver variables to be modified
+ Options []string `json:",omitempty"`
+}
+
+// SELinuxContext contains the SELinux labels of the container.
+type SELinuxContext struct {
+ Disable bool
+
+ User string
+ Role string
+ Type string
+ Level string
+}
+
+// CredentialSpec for managed service account (Windows only)
+type CredentialSpec struct {
+ Config string
+ File string
+ Registry string
+}
+
+// Privileges defines the security options for the container.
+type Privileges struct {
+ CredentialSpec *CredentialSpec
+ SELinuxContext *SELinuxContext
+}
+
+// ContainerSpec represents the spec of a container.
+type ContainerSpec struct {
+ Image string `json:",omitempty"`
+ Labels map[string]string `json:",omitempty"`
+ Command []string `json:",omitempty"`
+ Args []string `json:",omitempty"`
+ Hostname string `json:",omitempty"`
+ Env []string `json:",omitempty"`
+ Dir string `json:",omitempty"`
+ User string `json:",omitempty"`
+ Groups []string `json:",omitempty"`
+ Privileges *Privileges `json:",omitempty"`
+ Init *bool `json:",omitempty"`
+ StopSignal string `json:",omitempty"`
+ TTY bool `json:",omitempty"`
+ OpenStdin bool `json:",omitempty"`
+ ReadOnly bool `json:",omitempty"`
+ Mounts []mount.Mount `json:",omitempty"`
+ StopGracePeriod *time.Duration `json:",omitempty"`
+ Healthcheck *container.HealthConfig `json:",omitempty"`
+ // The format of extra hosts on swarmkit is specified in:
+ // http://man7.org/linux/man-pages/man5/hosts.5.html
+ // IP_address canonical_hostname [aliases...]
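+ // e.g. "10.0.0.1 db.internal db" (the address and names here are
+ // illustrative).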
+ Hosts []string `json:",omitempty"`
+ DNSConfig *DNSConfig `json:",omitempty"`
+ Secrets []*SecretReference `json:",omitempty"`
+ Configs []*ConfigReference `json:",omitempty"`
+ Isolation container.Isolation `json:",omitempty"`
+ Sysctls map[string]string `json:",omitempty"`
+ CapabilityAdd []string `json:",omitempty"`
+ CapabilityDrop []string `json:",omitempty"`
+ Ulimits []*units.Ulimit `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go
new file mode 100644
index 0000000000..98ef3284d1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/swarm/network.go
@@ -0,0 +1,121 @@
+package swarm // import "github.com/docker/docker/api/types/swarm"
+
+import (
+ "github.com/docker/docker/api/types/network"
+)
+
+// Endpoint represents an endpoint.
+type Endpoint struct {
+ Spec EndpointSpec `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+ VirtualIPs []EndpointVirtualIP `json:",omitempty"`
+}
+
+// EndpointSpec represents the spec of an endpoint.
+type EndpointSpec struct {
+ Mode ResolutionMode `json:",omitempty"`
+ Ports []PortConfig `json:",omitempty"`
+}
+
+// ResolutionMode represents a resolution mode.
+type ResolutionMode string
+
+const (
+ // ResolutionModeVIP VIP
+ ResolutionModeVIP ResolutionMode = "vip"
+ // ResolutionModeDNSRR DNSRR
+ ResolutionModeDNSRR ResolutionMode = "dnsrr"
+)
+
+// PortConfig represents the config of a port.
+type PortConfig struct {
+ Name string `json:",omitempty"`
+ Protocol PortConfigProtocol `json:",omitempty"`
+ // TargetPort is the port inside the container
+ TargetPort uint32 `json:",omitempty"`
+ // PublishedPort is the port on the swarm hosts
+ PublishedPort uint32 `json:",omitempty"`
+ // PublishMode is the mode in which the port is published
+ PublishMode PortConfigPublishMode `json:",omitempty"`
+}
+
+// PortConfigPublishMode represents the mode in which the port is to
+// be published.
+type PortConfigPublishMode string
+
+const (
+ // PortConfigPublishModeIngress is used for ports published
+ // for ingress load balancing using routing mesh.
+ PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
+ // PortConfigPublishModeHost is used for ports published
+ // for direct host level access on the host where the task is running.
+ PortConfigPublishModeHost PortConfigPublishMode = "host"
+)
+
+// PortConfigProtocol represents the protocol of a port.
+type PortConfigProtocol string
+
+const (
+ // TODO(stevvooe): These should be used generally, not just for PortConfig.
+
+ // PortConfigProtocolTCP TCP
+ PortConfigProtocolTCP PortConfigProtocol = "tcp"
+ // PortConfigProtocolUDP UDP
+ PortConfigProtocolUDP PortConfigProtocol = "udp"
+ // PortConfigProtocolSCTP SCTP
+ PortConfigProtocolSCTP PortConfigProtocol = "sctp"
+)
+
+// EndpointVirtualIP represents the virtual IP of a port.
+type EndpointVirtualIP struct {
+ NetworkID string `json:",omitempty"`
+ Addr string `json:",omitempty"`
+}
+
+// Network represents a network.
+type Network struct {
+ ID string
+ Meta
+ Spec NetworkSpec `json:",omitempty"`
+ DriverState Driver `json:",omitempty"`
+ IPAMOptions *IPAMOptions `json:",omitempty"`
+}
+
+// NetworkSpec represents the spec of a network.
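+//
+// A sketch of an attachable overlay network spec (the name and driver shown
+// are illustrative):
+//
+//    spec := NetworkSpec{
+//        Annotations:         Annotations{Name: "app-net"},
+//        DriverConfiguration: &Driver{Name: "overlay"},
+//        Attachable:          true,
+//    }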
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 0000000000..1e30f5fa10 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. 
+type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 0000000000..0c77403ccf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 0000000000..98c2806c31 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 0000000000..e45045866a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,754 @@ +// Code generated by protoc-gen-gogo. 
DO NOT EDIT. +// source: plugin.proto + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
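The hand-written Marshal/Unmarshal methods that follow encode tags and lengths as protobuf base-128 varints: seven payload bits per byte, with the high bit set on every byte except the last. A self-contained sketch of that encoding (illustrative, mirroring what encodeVarintPlugin below does; not part of the generated file):

package main

import "fmt"

// putUvarint appends x to buf in protobuf base-128 varint form.
func putUvarint(buf []byte, x uint64) []byte {
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

func main() {
	fmt.Printf("% x\n", putUvarint(nil, 300)) // prints: ac 02
}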
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + 
sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 
0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 0000000000..9ef169046b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; + repeated string env = 5; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 0000000000..d5213ec981 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 0000000000..6eb452d24d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,202 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Service represents a service. +type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` + + // ServiceStatus is an optional, extra field indicating the number of + // desired and running tasks. 
It is provided primarily as a shortcut to
+	// calculating these values client-side, which otherwise would require
+	// listing all tasks for a service, an operation that could be
+	// expensive in computation and network usage.
+	ServiceStatus *ServiceStatus `json:",omitempty"`
+
+	// JobStatus is the status of a Service which is in one of ReplicatedJob or
+	// GlobalJob modes. It is absent on Replicated and Global services.
+	JobStatus *JobStatus `json:",omitempty"`
+}
+
+// ServiceSpec represents the spec of a service.
+type ServiceSpec struct {
+	Annotations
+
+	// TaskTemplate defines how the service should construct new tasks when
+	// orchestrating this service.
+	TaskTemplate   TaskSpec      `json:",omitempty"`
+	Mode           ServiceMode   `json:",omitempty"`
+	UpdateConfig   *UpdateConfig `json:",omitempty"`
+	RollbackConfig *UpdateConfig `json:",omitempty"`
+
+	// Networks field in ServiceSpec is deprecated. The
+	// same field in TaskSpec should be used instead.
+	// This field will be removed in a future release.
+	Networks     []NetworkAttachmentConfig `json:",omitempty"`
+	EndpointSpec *EndpointSpec             `json:",omitempty"`
+}
+
+// ServiceMode represents the mode of a service.
+type ServiceMode struct {
+	Replicated    *ReplicatedService `json:",omitempty"`
+	Global        *GlobalService     `json:",omitempty"`
+	ReplicatedJob *ReplicatedJob     `json:",omitempty"`
+	GlobalJob     *GlobalJob         `json:",omitempty"`
+}
+
+// UpdateState is the state of a service update.
+type UpdateState string
+
+const (
+	// UpdateStateUpdating is the updating state.
+	UpdateStateUpdating UpdateState = "updating"
+	// UpdateStatePaused is the paused state.
+	UpdateStatePaused UpdateState = "paused"
+	// UpdateStateCompleted is the completed state.
+	UpdateStateCompleted UpdateState = "completed"
+	// UpdateStateRollbackStarted is the state with a rollback in progress.
+	UpdateStateRollbackStarted UpdateState = "rollback_started"
+	// UpdateStateRollbackPaused is the state with a rollback paused.
+	UpdateStateRollbackPaused UpdateState = "rollback_paused"
+	// UpdateStateRollbackCompleted is the state with a rollback completed.
+	UpdateStateRollbackCompleted UpdateState = "rollback_completed"
+)
+
+// UpdateStatus reports the status of a service update.
+type UpdateStatus struct {
+	State       UpdateState `json:",omitempty"`
+	StartedAt   *time.Time  `json:",omitempty"`
+	CompletedAt *time.Time  `json:",omitempty"`
+	Message     string      `json:",omitempty"`
+}
+
+// ReplicatedService is a kind of ServiceMode.
+type ReplicatedService struct {
+	Replicas *uint64 `json:",omitempty"`
+}
+
+// GlobalService is a kind of ServiceMode.
+type GlobalService struct{}
+
+// ReplicatedJob is a type of Service which executes a defined number of Tasks
+// in parallel until the specified number of Tasks have succeeded.
+type ReplicatedJob struct {
+	// MaxConcurrent indicates the maximum number of Tasks that should be
+	// executing simultaneously for this job at any given time. There may be
+	// fewer Tasks than MaxConcurrent executing simultaneously; for example, if
+	// there are fewer than MaxConcurrent tasks needed to reach
+	// TotalCompletions.
+	//
+	// If this field is empty, it will default to a max concurrency of 1.
+	MaxConcurrent *uint64 `json:",omitempty"`
+
+	// TotalCompletions is the total number of Tasks desired to run to
+	// completion.
+	//
+	// If this field is empty, the value of MaxConcurrent will be used.
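A hedged sketch of how a client might apply the documented MaxConcurrent default (a nil pointer means a max concurrency of 1); the helper name is illustrative, not part of this patch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

// effectiveConcurrency applies the documented default: a nil
// MaxConcurrent means a max concurrency of 1.
func effectiveConcurrency(j swarm.ReplicatedJob) uint64 {
	if j.MaxConcurrent == nil {
		return 1
	}
	return *j.MaxConcurrent
}

func main() {
	fmt.Println(effectiveConcurrency(swarm.ReplicatedJob{})) // 1
	five := uint64(5)
	fmt.Println(effectiveConcurrency(swarm.ReplicatedJob{MaxConcurrent: &five})) // 5
}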
+	TotalCompletions *uint64 `json:",omitempty"`
+}
+
+// GlobalJob is the type of a Service which executes a Task on every Node
+// matching the Service's placement constraints. These tasks run to completion
+// and then exit.
+//
+// This type is deliberately empty.
+type GlobalJob struct{}
+
+const (
+	// UpdateFailureActionPause PAUSE
+	UpdateFailureActionPause = "pause"
+	// UpdateFailureActionContinue CONTINUE
+	UpdateFailureActionContinue = "continue"
+	// UpdateFailureActionRollback ROLLBACK
+	UpdateFailureActionRollback = "rollback"
+
+	// UpdateOrderStopFirst STOP_FIRST
+	UpdateOrderStopFirst = "stop-first"
+	// UpdateOrderStartFirst START_FIRST
+	UpdateOrderStartFirst = "start-first"
+)
+
+// UpdateConfig represents the update configuration.
+type UpdateConfig struct {
+	// Maximum number of tasks to be updated in one iteration.
+	// 0 means unlimited parallelism.
+	Parallelism uint64
+
+	// Amount of time between updates.
+	Delay time.Duration `json:",omitempty"`
+
+	// FailureAction is the action to take when an update fails.
+	FailureAction string `json:",omitempty"`
+
+	// Monitor indicates how long to monitor a task for failure after it is
+	// created. If the task fails by ending up in one of the states
+	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+	// this counts as a failure. If it fails after Monitor, it does not
+	// count as a failure. If Monitor is unspecified, a default value will
+	// be used.
+	Monitor time.Duration `json:",omitempty"`
+
+	// MaxFailureRatio is the fraction of tasks that may fail during
+	// an update before the failure action is invoked. Any task created by
+	// the current update which ends up in one of the states REJECTED,
+	// COMPLETED or FAILED within Monitor from its creation counts as a
+	// failure. The number of failures is divided by the number of tasks
+	// being updated, and if this fraction is greater than
+	// MaxFailureRatio, the failure action is invoked.
+	//
+	// If the failure action is CONTINUE, there is no effect.
+	// If the failure action is PAUSE, no more tasks will be updated until
+	// another update is started.
+	MaxFailureRatio float32
+
+	// Order indicates the order of operations when rolling out an updated
+	// task. Either the old task is shut down before the new task is
+	// started, or the new task is started before the old task is shut down.
+	Order string
+}
+
+// ServiceStatus represents the number of running tasks in a service and the
+// number of tasks desired to be running.
+type ServiceStatus struct {
+	// RunningTasks is the number of tasks for the service actually in the
+	// Running state.
+	RunningTasks uint64
+
+	// DesiredTasks is the number of tasks desired to be running by the
+	// service. For replicated services, this is the replica count. For global
+	// services, this is computed by taking the number of tasks with desired
+	// state of not-Shutdown.
+	DesiredTasks uint64
+
+	// CompletedTasks is the number of tasks in the state Completed, if this
+	// service is in ReplicatedJob or GlobalJob mode. This field must be
+	// cross-referenced with the service type, because the default value of 0
+	// may mean that a service is not in a job mode, or it may mean that the
+	// job has yet to complete any tasks.
+	CompletedTasks uint64
+}
+
+// JobStatus is the status of a job-type service.
+type JobStatus struct {
+	// JobIteration is a value increased each time a Job is executed,
+	// successfully or otherwise. "Executed", in this case, means the job as a
"Executed", in this case, means the job as a + // whole has been started, not that an individual Task has been launched. A + // job is "Executed" when its ServiceSpec is updated. JobIteration can be + // used to disambiguate Tasks belonging to different executions of a job. + // + // Though JobIteration will increase with each subsequent execution, it may + // not necessarily increase by 1, and so JobIteration should not be used to + // keep track of the number of times a job has been executed. + JobIteration Version + + // LastExecution is the time that the job was last executed, as observed by + // Swarm manager. + LastExecution time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 0000000000..b25f999646 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,227 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info" +// it contains the same information as "Swarm", but without the JoinTokens +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an affect on new tasks. Old tasks + // will continue use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration. +type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. 
+	KeepOldSnapshots *uint64 `json:",omitempty"`
+
+	// LogEntriesForSlowFollowers is the number of log entries to keep
+	// around to sync up slow followers after a snapshot is created.
+	LogEntriesForSlowFollowers uint64 `json:",omitempty"`
+
+	// ElectionTick is the number of ticks that a follower will wait for a message
+	// from the leader before becoming a candidate and starting an election.
+	// ElectionTick must be greater than HeartbeatTick.
+	//
+	// A tick currently defaults to one second, so these translate directly to
+	// seconds currently, but this is NOT guaranteed.
+	ElectionTick int
+
+	// HeartbeatTick is the number of ticks between heartbeats. Every
+	// HeartbeatTick ticks, the leader will send a heartbeat to the
+	// followers.
+	//
+	// A tick currently defaults to one second, so these translate directly to
+	// seconds currently, but this is NOT guaranteed.
+	HeartbeatTick int
+}
+
+// DispatcherConfig represents dispatcher configuration.
+type DispatcherConfig struct {
+	// HeartbeatPeriod defines how often the agent should send heartbeats to
+	// the dispatcher.
+	HeartbeatPeriod time.Duration `json:",omitempty"`
+}
+
+// CAConfig represents CA configuration.
+type CAConfig struct {
+	// NodeCertExpiry is the duration certificates should be issued for.
+	NodeCertExpiry time.Duration `json:",omitempty"`
+
+	// ExternalCAs is a list of CAs to which a manager node will make
+	// certificate signing requests for node certificates.
+	ExternalCAs []*ExternalCA `json:",omitempty"`
+
+	// SigningCACert and SigningCAKey specify the desired signing root CA and
+	// root CA key for the swarm. When inspecting the cluster, the key will
+	// be redacted.
+	SigningCACert string `json:",omitempty"`
+	SigningCAKey  string `json:",omitempty"`
+
+	// If this value changes, and there is no specified signing cert and key,
+	// then the swarm is forced to generate a new root certificate and key.
+	ForceRotate uint64 `json:",omitempty"`
+}
+
+// ExternalCAProtocol represents the type of external CA.
+type ExternalCAProtocol string
+
+// ExternalCAProtocolCFSSL CFSSL
+const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
+
+// ExternalCA defines external CA to be used by the cluster.
+type ExternalCA struct {
+	// Protocol is the protocol used by this external CA.
+	Protocol ExternalCAProtocol
+
+	// URL is the URL where the external CA can be reached.
+	URL string
+
+	// Options is a set of additional key/value pairs whose interpretation
+	// depends on the specified CA type.
+	Options map[string]string `json:",omitempty"`
+
+	// CACert specifies which root CA is used by this external CA. This certificate must
+	// be in PEM format.
+	CACert string
+}
+
+// InitRequest is the request used to init a swarm.
+type InitRequest struct {
+	ListenAddr       string
+	AdvertiseAddr    string
+	DataPathAddr     string
+	DataPathPort     uint32
+	ForceNewCluster  bool
+	Spec             Spec
+	AutoLockManagers bool
+	Availability     NodeAvailability
+	DefaultAddrPool  []string
+	SubnetSize       uint32
+}
+
+// JoinRequest is the request used to join a swarm.
+type JoinRequest struct {
+	ListenAddr    string
+	AdvertiseAddr string
+	DataPathAddr  string
+	RemoteAddrs   []string
+	JoinToken     string // accept by secret
+	Availability  NodeAvailability
+}
+
+// UnlockRequest is the request used to unlock a swarm.
+type UnlockRequest struct {
+	// UnlockKey is the unlock key in ASCII-armored format.
+	UnlockKey string
+}
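For orientation, an illustrative sketch (not part of the vendored file) of assembling a single-node init request with the fields above; every value shown is a placeholder:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// A hypothetical init request: listen on all interfaces, advertise a
	// fixed address, auto-lock managers, and carve /24 subnets from a pool.
	req := swarm.InitRequest{
		ListenAddr:       "0.0.0.0:2377",
		AdvertiseAddr:    "192.0.2.10:2377",
		AutoLockManagers: true,
		DefaultAddrPool:  []string{"10.20.0.0/16"},
		SubnetSize:       24,
	}
	fmt.Printf("%+v\n", req)
}

+
+// LocalNodeState represents the state of the local node.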
+type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 0000000000..a6f7ab7b5c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,206 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` + + // JobIteration is the JobIteration of the Service that this Task was + // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is + // used to determine which Tasks belong to which run of the job. This field + // is absent if the Service mode is Replicated or Global. + JobIteration *Version `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. 
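An illustrative helper (not part of the vendored file) showing how the Task fields and TaskState constants above are typically consumed:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

// countByDesiredState buckets tasks by their DesiredState, e.g. to
// compare what is running against the orchestrator's intent.
func countByDesiredState(tasks []swarm.Task) map[swarm.TaskState]int {
	out := make(map[swarm.TaskState]int)
	for _, t := range tasks {
		out[t.DesiredState]++
	}
	return out
}

func main() {
	tasks := []swarm.Task{
		{DesiredState: swarm.TaskStateRunning},
		{DesiredState: swarm.TaskStateRunning},
		{DesiredState: swarm.TaskStateShutdown},
	}
	fmt.Println(countByDesiredState(tasks)) // map[running:2 shutdown:1]
}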
+type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory) which can be advertised by a +// node and requested to be reserved for a task. +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// Limit describes limits on resources which can be requested by a task. +type Limit struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + Pids int64 `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Limit `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. + Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. 
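+type PlacementPreference struct {
+	Spread *SpreadOver
+}

A hedged sketch (not part of the vendored file) of a Placement that combines a hard constraint with a spread preference over SpreadOver, which is defined just below; the label expressions are placeholders:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	placement := swarm.Placement{
		// Hard requirement: only nodes matching this expression.
		Constraints: []string{"node.labels.region==us-east"},
		// Soft preference: spread tasks evenly across availability zones.
		Preferences: []swarm.PlacementPreference{
			{Spread: &swarm.SpreadOver{SpreadDescriptor: "engine.labels.az"}},
		},
		MaxReplicas: 2,
	}
	fmt.Printf("%+v\n", placement)
}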
+
+// SpreadOver is a scheduling preference that instructs the scheduler to spread
+// tasks evenly over groups of nodes identified by labels.
+type SpreadOver struct {
+	// label descriptor, such as engine.labels.az
+	SpreadDescriptor string
+}
+
+// RestartPolicy represents the restart policy.
+type RestartPolicy struct {
+	Condition   RestartPolicyCondition `json:",omitempty"`
+	Delay       *time.Duration         `json:",omitempty"`
+	MaxAttempts *uint64                `json:",omitempty"`
+	Window      *time.Duration         `json:",omitempty"`
+}
+
+// RestartPolicyCondition represents when to restart.
+type RestartPolicyCondition string
+
+const (
+	// RestartPolicyConditionNone NONE
+	RestartPolicyConditionNone RestartPolicyCondition = "none"
+	// RestartPolicyConditionOnFailure ON_FAILURE
+	RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
+	// RestartPolicyConditionAny ANY
+	RestartPolicyConditionAny RestartPolicyCondition = "any"
+)
+
+// TaskStatus represents the status of a task.
+type TaskStatus struct {
+	Timestamp       time.Time        `json:",omitempty"`
+	State           TaskState        `json:",omitempty"`
+	Message         string           `json:",omitempty"`
+	Err             string           `json:",omitempty"`
+	ContainerStatus *ContainerStatus `json:",omitempty"`
+	PortStatus      PortStatus       `json:",omitempty"`
+}
+
+// ContainerStatus represents the status of a container.
+type ContainerStatus struct {
+	ContainerID string
+	PID         int
+	ExitCode    int
+}
+
+// PortStatus represents the port status of a task's host ports whose
+// service has published host ports.
+type PortStatus struct {
+	Ports []PortConfig `json:",omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
new file mode 100644
index 0000000000..84b6f07322
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/duration_convert.go
@@ -0,0 +1,12 @@
+package time // import "github.com/docker/docker/api/types/time"
+
+import (
+	"strconv"
+	"time"
+)
+
+// DurationToSecondsString converts the specified duration to the number of
+// seconds it represents, formatted as a string.
+func DurationToSecondsString(duration time.Duration) string {
+	return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64)
+}
diff --git a/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/docker/docker/api/types/time/timestamp.go
new file mode 100644
index 0000000000..ea3495efeb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/time/timestamp.go
@@ -0,0 +1,129 @@
+package time // import "github.com/docker/docker/api/types/time"
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// These are additional predefined layouts for use in Time.Format and Time.Parse
+// with --since and --until parameters for `docker logs` and `docker events`
+const (
+	rFC3339Local     = "2006-01-02T15:04:05"           // RFC3339 with local timezone
+	rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone
+	dateWithZone     = "2006-01-02Z07:00"              // RFC3339 with time at 00:00:00
+	dateLocal        = "2006-01-02"                    // RFC3339 with local timezone and time at 00:00:00
+)
+
+// GetTimestamp tries to parse the given string as a golang duration,
+// then RFC3339 time and finally as a Unix timestamp. If
+// any of these were successful, it returns a Unix timestamp
+// as a string, otherwise it returns the given value back.
+// In case of duration input, the returned timestamp is computed
+// as the given reference time minus the amount of the duration.
+func GetTimestamp(value string, reference time.Time) (string, error) {
+	if d, err := time.ParseDuration(value); value != "0" && err == nil {
+		return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil
+	}
+
+	var format string
+	// if the string has a Z or a + or three dashes, use Parse; otherwise use ParseInLocation
+	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+
+	if strings.Contains(value, ".") {
+		if parseInLocation {
+			format = rFC3339NanoLocal
+		} else {
+			format = time.RFC3339Nano
+		}
+	} else if strings.Contains(value, "T") {
+		// we want the number of colons in the T portion of the timestamp
+		tcolons := strings.Count(value, ":")
+		// if parseInLocation is off and we have a +/- zone offset (not Z) then
+		// there will be an extra colon in the input for the tz offset; subtract that
+		// colon from the tcolons count
+		if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 {
+			tcolons--
+		}
+		if parseInLocation {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15"
+			case 1:
+				format = "2006-01-02T15:04"
+			default:
+				format = rFC3339Local
+			}
+		} else {
+			switch tcolons {
+			case 0:
+				format = "2006-01-02T15Z07:00"
+			case 1:
+				format = "2006-01-02T15:04Z07:00"
+			default:
+				format = time.RFC3339
+			}
+		}
+	} else if parseInLocation {
+		format = dateLocal
+	} else {
+		format = dateWithZone
+	}
+
+	var t time.Time
+	var err error
+
+	if parseInLocation {
+		t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone()))
+	} else {
+		t, err = time.Parse(format, value)
+	}
+
+	if err != nil {
+		// if there is a `-` then it's an RFC3339 like timestamp
+		if strings.Contains(value, "-") {
+			return "", err // was probably an RFC3339 like timestamp but the parser failed with an error
+		}
+		if _, _, err := parseTimestamp(value); err != nil {
+			return "", fmt.Errorf("failed to parse value as time or duration: %q", value)
+		}
+		return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server)
+	}
+
+	return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil
+}
+
+// ParseTimestamps returns seconds and nanoseconds from a timestamp that has the
+// format "%d.%09d" (time.Unix(), int64(time.Nanosecond())).
+// If the incoming nanosecond portion is longer or shorter than 9 digits it is
+// converted to nanoseconds. The expectation is that the seconds and
+// nanoseconds will be used to create a time variable.
For example: +// seconds, nanoseconds, err := ParseTimestamp("1136073600.000000001",0) +// if err == nil since := time.Unix(seconds, nanoseconds) +// returns seconds as def(aultSeconds) if value == "" +func ParseTimestamps(value string, def int64) (int64, int64, error) { + if value == "" { + return def, 0, nil + } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { + sa := strings.SplitN(value, ".", 2) + s, err := strconv.ParseInt(sa[0], 10, 64) + if err != nil { + return s, 0, err + } + if len(sa) != 2 { + return s, 0, nil + } + n, err := strconv.ParseInt(sa[1], 10, 64) + if err != nil { + return s, n, err + } + // should already be in nanoseconds but just in case convert n to nanoseconds + n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) + return s, n, nil +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 0000000000..e3a159912e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,635 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// RootFS returns Image's RootFS description including the layer IDs. +type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Variant string `json:",omitempty"` + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. 
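A usage sketch for the two exported helpers above (illustrative, not part of the vendored file; the vendored package is imported under an alias here because its package name is time):

package main

import (
	"fmt"
	"time"

	timetypes "github.com/docker/docker/api/types/time"
)

func main() {
	ref := time.Date(2022, 2, 23, 12, 0, 0, 0, time.UTC)

	// A duration is interpreted relative to the reference time:
	// "10m" becomes the Unix seconds for 11:50:00 UTC.
	ts, err := timetypes.GetTimestamp("10m", ref)
	fmt.Println(ts, err)

	// A "seconds.nanoseconds" value is split back apart.
	s, n, err := timetypes.ParseTimestamps("1136073600.000000001", 0)
	fmt.Println(s, n, err) // 1136073600 1 <nil>
}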
+type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. +} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // 
running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + Warnings []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// NetworkAddressPool is a temp struct used by Info struct +type NetworkAddressPool struct { + Base string + Size int +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string // String representation of the container state. 
Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. It's only used by the Docker Swarm standalone API +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. 
+type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. +type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. + // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. 
+ // Option CheckDuplicate is there to provide a best effort checking of any networks + // which has the same name but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. +type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` + + // This is exposed here only for internal use + // It is not currently supported to specify custom shim configs + Shim *ShimConfig `json:"-"` +} + +// ShimConfig is used by runtime to configure containerd shims +type ShimConfig struct { + Binary string + Opts interface{} +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuildCache []*BuildCache + BuilderSize int64 // deprecated +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. 
+ ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. +type PushResult struct { + Tag string + Digest string + Size int +} + +// BuildResult contains the image id of a successful build +type BuildResult struct { + ID string +} + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Parent string + Type string + Description string + InUse bool + Shared bool + Size int64 + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int +} + +// BuildCachePruneOptions holds parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md new file mode 100644 index 0000000000..1ef911edb0 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/README.md @@ -0,0 +1,14 @@ +# Legacy API type versions + +This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`. + +Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that use the legacy types in `v1p19`. + +## Package name conventions + +The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: + +1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`. +2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`. + +For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`. diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go new file mode 100644 index 0000000000..8ccb0aa92e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -0,0 +1,62 @@ +package versions // import "github.com/docker/docker/api/types/versions" + +import ( + "strconv" + "strings" +) + +// compare compares two version strings +// and returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
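+//
+// Components are compared numerically from left to right, so, unlike a plain
+// string comparison (a sketch of the behavior implied by the loop below):
+//
+//	compare("1.10", "1.9")  // 1, because 10 > 9
+//	compare("1.24", "1.24") // 0
+//	compare("1.24", "1.25") // -1
+//
+// Missing components and components that fail strconv.Atoi are treated as 0,
+// so "1.24" and "1.24.0" compare as equal.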
+func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 0000000000..c69b08448d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,72 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_create.go b/vendor/github.com/docker/docker/api/types/volume/volume_create.go new file mode 100644 index 0000000000..8538078dd6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -0,0 +1,31 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// VolumeCreateBody Volume configuration +// swagger:model VolumeCreateBody +type VolumeCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are + // passed directly to the driver and are driver specific. + // + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. + // + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volume_list.go b/vendor/github.com/docker/docker/api/types/volume/volume_list.go new file mode 100644 index 0000000000..be06179bf4 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -0,0 +1,23 @@ +package volume // import "github.com/docker/docker/api/types/volume" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumeListOKBody Volume list response +// swagger:model VolumeListOKBody +type VolumeListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes. + // + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/docker/docker/client/README.md new file mode 100644 index 0000000000..992f18117d --- /dev/null +++ b/vendor/github.com/docker/docker/client/README.md @@ -0,0 +1,35 @@ +# Go client for the Docker Engine API + +The `docker` command uses this package to communicate with the daemon. It can also be used by your own Go applications to do anything the command-line interface does – running containers, pulling images, managing swarms, etc. 
+ +For example, to list running containers (the equivalent of `docker ps`): + +```go +package main + +import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" +) + +func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } +} +``` + +[Full documentation is available on GoDoc.](https://godoc.org/github.com/docker/docker/client) diff --git a/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 0000000000..3aae43e3d1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 0000000000..397d67cdcf --- /dev/null +++ b/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + + if err != nil { + return nil, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/docker/docker/client/checkpoint_create.go new file mode 100644 index 0000000000..921024fe4f --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// CheckpointCreate creates a checkpoint from the given container with the given name +func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error { + resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil) + ensureReaderClosed(resp) + return 
err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go new file mode 100644 index 0000000000..54f55fa76e --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointDelete deletes the checkpoint with the given name from the given container +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go new file mode 100644 index 0000000000..66d46dd161 --- /dev/null +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// CheckpointList returns the checkpoints of the given container in the docker host +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { + var checkpoints []types.Checkpoint + + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return checkpoints, wrapResponseError(err, resp, "container", container) + } + + err = json.NewDecoder(resp.body).Decode(&checkpoints) + return checkpoints, err +} diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go new file mode 100644 index 0000000000..21edf1fa1f --- /dev/null +++ b/vendor/github.com/docker/docker/client/client.go @@ -0,0 +1,310 @@ +/* +Package client is a Go client for the Docker Engine API. + +For more information about the Engine API, see the documentation: +https://docs.docker.com/engine/api/ + +Usage + +You use the library by creating a client object and calling methods on it. The +client can be created either from environment variables with NewClientWithOpts(client.FromEnv), +or configured manually with NewClient(). 
+ +For example, to list running containers (the equivalent of "docker ps"): + + package main + + import ( + "context" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/client" + ) + + func main() { + cli, err := client.NewClientWithOpts(client.FromEnv) + if err != nil { + panic(err) + } + + containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{}) + if err != nil { + panic(err) + } + + for _, container := range containers { + fmt.Printf("%s %s\n", container.ID[:10], container.Image) + } + } + +*/ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "path" + "strings" + + "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + +// Client is the API client that performs all operations +// against a docker server. +type Client struct { + // scheme sets the scheme for the client + scheme string + // host holds the server address to connect to + host string + // proto holds the client protocol i.e. unix. + proto string + // addr holds the client address. + addr string + // basePath holds the path to prepend to the requests. + basePath string + // client used to send and receive http requests. + client *http.Client + // version of the server to talk to. + version string + // custom http headers configured by users. + customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. + negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool +} + +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . +// The Docker client (and by extension docker API client) can be made to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. +// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse + } + return ErrRedirect +} + +// NewClientWithOpts initializes a new API client with default values. It takes functors +// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. 
It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. +func NewClientWithOpts(ops ...Opt) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) + if err != nil { + return nil, err + } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + client: client, + proto: defaultProto, + addr: defaultAddr, + } + + for _, op := range ops { + if err := op(c); err != nil { + return nil, err + } + } + + if _, ok := c.client.Transport.(http.RoundTripper); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) + } + if c.scheme == "" { + c.scheme = "http" + + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + c.scheme = "https" + } + } + + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + url, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := new(http.Transport) + sockets.ConfigureTransport(transport, url.Scheme, url.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, + }, nil +} + +// Close the transport used by the client +func (cli *Client) Close() error { + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + return nil +} + +// getAPIPath returns the versioned request path to call the api. +// It appends the query parameters to the path if they are not empty. +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { + var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } + if cli.version != "" { + v := strings.TrimPrefix(cli.version, "v") + apiPath = path.Join(cli.basePath, "/v"+v, p) + } else { + apiPath = path.Join(cli.basePath, p) + } + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() +} + +// ClientVersion returns the API version used by this client. +func (cli *Client) ClientVersion() string { + return cli.version +} + +// NegotiateAPIVersion queries the API and updates the version to match the +// API version. Any errors are silently ignored. If a manual override is in place, +// either through the `DOCKER_API_VERSION` environment variable, or if the client +// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation +// will be performed. +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { + if !cli.manualOverride { + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) + } +} + +// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion +// if the ping version is less than the default version. If a manual override is +// in place, either through the `DOCKER_API_VERSION` environment variable, or if +// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no +// negotiation is performed. +func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if !cli.manualOverride { + cli.negotiateAPIVersionPing(p) + } +} + +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version. 
Any errors are silently ignored. +func (cli *Client) negotiateAPIVersionPing(p types.Ping) { + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(p.APIVersion, cli.version) { + cli.version = p.APIVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. + if cli.negotiateVersion { + cli.negotiated = true + } +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + c := *cli.client + return &c +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { + protoAddrParts := strings.SplitN(host, "://", 2) + if len(protoAddrParts) == 1 { + return nil, fmt.Errorf("unable to parse docker host `%s`", host) + } + + var basePath string + proto, addr := protoAddrParts[0], protoAddrParts[1] + if proto == "tcp" { + parsed, err := url.Parse("tcp://" + addr) + if err != nil { + return nil, err + } + addr = parsed.Host + basePath = parsed.Path + } + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil +} + +// CustomHTTPHeaders returns the custom http headers stored by the client. +func (cli *Client) CustomHTTPHeaders() map[string]string { + m := make(map[string]string) + for k, v := range cli.customHTTPHeaders { + m[k] = v + } + return m +} + +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. +// Deprecated: use WithHTTPHeaders when creating the client. +func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { + cli.customHTTPHeaders = headers +} + +// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/docker/docker/client/client_deprecated.go new file mode 100644 index 0000000000..54cdfc29a8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_deprecated.go @@ -0,0 +1,23 @@ +package client + +import "net/http" + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+// Deprecated: use NewClientWithOpts +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} + +// NewEnvClient initializes a new API client based on environment variables. +// See FromEnv for a list of supported environment variables. +// +// Deprecated: use NewClientWithOpts(FromEnv) +func NewEnvClient() (*Client, error) { + return NewClientWithOpts(FromEnv) +} diff --git a/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/docker/docker/client/client_unix.go new file mode 100644 index 0000000000..9d0f0dcbf0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_unix.go @@ -0,0 +1,9 @@ +// +build linux freebsd openbsd netbsd darwin solaris illumos dragonfly + +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines the OS-specific default if DOCKER_HOST is unset +const DefaultDockerHost = "unix:///var/run/docker.sock" + +const defaultProto = "unix" +const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/docker/docker/client/client_windows.go new file mode 100644 index 0000000000..c649e54412 --- /dev/null +++ b/vendor/github.com/docker/docker/client/client_windows.go @@ -0,0 +1,7 @@ +package client // import "github.com/docker/docker/client" + +// DefaultDockerHost defines the OS-specific default if DOCKER_HOST is unset +const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +const defaultProto = "npipe" +const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 0000000000..ee7d411df0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigCreate creates a new Config.
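+//
+// For example (a sketch; assumes an initialized cli connected to a swarm
+// manager, with placeholder name and payload):
+//
+//	spec := swarm.ConfigSpec{
+//		Annotations: swarm.Annotations{Name: "app-config"},
+//		Data:        []byte("key=value"),
+//	}
+//	resp, err := cli.ConfigCreate(ctx, spec)
+//	// On success, resp.ID identifies the new config.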
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 0000000000..7d0ce3e11c --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + } + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 0000000000..565acc6e27 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. +func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + return configs, err +} diff --git a/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 0000000000..a708fcaecf --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a Config. 
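+//
+// For example (a sketch; configID is assumed to come from ConfigCreate or
+// ConfigList):
+//
+//	err := cli.ConfigRemove(ctx, configID)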
+func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "config", id) +} diff --git a/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/docker/docker/client/config_update.go new file mode 100644 index 0000000000..39e59cf858 --- /dev/null +++ b/vendor/github.com/docker/docker/client/config_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go new file mode 100644 index 0000000000..88ba1ef639 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -0,0 +1,57 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerAttach attaches a connection to a container in the server. +// It returns a types.HijackedResponse with the hijacked connection +// and a reader to get output. It's up to the caller to close +// the hijacked connection by calling types.HijackedResponse.Close. +// +// The response stream will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, with no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream.
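+//
+// For example, to demultiplex a non-TTY attach stream (a sketch; assumes
+// "os" and "github.com/docker/docker/pkg/stdcopy" imports, error handling
+// elided):
+//
+//	resp, err := cli.ContainerAttach(ctx, containerID, types.ContainerAttachOptions{
+//		Stream: true,
+//		Stdout: true,
+//		Stderr: true,
+//	})
+//	if err == nil {
+//		defer resp.Close()
+//		stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)
+//	}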
+func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { + query := url.Values{} + if options.Stream { + query.Set("stream", "1") + } + if options.Stdin { + query.Set("stdin", "1") + } + if options.Stdout { + query.Set("stdout", "1") + } + if options.Stderr { + query.Set("stderr", "1") + } + if options.DetachKeys != "" { + query.Set("detachKeys", options.DetachKeys) + } + if options.Logs { + query.Set("logs", "1") + } + + headers := map[string][]string{"Content-Type": {"text/plain"}} + return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go new file mode 100644 index 0000000000..2966e88c8e --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -0,0 +1,55 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "errors" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ContainerCommit applies changes into a container and creates a new tagged image. +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { + var repository, tag string + if options.Reference != "" { + ref, err := reference.ParseNormalizedNamed(options.Reference) + if err != nil { + return types.IDResponse{}, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") + } + ref = reference.TagNameOnly(ref) + + if tagged, ok := ref.(reference.Tagged); ok { + tag = tagged.Tag() + } + repository = reference.FamiliarName(ref) + } + + query := url.Values{} + query.Set("container", container) + query.Set("repo", repository) + query.Set("tag", tag) + query.Set("comment", options.Comment) + query.Set("author", options.Author) + for _, change := range options.Changes { + query.Add("changes", change) + } + if !options.Pause { + query.Set("pause", "0") + } + + var response types.IDResponse + resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/docker/docker/client/container_copy.go new file mode 100644 index 0000000000..bb278bf7f3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_copy.go @@ -0,0 +1,103 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types" +) + +// ContainerStatPath returns Stat information about a path inside the container filesystem. +func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { + query := url.Values{} + query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
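+	// (filepath.ToSlash is a no-op on Unix; on Windows it rewrites
+	// backslash-separated paths into the slash form the daemon API expects.)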
+ + urlStr := "/containers/" + containerID + "/archive" + response, err := cli.head(ctx, urlStr, query, nil) + defer ensureReaderClosed(response) + if err != nil { + return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) + } + return getContainerPathStatFromHeader(response.header) +} + +// CopyToContainer copies content into the container filesystem. +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { + query := url.Values{} + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. + // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. + if !options.AllowOverwriteDirWithFile { + query.Set("noOverwriteDirNonDir", "true") + } + + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" + + response, err := cli.putRaw(ctx, apiPath, query, content, nil) + defer ensureReaderClosed(response) + if err != nil { + return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) + } + + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior + if response.statusCode != http.StatusOK { + return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + return nil +} + +// CopyFromContainer gets the content from the container and returns it as a Reader +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { + query := make(url.Values, 1) + query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. + + apiPath := "/containers/" + containerID + "/archive" + response, err := cli.get(ctx, apiPath, query, nil) + if err != nil { + return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) + } + + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior + if response.statusCode != http.StatusOK { + return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) + } + + // In order to get the copy behavior right, we need to know information + // about both the source and the destination. The response headers include + // stat info about the source that we can use in deciding exactly how to + // copy it locally. Along with the stat info about the local destination, + // we have everything we need to handle the multiple possibilities there + // can be when copying a file/dir from one location to another file/dir. 
+ stat, err := getContainerPathStatFromHeader(response.header) + if err != nil { + return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) + } + return response.body, stat, err +} + +func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { + var stat types.ContainerPathStat + + encodedStat := header.Get("X-Docker-Container-Path-Stat") + statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) + + err := json.NewDecoder(statDecoder).Decode(&stat) + if err != nil { + err = fmt.Errorf("unable to decode container path stat header: %s", err) + } + + return stat, err +} diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go new file mode 100644 index 0000000000..b1d5fea5bd --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -0,0 +1,63 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/containerd/containerd/platforms" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/versions" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +type configWrapper struct { + *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *specs.Platform +} + +// ContainerCreate creates a new container based in the given configuration. +// It can be associated with a name, but it's not mandatory. +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *specs.Platform, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + + // When using API 1.24 and under, the client is responsible for removing the container + if hostConfig != nil && versions.LessThan(cli.ClientVersion(), "1.25") { + hostConfig.AutoRemove = false + } + + if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil { + return response, err + } + + query := url.Values{} + if platform != nil { + query.Set("platform", platforms.Format(*platform)) + } + + if containerName != "" { + query.Set("name", containerName) + } + + body := configWrapper{ + Config: config, + HostConfig: hostConfig, + NetworkingConfig: networkingConfig, + } + + serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/docker/docker/client/container_diff.go new file mode 100644 index 0000000000..29dac8491d --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_diff.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" +) + +// ContainerDiff shows differences in a container filesystem since it was started. 
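+//
+// For example (a sketch; Kind is 0 for a modified path, 1 for an added one,
+// and 2 for a deleted one, matching the archive change-type constants;
+// assumes an "fmt" import):
+//
+//	changes, err := cli.ContainerDiff(ctx, containerID)
+//	if err == nil {
+//		for _, c := range changes {
+//			fmt.Println(c.Kind, c.Path)
+//		}
+//	}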
+func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]container.ContainerChangeResponseItem, error) { + var changes []container.ContainerChangeResponseItem + + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return changes, err + } + + err = json.NewDecoder(serverResp.body).Decode(&changes) + return changes, err +} diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go new file mode 100644 index 0000000000..e3ee755b71 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -0,0 +1,54 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ContainerExecCreate creates a new exec configuration to run an exec process. +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} + +// ContainerExecStart starts an exec process already created in the docker host. +func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { + resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) + ensureReaderClosed(resp) + return err +} + +// ContainerExecAttach attaches a connection to an exec process in the server. +// It returns a types.HijackedResponse with the hijacked connection +// and a reader to get output. It's up to the caller to close +// the hijacked connection by calling types.HijackedResponse.Close. +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { + headers := map[string][]string{"Content-Type": {"application/json"}} + return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) +} + +// ContainerExecInspect returns information about a specific exec process on the docker host. +func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { + var response types.ContainerExecInspect + resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/docker/docker/client/container_export.go new file mode 100644 index 0000000000..d0c0a5cbad --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_export.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ContainerExport retrieves the raw contents of a container +// and returns them as an io.ReadCloser. It's up to the caller +// to close the stream.
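// [Editorial example, not part of the vendored patch.] A sketch of streaming a
// container's filesystem to a local tarball via the ContainerExport call
// documented above; the container name and output path are assumptions.
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	rc, err := cli.ContainerExport(ctx, "example-container") // hypothetical ID or name
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // closing the stream is the caller's job

	out, err := os.Create("rootfs.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}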
+func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) + if err != nil { + return nil, err + } + + return serverResp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/docker/docker/client/container_inspect.go new file mode 100644 index 0000000000..c496bcffea --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_inspect.go @@ -0,0 +1,53 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerInspect returns the container information. +func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) + } + + var response types.ContainerJSON + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} + +// ContainerInspectWithRaw returns the container information and its raw representation. +func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } + query := url.Values{} + if getSize { + query.Set("size", "1") + } + serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) + } + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ContainerJSON{}, nil, err + } + + var response types.ContainerJSON + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/docker/docker/client/container_kill.go new file mode 100644 index 0000000000..4d6f1d23da --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_kill.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerKill terminates the container process but does not remove the container from the docker host. 
+func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { + query := url.Values{} + query.Set("signal", signal) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go new file mode 100644 index 0000000000..a973de597f --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -0,0 +1,57 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainerList returns the list of containers in the docker host. +func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { + query := url.Values{} + + if options.All { + query.Set("all", "1") + } + + if options.Limit != -1 { + query.Set("limit", strconv.Itoa(options.Limit)) + } + + if options.Since != "" { + query.Set("since", options.Since) + } + + if options.Before != "" { + query.Set("before", options.Before) + } + + if options.Size { + query.Set("size", "1") + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var containers []types.Container + err = json.NewDecoder(resp.body).Decode(&containers) + return containers, err +} diff --git a/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/docker/docker/client/container_logs.go new file mode 100644 index 0000000000..5b6541f035 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_logs.go @@ -0,0 +1,80 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ContainerLogs returns the logs generated by a container in an io.ReadCloser. +// It's up to the caller to close the stream. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. 
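// [Editorial example, not part of the vendored patch.] The doc comment above
// describes the multiplexed stream format and points at stdcopy.StdCopy; this
// sketch demultiplexes the logs of a non-TTY container onto stdout and stderr.
// The container name is an assumption.
package main

import (
	"context"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	rc, err := cli.ContainerLogs(ctx, "example-container", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Tail:       "100",
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// StdCopy reads the 8-byte frame headers and splits the payloads onto
	// the two writers.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rc); err != nil {
		log.Fatal(err)
	}
}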
+func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) + if err != nil { + return nil, wrapResponseError(err, resp, "container", container) + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/docker/docker/client/container_pause.go new file mode 100644 index 0000000000..5e7271a371 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_pause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerPause pauses the main process of a given container without terminating it. +func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go new file mode 100644 index 0000000000..04383deaaf --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ContainersPrune requests the daemon to delete unused data +func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) { + var report types.ContainersPruneReport + + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/docker/docker/client/container_remove.go new file mode 100644 index 0000000000..df81461b88 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_remove.go @@ -0,0 +1,27 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerRemove kills and removes a container from the docker host. 
+func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { + query := url.Values{} + if options.RemoveVolumes { + query.Set("v", "1") + } + if options.RemoveLinks { + query.Set("link", "1") + } + + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "container", containerID) +} diff --git a/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/docker/docker/client/container_rename.go new file mode 100644 index 0000000000..240fdf552b --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_rename.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// ContainerRename changes the name of a given container. +func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { + query := url.Values{} + query.Set("name", newContainerName) + resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/docker/docker/client/container_resize.go new file mode 100644 index 0000000000..a9d4c0c79a --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_resize.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// ContainerResize changes the size of the tty for a container. +func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) +} + +// ContainerExecResize changes the size of the tty for an exec process running inside a container. +func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { + return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) +} + +func (cli *Client) resize(ctx context.Context, basePath string, height, width uint) error { + query := url.Values{} + query.Set("h", strconv.Itoa(int(height))) + query.Set("w", strconv.Itoa(int(width))) + + resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/docker/docker/client/container_restart.go new file mode 100644 index 0000000000..41e421969f --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_restart.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerRestart stops and starts a container again. +// It makes the daemon wait for the container to be up again for +// a specific amount of time, given the timeout.
+func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go new file mode 100644 index 0000000000..c2e0b15dca --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStart sends a request to the docker daemon to start a container. +func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { + query := url.Values{} + if len(options.CheckpointID) != 0 { + query.Set("checkpoint", options.CheckpointID) + } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } + + resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go new file mode 100644 index 0000000000..0a6488dde8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -0,0 +1,42 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ContainerStats returns near realtime stats for a given container. +// It's up to the caller to close the io.ReadCloser returned. +func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + if stream { + query.Set("stream", "1") + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} + +// ContainerStatsOneShot gets a single stat entry from a container. +// It differs from `ContainerStats` in that the API should not wait to prime the stats +func (cli *Client) ContainerStatsOneShot(ctx context.Context, containerID string) (types.ContainerStats, error) { + query := url.Values{} + query.Set("stream", "0") + query.Set("one-shot", "1") + + resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) + if err != nil { + return types.ContainerStats{}, err + } + + osType := getDockerOS(resp.header.Get("Server")) + return types.ContainerStats{Body: resp.body, OSType: osType}, err +} diff --git a/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/docker/docker/client/container_stop.go new file mode 100644 index 0000000000..629d7ab64c --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_stop.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "time" + + timetypes "github.com/docker/docker/api/types/time" +) + +// ContainerStop stops a container. 
In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. +func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { + query := url.Values{} + if timeout != nil { + query.Set("t", timetypes.DurationToSecondsString(*timeout)) + } + resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/docker/docker/client/container_top.go new file mode 100644 index 0000000000..a5b78999bf --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_top.go @@ -0,0 +1,28 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strings" + + "github.com/docker/docker/api/types/container" +) + +// ContainerTop shows process information from within a container. +func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (container.ContainerTopOKBody, error) { + var response container.ContainerTopOKBody + query := url.Values{} + if len(arguments) > 0 { + query.Set("ps_args", strings.Join(arguments, " ")) + } + + resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/docker/docker/client/container_unpause.go new file mode 100644 index 0000000000..1d8f873169 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_unpause.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ContainerUnpause resumes the process execution within a container +func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { + resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go new file mode 100644 index 0000000000..6917cf9fb3 --- /dev/null +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/container" +) + +// ContainerUpdate updates resources of a container +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody + serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go new file mode 100644 index 0000000000..6ab8c1da96 --- 
/dev/null +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -0,0 +1,83 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" +) + +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) + if err != nil { + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC + } + + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. 
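// [Editorial example, not part of the vendored patch.] The synchronization
// pattern the ContainerWait documentation above describes: register a
// "next-exit" wait before issuing ContainerStart, then select on the two
// returned channels. The container ID is an assumption.
package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	const id = "example-container" // hypothetical

	// Register the wait first, so a fast container cannot exit unobserved
	// between the start and wait calls.
	statusCh, errCh := cli.ContainerWait(ctx, id, container.WaitConditionNextExit)

	if err := cli.ContainerStart(ctx, id, types.ContainerStartOptions{}); err != nil {
		log.Fatal(err)
	}

	select {
	case err := <-errCh:
		log.Fatal(err)
	case status := <-statusCh:
		log.Printf("container exited with status %d", status.StatusCode)
	}
}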
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} diff --git a/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/docker/docker/client/disk_usage.go new file mode 100644 index 0000000000..354cd36939 --- /dev/null +++ b/vendor/github.com/docker/docker/client/disk_usage.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" +) + +// DiskUsage requests the current data usage from the daemon +func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { + var du types.DiskUsage + + serverResp, err := cli.get(ctx, "/system/df", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return du, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { + return du, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return du, nil +} diff --git a/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 0000000000..f4e3794cb4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + return distributionInspect, err +} diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go new file mode 100644 index 0000000000..041bc8d49c --- /dev/null +++ b/vendor/github.com/docker/docker/client/errors.go @@ -0,0 +1,138 @@ +package client // import "github.com/docker/docker/client" + +import ( + "fmt" + "net/http" + + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// errConnectionFailed implements an error returned when connection failed. 
+type errConnectionFailed struct { + host string +} + +// Error returns a string representation of an errConnectionFailed +func (err errConnectionFailed) Error() string { + if err.host == "" { + return "Cannot connect to the Docker daemon. Is the docker daemon running on this host?" + } + return fmt.Sprintf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", err.host) +} + +// IsErrConnectionFailed returns true if the error is caused by a failed connection. +func IsErrConnectionFailed(err error) bool { + return errors.As(err, &errConnectionFailed{}) +} + +// ErrorConnectionFailed returns an error with host in the error message when the connection to the docker daemon failed. +func ErrorConnectionFailed(host string) error { + return errConnectionFailed{host: host} +} + +// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility +type notFound interface { + error + NotFound() bool +} + +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. +func IsErrNotFound(err error) bool { + var e notFound + if errors.As(err, &e) { + return true + } + return errdefs.IsNotFound(err) +} + +type objectNotFoundError struct { + object string + id string +} + +func (e objectNotFoundError) NotFound() {} + +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) +} + +func wrapResponseError(err error, resp serverResponse, object, id string) error { + switch { + case err == nil: + return nil + case resp.statusCode == http.StatusNotFound: + return objectNotFoundError{object: object, id: id} + case resp.statusCode == http.StatusNotImplemented: + return errdefs.NotImplemented(err) + default: + return err + } +} + +// unauthorizedError represents an authorization error in a remote registry. +type unauthorizedError struct { + cause error +} + +// Error returns a string representation of an unauthorizedError +func (u unauthorizedError) Error() string { + return u.cause.Error() +} + +// IsErrUnauthorized returns true if the error is caused +// by a failed authentication in a remote registry +func IsErrUnauthorized(err error) bool { + if _, ok := err.(unauthorizedError); ok { + return ok + } + return errdefs.IsUnauthorized(err) +} + +type pluginPermissionDenied struct { + name string +} + +func (e pluginPermissionDenied) Error() string { + return "Permission denied while installing plugin " + e.name +} + +// IsErrPluginPermissionDenied returns true if the error is returned +// when a user denies a plugin's permissions +func IsErrPluginPermissionDenied(err error) bool { + _, ok := err.(pluginPermissionDenied) + return ok +} + +type notImplementedError struct { + message string +} + +func (e notImplementedError) Error() string { + return e.message +} + +func (e notImplementedError) NotImplemented() bool { + return true +} + +// IsErrNotImplemented returns true if the error is a NotImplemented error. +// This is returned by the API when a requested feature has not been +// implemented.
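// [Editorial example, not part of the vendored patch.] A sketch of branching
// on the error helpers above after an inspect call; the container name is an
// assumption.
package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	_, err = cli.ContainerInspect(ctx, "no-such-container")
	switch {
	case err == nil:
		log.Print("container exists")
	case client.IsErrNotFound(err):
		log.Print("container does not exist") // the objectNotFoundError path above
	case client.IsErrConnectionFailed(err):
		log.Print("daemon unreachable") // the errConnectionFailed path above
	default:
		log.Fatal(err)
	}
}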
+func IsErrNotImplemented(err error) bool { + if _, ok := err.(notImplementedError); ok { + return ok + } + return errdefs.IsNotImplemented(err) +} + +// NewVersionError returns an error if the APIVersion required +// is less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) + } + return nil +} diff --git a/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/docker/docker/client/events.go new file mode 100644 index 0000000000..f0dc9d9e12 --- /dev/null +++ b/vendor/github.com/docker/docker/client/events.go @@ -0,0 +1,102 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + timetypes "github.com/docker/docker/api/types/time" +) + +// Events returns a stream of events in the daemon. It's up to the caller to close the stream +// by cancelling the context. Once the stream has been completely read an io.EOF error will +// be sent over the error channel. If an error is sent all processing will be stopped. It's up +// to the caller to reopen the stream in the event of an error by reinvoking this method. +func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) { + + messages := make(chan events.Message) + errs := make(chan error, 1) + + started := make(chan struct{}) + go func() { + defer close(errs) + + query, err := buildEventsQueryParams(cli.version, options) + if err != nil { + close(started) + errs <- err + return + } + + resp, err := cli.get(ctx, "/events", query, nil) + if err != nil { + close(started) + errs <- err + return + } + defer resp.body.Close() + + decoder := json.NewDecoder(resp.body) + + close(started) + for { + select { + case <-ctx.Done(): + errs <- ctx.Err() + return + default: + var event events.Message + if err := decoder.Decode(&event); err != nil { + errs <- err + return + } + + select { + case messages <- event: + case <-ctx.Done(): + errs <- ctx.Err() + return + } + } + } + }() + <-started + + return messages, errs +} + +func buildEventsQueryParams(cliVersion string, options types.EventsOptions) (url.Values, error) { + query := url.Values{} + ref := time.Now() + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, ref) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, ref) + if err != nil { + return nil, err + } + query.Set("until", ts) + } + + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cliVersion, options.Filters) + if err != nil { + return nil, err + } + query.Set("filters", filterJSON) + } + + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go new file mode 100644 index 0000000000..e1dc49ef0f --- /dev/null +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -0,0 +1,145 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bufio" + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httputil" + "net/url" +
"time" + + "github.com/docker/docker/api/types" + "github.com/docker/go-connections/sockets" + "github.com/pkg/errors" +) + +// postHijacked sends a POST request and hijacks the connection. +func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { + bodyEncoded, err := encodeData(body) + if err != nil { + return types.HijackedResponse{}, err + } + + apiPath := cli.getAPIPath(ctx, path, query) + req, err := http.NewRequest(http.MethodPost, apiPath, bodyEncoded) + if err != nil { + return types.HijackedResponse{}, err + } + req = cli.addHeaders(req, headers) + + conn, err := cli.setupHijackConn(ctx, req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest(http.MethodPost, url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(ctx, req, proto) +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). +func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { + req.Host = cli.addr + req.Header.Set("Connection", "Upgrade") + req.Header.Set("Upgrade", proto) + + dialer := cli.Dialer() + conn, err := dialer(ctx) + if err != nil { + return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") + } + + // When we set up a TCP connection for hijack, there could be long periods + // of inactivity (a long running command with no output) that in certain + // network setups may cause ECONNTIMEOUT, leaving the client in an unknown + // state. Setting TCP KeepAlive on the socket connection will prohibit + // ECONNTIMEOUT unless the socket connection truly is broken + if tcpConn, ok := conn.(*net.TCPConn); ok { + tcpConn.SetKeepAlive(true) + tcpConn.SetKeepAlivePeriod(30 * time.Second) + } + + clientconn := httputil.NewClientConn(conn, nil) + defer clientconn.Close() + + // Server hijacks the connection, error 'connection closed' expected + resp, err := clientconn.Do(req) + + //nolint:staticcheck // ignore SA1019 for connecting to old (pre go1.8) daemons + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) + } + } + + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite iff the underlying connection + // implements it. 
+ if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } + } else { + br.Reset(nil) + } + + return c, nil +} + +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). +type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() +} diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go new file mode 100644 index 0000000000..8fcf995036 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -0,0 +1,146 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/base64" + "encoding/json" + "io" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" +) + +// ImageBuild sends a request to the daemon to build images. +// The Body in the response implements an io.ReadCloser and it's up to the caller to +// close it.
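// [Editorial example, not part of the vendored patch.] ImageBuild takes the
// build context as a TAR stream; this sketch assembles a one-file context in
// memory with archive/tar. The image tag and Dockerfile contents are
// assumptions.
package main

import (
	"archive/tar"
	"bytes"
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	// Build context: a tarball containing only a Dockerfile.
	dockerfile := []byte("FROM alpine:3.15\nRUN echo built\n")
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	if err := tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0600, Size: int64(len(dockerfile))}); err != nil {
		log.Fatal(err)
	}
	if _, err := tw.Write(dockerfile); err != nil {
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil {
		log.Fatal(err)
	}

	resp, err := cli.ImageBuild(ctx, buf, types.ImageBuildOptions{
		Tags:       []string{"example/app:dev"},
		Dockerfile: "Dockerfile",
		Remove:     true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close() // closing the Body is the caller's job

	// The Body is a stream of JSON progress messages.
	if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
		log.Fatal(err)
	}
}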
+func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { + query, err := cli.imageBuildOptionsToQuery(options) + if err != nil { + return types.ImageBuildResponse{}, err + } + + headers := http.Header(make(map[string][]string)) + buf, err := json.Marshal(options.AuthConfigs) + if err != nil { + return types.ImageBuildResponse{}, err + } + headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + + headers.Set("Content-Type", "application/x-tar") + + serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) + if err != nil { + return types.ImageBuildResponse{}, err + } + + osType := getDockerOS(serverResp.header.Get("Server")) + + return types.ImageBuildResponse{ + Body: serverResp.body, + OSType: osType, + }, nil +} + +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { + query := url.Values{ + "t": options.Tags, + "securityopt": options.SecurityOpt, + "extrahosts": options.ExtraHosts, + } + if options.SuppressOutput { + query.Set("q", "1") + } + if options.RemoteContext != "" { + query.Set("remote", options.RemoteContext) + } + if options.NoCache { + query.Set("nocache", "1") + } + if options.Remove { + query.Set("rm", "1") + } else { + query.Set("rm", "0") + } + + if options.ForceRemove { + query.Set("forcerm", "1") + } + + if options.PullParent { + query.Set("pull", "1") + } + + if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } + query.Set("squash", "1") + } + + if !container.Isolation.IsDefault(options.Isolation) { + query.Set("isolation", string(options.Isolation)) + } + + query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) + query.Set("cpusetmems", options.CPUSetMems) + query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) + query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) + query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) + query.Set("memory", strconv.FormatInt(options.Memory, 10)) + query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) + query.Set("cgroupparent", options.CgroupParent) + query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) + query.Set("dockerfile", options.Dockerfile) + query.Set("target", options.Target) + + ulimitsJSON, err := json.Marshal(options.Ulimits) + if err != nil { + return query, err + } + query.Set("ulimits", string(ulimitsJSON)) + + buildArgsJSON, err := json.Marshal(options.BuildArgs) + if err != nil { + return query, err + } + query.Set("buildargs", string(buildArgsJSON)) + + labelsJSON, err := json.Marshal(options.Labels) + if err != nil { + return query, err + } + query.Set("labels", string(labelsJSON)) + + cacheFromJSON, err := json.Marshal(options.CacheFrom) + if err != nil { + return query, err + } + query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return query, err + } + query.Set("platform", strings.ToLower(options.Platform)) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) + + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } + return query, nil +} diff --git 
a/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/docker/docker/client/image_create.go new file mode 100644 index 0000000000..239380474e --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_create.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageCreate creates a new image based on the parent options. +// It returns the JSON content in the response body. +func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(parentReference) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/create", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/docker/docker/client/image_history.go new file mode 100644 index 0000000000..b5bea10d8f --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_history.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/image" +) + +// ImageHistory returns the changes in an image in history format. +func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { + var history []image.HistoryResponseItem + serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return history, err + } + + err = json.NewDecoder(serverResp.body).Decode(&history) + return history, err +} diff --git a/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/docker/docker/client/image_import.go new file mode 100644 index 0000000000..d3336d4106 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_import.go @@ -0,0 +1,40 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" +) + +// ImageImport creates a new image based on the source options. +// It returns the JSON content in the response body.
+func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { + if ref != "" { + // Check if the given image name can be resolved + if _, err := reference.ParseNormalizedNamed(ref); err != nil { + return nil, err + } + } + + query := url.Values{} + query.Set("fromSrc", source.SourceName) + query.Set("repo", ref) + query.Set("tag", options.Tag) + query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + for _, change := range options.Changes { + query.Add("changes", change) + } + + resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/docker/docker/client/image_inspect.go new file mode 100644 index 0000000000..1eb8dce025 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types" +) + +// ImageInspectWithRaw returns the image information and its raw representation. +func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} + } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) + } + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return types.ImageInspect{}, nil, err + } + + var response types.ImageInspect + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go new file mode 100644 index 0000000000..a4d7505094 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -0,0 +1,46 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/versions" +) + +// ImageList returns a list of images in the docker host. 
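// [Editorial example, not part of the vendored patch.] Listing dangling images
// with a filter through the ImageList call documented above; the filter
// key/value is an assumption drawn from the Engine API.
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	images, err := cli.ImageList(ctx, types.ImageListOptions{
		Filters: filters.NewArgs(filters.Arg("dangling", "true")),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range images {
		fmt.Println(img.ID, img.Size)
	}
}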
+func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary + query := url.Values{} + + optionFilters := options.Filters + referenceFilters := optionFilters.Get("reference") + if versions.LessThan(cli.version, "1.25") && len(referenceFilters) > 0 { + query.Set("filter", referenceFilters[0]) + for _, filterValue := range referenceFilters { + optionFilters.Del("reference", filterValue) + } + } + if optionFilters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, optionFilters) + if err != nil { + return images, err + } + query.Set("filters", filterJSON) + } + if options.All { + query.Set("all", "1") + } + + serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return images, err + } + + err = json.NewDecoder(serverResp.body).Decode(&images) + return images, err +} diff --git a/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/docker/docker/client/image_load.go new file mode 100644 index 0000000000..91016e493c --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_load.go @@ -0,0 +1,29 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageLoad loads an image in the docker host from the client host. +// It's up to the caller to close the io.ReadCloser in the +// ImageLoadResponse returned by this function. +func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { + v := url.Values{} + v.Set("quiet", "0") + if quiet { + v.Set("quiet", "1") + } + headers := map[string][]string{"Content-Type": {"application/x-tar"}} + resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) + if err != nil { + return types.ImageLoadResponse{}, err + } + return types.ImageLoadResponse{ + Body: resp.body, + JSON: resp.header.Get("Content-Type") == "application/json", + }, nil +} diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go new file mode 100644 index 0000000000..56af6d7f98 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// ImagesPrune requests the daemon to delete unused data +func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (types.ImagesPruneReport, error) { + var report types.ImagesPruneReport + + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/docker/docker/client/image_pull.go new file mode 100644 index 0000000000..a23975591b --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_pull.go @@ 
-0,0 +1,64 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePull requests the docker host to pull an image from a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly. +// +// FIXME(vdemeester): this is currently used in a few ways in docker/docker +// - if not in trusted content, ref is used to pass the whole reference, and tag is empty +// - if in trusted content, ref is used to pass the reference name, and tag for the digest +func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.ImagePullOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(refStr) + if err != nil { + return nil, err + } + + query := url.Values{} + query.Set("fromImage", reference.FamiliarName(ref)) + if !options.All { + query.Set("tag", getAPITagFromNamedRef(ref)) + } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } + + resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +// getAPITagFromNamedRef returns a tag from the specified reference. +// This function is necessary as long as the docker "server" api expects +// digests to be sent as tags and makes a distinction between the name +// and tag/digest part of a reference. +func getAPITagFromNamedRef(ref reference.Named) string { + if digested, ok := ref.(reference.Digested); ok { + return digested.Digest().String() + } + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + return tagged.Tag() + } + return "" +} diff --git a/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/docker/docker/client/image_push.go new file mode 100644 index 0000000000..845580d4a4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_push.go @@ -0,0 +1,54 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "errors" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// ImagePush requests the docker host to push an image to a remote registry. +// It executes the privileged function if the operation is unauthorized +// and it tries one more time. +// It's up to the caller to handle the io.ReadCloser and close it properly.
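// [Editorial example, not part of the vendored patch.] ImagePush sends
// credentials via the X-Registry-Auth header as base64url-encoded JSON; this
// sketch builds that value from types.AuthConfig. Registry, repository, and
// credentials are assumptions.
package main

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	authJSON, err := json.Marshal(types.AuthConfig{
		Username: "user", // hypothetical credentials
		Password: "secret",
	})
	if err != nil {
		log.Fatal(err)
	}

	rc, err := cli.ImagePush(ctx, "registry.example.com/team/app:1.0", types.ImagePushOptions{
		RegistryAuth: base64.URLEncoding.EncodeToString(authJSON),
	})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	// The body is a JSON progress stream.
	if _, err := io.Copy(os.Stdout, rc); err != nil {
		log.Fatal(err)
	}
}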
+func (cli *Client) ImagePush(ctx context.Context, image string, options types.ImagePushOptions) (io.ReadCloser, error) { + ref, err := reference.ParseNormalizedNamed(image) + if err != nil { + return nil, err + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return nil, errors.New("cannot push a digest reference") + } + + name := reference.FamiliarName(ref) + query := url.Values{} + if !options.All { + ref = reference.TagNameOnly(ref) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + } + + resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return nil, privilegeErr + } + resp, err = cli.tryImagePush(ctx, name, query, newAuthHeader) + } + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/docker/docker/client/image_remove.go new file mode 100644 index 0000000000..84a41af0f2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_remove.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" +) + +// ImageRemove removes an image from the docker host. +func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) { + query := url.Values{} + + if options.Force { + query.Set("force", "1") + } + if !options.PruneChildren { + query.Set("noprune", "1") + } + + var dels []types.ImageDeleteResponseItem + resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return dels, wrapResponseError(err, resp, "image", imageID) + } + + err = json.NewDecoder(resp.body).Decode(&dels) + return dels, err +} diff --git a/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/docker/docker/client/image_save.go new file mode 100644 index 0000000000..d1314e4b22 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_save.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" +) + +// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. +// It's up to the caller to store the images and close the stream. 
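// [Editorial example, not part of the vendored patch.] Saving an image to a
// local tarball with the ImageSave call documented above; the image reference
// and output path are assumptions.
package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/client"
)

func main() {
	ctx := context.Background()
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		log.Fatal(err)
	}

	rc, err := cli.ImageSave(ctx, []string{"alpine:3.15"})
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close() // storing and closing the stream is the caller's job

	out, err := os.Create("alpine.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()

	if _, err := io.Copy(out, rc); err != nil {
		log.Fatal(err)
	}
}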
+func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { + query := url.Values{ + "names": imageIDs, + } + + resp, err := cli.get(ctx, "/images/get", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/docker/docker/client/image_search.go new file mode 100644 index 0000000000..82955a7477 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_search.go @@ -0,0 +1,51 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" +) + +// ImageSearch makes the docker host search by a term in a remote registry. +// The list of results is not sorted in any fashion. +func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { + var results []registry.SearchResult + query := url.Values{} + query.Set("term", term) + query.Set("limit", fmt.Sprintf("%d", options.Limit)) + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return results, err + } + query.Set("filters", filterJSON) + } + + resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + return results, privilegeErr + } + resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) + } + if err != nil { + return results, err + } + + err = json.NewDecoder(resp.body).Decode(&results) + return results, err +} + +func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/images/search", query, headers) +} diff --git a/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/docker/docker/client/image_tag.go new file mode 100644 index 0000000000..5652bfc252 --- /dev/null +++ b/vendor/github.com/docker/docker/client/image_tag.go @@ -0,0 +1,37 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/pkg/errors" +) + +// ImageTag tags an image in the docker host +func (cli *Client) ImageTag(ctx context.Context, source, target string) error { + if _, err := reference.ParseAnyReference(source); err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", source) + } + + ref, err := reference.ParseNormalizedNamed(target) + if err != nil { + return errors.Wrapf(err, "Error parsing reference: %q is not a valid repository/tag", target) + } + + if _, isCanonical := ref.(reference.Canonical); isCanonical { + return errors.New("refusing to create a tag with a digest reference") + } + + ref = reference.TagNameOnly(ref) + + query := url.Values{} + query.Set("repo", reference.FamiliarName(ref)) + if tagged, ok := ref.(reference.Tagged); ok { + query.Set("tag", tagged.Tag()) + } + + resp, err := cli.post(ctx, "/images/"+source+"/tag", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git
a/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/docker/docker/client/info.go new file mode 100644 index 0000000000..c856704e23 --- /dev/null +++ b/vendor/github.com/docker/docker/client/info.go @@ -0,0 +1,26 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" +) + +// Info returns information about the docker server. +func (cli *Client) Info(ctx context.Context) (types.Info, error) { + var info types.Info + serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return info, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { + return info, fmt.Errorf("Error reading remote info: %v", err) + } + + return info, nil +} diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go new file mode 100644 index 0000000000..aabad4a911 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface.go @@ -0,0 +1,201 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net" + "net/http" + "time" + + "github.com/docker/docker/api/types" + containertypes "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/events" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/image" + networktypes "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// CommonAPIClient is the common methods between stable and experimental versions of APIClient. 
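A sketch of how consuming code typically uses the interface hierarchy listed below rather than the concrete *Client, which keeps callers testable against fakes. The function name is illustrative; context, fmt, and this client package are assumed imported:

func printServerOS(ctx context.Context, c client.APIClient) error {
	info, err := c.Info(ctx) // served by the vendored Info above
	if err != nil {
		return err
	}
	fmt.Println(info.OperatingSystem) // one of many types.Info fields
	return nil
}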
+type CommonAPIClient interface { + ConfigAPIClient + ContainerAPIClient + DistributionAPIClient + ImageAPIClient + NodeAPIClient + NetworkAPIClient + PluginAPIClient + ServiceAPIClient + SwarmAPIClient + SecretAPIClient + SystemAPIClient + VolumeAPIClient + ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client + ServerVersion(ctx context.Context) (types.Version, error) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) + Close() error +} + +// ContainerAPIClient defines API client methods for the containers +type ContainerAPIClient interface { + ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, platform *specs.Platform, containerName string) (containertypes.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) + ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) + ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error + ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error + ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) + ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) + ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) + ContainerKill(ctx context.Context, container, signal string) error + ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) + ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) + ContainerPause(ctx context.Context, container string) error + ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error + ContainerRename(ctx context.Context, container, newContainerName string) error + ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error + ContainerRestart(ctx context.Context, container string, timeout *time.Duration) error + ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) + ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) + ContainerStatsOneShot(ctx context.Context, container string) (types.ContainerStats, error) + ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error + ContainerStop(ctx context.Context, container string, timeout *time.Duration) error + ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) + ContainerUnpause(ctx context.Context, container string) error + ContainerUpdate(ctx 
context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) + CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) + CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error + ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) +} + +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + +// ImageAPIClient defines API client methods for the images +type ImageAPIClient interface { + ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error + ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) + ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) + ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) + ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) + ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) + ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) + ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) + ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDeleteResponseItem, error) + ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) + ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) + ImageTag(ctx context.Context, image, ref string) error + ImagesPrune(ctx context.Context, pruneFilter filters.Args) (types.ImagesPruneReport, error) +} + +// NetworkAPIClient defines API client methods for the networks +type NetworkAPIClient interface { + NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error + NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) + NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) + NetworkRemove(ctx context.Context, network string) error + NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) +} + +// NodeAPIClient defines API client methods for the nodes +type NodeAPIClient interface { + 
NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) + NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) + NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error + NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error +} + +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error + PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (io.ReadCloser, error) + PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + +// ServiceAPIClient defines API client methods for the services +type ServiceAPIClient interface { + ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) + ServiceInspectWithRaw(ctx context.Context, serviceID string, options types.ServiceInspectOptions) (swarm.Service, []byte, error) + ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) + ServiceRemove(ctx context.Context, serviceID string) error + ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) + TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) + TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) +} + +// SwarmAPIClient defines API client methods for the swarm +type SwarmAPIClient interface { + SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) + SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error + SwarmLeave(ctx context.Context, force bool) error + SwarmInspect(ctx context.Context) (swarm.Swarm, error) + SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error +} + +// SystemAPIClient defines API client methods for the system +type SystemAPIClient interface { + Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) + Info(ctx context.Context) (types.Info, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) + DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) +} + +// VolumeAPIClient defines 
API client methods for the volumes +type VolumeAPIClient interface { + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) + VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) + VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) + VolumeRemove(ctx context.Context, volumeID string, force bool) error + VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) +} + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) + SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error +} + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go new file mode 100644 index 0000000000..402ffb512c --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +type apiClientExperimental interface { + CheckpointAPIClient +} + +// CheckpointAPIClient defines API client methods for the checkpoints +type CheckpointAPIClient interface { + CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go new file mode 100644 index 0000000000..5502cd7426 --- /dev/null +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +// APIClient is an interface that clients that talk with a docker server must implement. +type APIClient interface { + CommonAPIClient + apiClientExperimental +} + +// Ensure that Client always implements APIClient. 
+var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go new file mode 100644 index 0000000000..f058520638 --- /dev/null +++ b/vendor/github.com/docker/docker/client/login.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" +) + +// RegistryLogin authenticates the docker server with a given docker registry. +// It returns unauthorizedError when the authentication fails. +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { + resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) + + if err != nil { + return registry.AuthenticateOKBody{}, err + } + + var response registry.AuthenticateOKBody + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/docker/docker/client/network_connect.go new file mode 100644 index 0000000000..5718946134 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_connect.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/network" +) + +// NetworkConnect connects a container to an existent network in the docker host. +func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { + nc := types.NetworkConnect{ + Container: containerID, + EndpointConfig: config, + } + resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/docker/docker/client/network_create.go new file mode 100644 index 0000000000..278d9383a8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// NetworkCreate creates a new network in the docker host. +func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { + networkCreateRequest := types.NetworkCreateRequest{ + NetworkCreate: options, + Name: name, + } + var response types.NetworkCreateResponse + serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return response, err + } + + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/docker/docker/client/network_disconnect.go new file mode 100644 index 0000000000..dd15676656 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_disconnect.go @@ -0,0 +1,15 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types" +) + +// NetworkDisconnect disconnects a container from an existent network in the docker host. 
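A sketch of the usual create-then-connect flow built from the two network helpers here; the function name, network name, and driver are illustrative:

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/network"
	"github.com/docker/docker/client"
)

func attachToAppNet(ctx context.Context, cli *client.Client, containerID string) error {
	created, err := cli.NetworkCreate(ctx, "app-net", types.NetworkCreate{Driver: "bridge"})
	if err != nil {
		return err
	}
	// An empty EndpointSettings accepts the network's defaults.
	return cli.NetworkConnect(ctx, created.ID, containerID, &network.EndpointSettings{})
}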
+func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { + nd := types.NetworkDisconnect{Container: containerID, Force: force} + resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/docker/docker/client/network_inspect.go new file mode 100644 index 0000000000..89a05b3021 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_inspect.go @@ -0,0 +1,49 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NetworkInspect returns the information for a specific network configured in the docker host. +func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) { + networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options) + return networkResource, err +} + +// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. +func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) { + if networkID == "" { + return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID} + } + var ( + networkResource types.NetworkResource + resp serverResponse + err error + ) + query := url.Values{} + if options.Verbose { + query.Set("verbose", "true") + } + if options.Scope != "" { + query.Set("scope", options.Scope) + } + resp, err = cli.get(ctx, "/networks/"+networkID, query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResource, nil, wrapResponseError(err, resp, "network", networkID) + } + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return networkResource, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&networkResource) + return networkResource, body, err +} diff --git a/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/docker/docker/client/network_list.go new file mode 100644 index 0000000000..ed2acb5571 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_list.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworkList returns the list of networks configured in the docker host. 
+func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { + query := url.Values{} + if options.Filters.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + var networkResources []types.NetworkResource + resp, err := cli.get(ctx, "/networks", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return networkResources, err + } + err = json.NewDecoder(resp.body).Decode(&networkResources) + return networkResources, err +} diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 0000000000..cebb188219 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + if err := cli.NewVersionError("1.25", "network prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/docker/docker/client/network_remove.go new file mode 100644 index 0000000000..e71b16d869 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// NetworkRemove removes an existent network from the docker host. +func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { + resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "network", networkID) +} diff --git a/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/docker/docker/client/node_inspect.go new file mode 100644 index 0000000000..d296c9fdde --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_inspect.go @@ -0,0 +1,32 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeInspectWithRaw returns the node information. 
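A sketch of a filtered NetworkList call, assuming cli and ctx are in scope; filters.NewArgs and filters.Arg come from the vendored filters package imported above, and the driver value is illustrative:

args := filters.NewArgs(filters.Arg("driver", "bridge"))
nets, err := cli.NetworkList(ctx, types.NetworkListOptions{Filters: args})
if err != nil {
	return err
}
for _, n := range nets {
	fmt.Println(n.ID, n.Name) // types.NetworkResource fields
}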
+func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { + if nodeID == "" { + return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID} + } + serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID) + } + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Node{}, nil, err + } + + var response swarm.Node + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go new file mode 100644 index 0000000000..c212906bc7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// NodeList returns the list of nodes. +func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/nodes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var nodes []swarm.Node + err = json.NewDecoder(resp.body).Decode(&nodes) + return nodes, err +} diff --git a/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/docker/docker/client/node_remove.go new file mode 100644 index 0000000000..03ab878097 --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// NodeRemove removes a Node. +func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "node", nodeID) +} diff --git a/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/docker/docker/client/node_update.go new file mode 100644 index 0000000000..de32a617fb --- /dev/null +++ b/vendor/github.com/docker/docker/client/node_update.go @@ -0,0 +1,18 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// NodeUpdate updates a Node. 
+func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/docker/docker/client/options.go new file mode 100644 index 0000000000..6f77f0955f --- /dev/null +++ b/vendor/github.com/docker/docker/client/options.go @@ -0,0 +1,172 @@ +package client + +import ( + "context" + "net" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" +) + +// Opt is a configuration option to initialize a client +type Opt func(*Client) error + +// FromEnv configures the client with values from environment variables. +// +// Supported environment variables: +// DOCKER_HOST to set the url to the docker server. +// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. +// DOCKER_CERT_PATH to load the TLS certificates from. +// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. +func FromEnv(c *Client) error { + if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { + options := tlsconfig.Options{ + CAFile: filepath.Join(dockerCertPath, "ca.pem"), + CertFile: filepath.Join(dockerCertPath, "cert.pem"), + KeyFile: filepath.Join(dockerCertPath, "key.pem"), + InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", + } + tlsc, err := tlsconfig.Client(options) + if err != nil { + return err + } + + c.client = &http.Client{ + Transport: &http.Transport{TLSClientConfig: tlsc}, + CheckRedirect: CheckRedirect, + } + } + + if host := os.Getenv("DOCKER_HOST"); host != "" { + if err := WithHost(host)(c); err != nil { + return err + } + } + + if version := os.Getenv("DOCKER_API_VERSION"); version != "" { + if err := WithVersion(version)(c); err != nil { + return err + } + } + return nil +} + +// WithDialer applies the dialer.DialContext to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +// Deprecated: use WithDialContext +func WithDialer(dialer *net.Dialer) Opt { + return WithDialContext(dialer.DialContext) +} + +// WithDialContext applies the dialer to the client transport. This can be +// used to set the Timeout and KeepAlive settings of the client. +func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt { + return func(c *Client) error { + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.DialContext = dialContext + return nil + } + return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport) + } +} + +// WithHost overrides the client host with the specified one. 
+func WithHost(host string) Opt { + return func(c *Client) error { + hostURL, err := ParseHostURL(host) + if err != nil { + return err + } + c.host = host + c.proto = hostURL.Scheme + c.addr = hostURL.Host + c.basePath = hostURL.Path + if transport, ok := c.client.Transport.(*http.Transport); ok { + return sockets.ConfigureTransport(transport, c.proto, c.addr) + } + return errors.Errorf("cannot apply host to transport: %T", c.client.Transport) + } +} + +// WithHTTPClient overrides the client http client with the specified one +func WithHTTPClient(client *http.Client) Opt { + return func(c *Client) error { + if client != nil { + c.client = client + } + return nil + } +} + +// WithTimeout configures the time limit for requests made by the HTTP client +func WithTimeout(timeout time.Duration) Opt { + return func(c *Client) error { + c.client.Timeout = timeout + return nil + } +} + +// WithHTTPHeaders overrides the client default http headers +func WithHTTPHeaders(headers map[string]string) Opt { + return func(c *Client) error { + c.customHTTPHeaders = headers + return nil + } +} + +// WithScheme overrides the client scheme with the specified one +func WithScheme(scheme string) Opt { + return func(c *Client) error { + c.scheme = scheme + return nil + } +} + +// WithTLSClientConfig applies a tls config to the client transport. +func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt { + return func(c *Client) error { + opts := tlsconfig.Options{ + CAFile: cacertPath, + CertFile: certPath, + KeyFile: keyPath, + ExclusiveRootPools: true, + } + config, err := tlsconfig.Client(opts) + if err != nil { + return errors.Wrap(err, "failed to create tls config") + } + if transport, ok := c.client.Transport.(*http.Transport); ok { + transport.TLSClientConfig = config + return nil + } + return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport) + } +} + +// WithVersion overrides the client version with the specified one. If an empty +// version is specified, the value will be ignored to allow version negotiation. +func WithVersion(version string) Opt { + return func(c *Client) error { + if version != "" { + c.version = version + c.manualOverride = true + } + return nil + } +} + +// WithAPIVersionNegotiation enables automatic API version negotiation for the client. +// With this option enabled, the client automatically negotiates the API version +// to use when making requests. API version negotiation is performed on the first +// request; subsequent requests will not re-negotiate. +func WithAPIVersionNegotiation() Opt { + return func(c *Client) error { + c.negotiateVersion = true + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 0000000000..a9af001ef4 --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,66 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/http" + "path" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" +) + +// Ping pings the server and returns the value of the "Docker-Experimental", +// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use +// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported +// by the daemon. 
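A sketch of composing the Opt values from options.go above; they are applied in order, so later options can override earlier ones. The host address and TLS file paths are illustrative:

cli, err := client.NewClientWithOpts(
	client.WithHost("tcp://10.0.0.5:2376"),
	client.WithTLSClientConfig("ca.pem", "cert.pem", "key.pem"),
	client.WithAPIVersionNegotiation(),
)
if err != nil {
	panic(err)
}
defer cli.Close()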
+func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + + // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest() + // because ping requests are used during API version negotiation, so we want + // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping + req, err := cli.buildRequest(http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err == nil { + defer ensureReaderClosed(serverResp) + switch serverResp.statusCode { + case http.StatusOK, http.StatusInternalServerError: + // Server handled the request, so parse the response + return parsePingResponse(cli, serverResp) + } + } else if IsErrConnectionFailed(err) { + return ping, err + } + + req, err = cli.buildRequest(http.MethodGet, path.Join(cli.basePath, "/_ping"), nil, nil) + if err != nil { + return ping, err + } + serverResp, err = cli.doRequest(ctx, req) + defer ensureReaderClosed(serverResp) + if err != nil { + return ping, err + } + return parsePingResponse(cli, serverResp) +} + +func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) { + var ping types.Ping + if resp.header == nil { + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) + } + ping.APIVersion = resp.header.Get("API-Version") + ping.OSType = resp.header.Get("OSType") + if resp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + if bv := resp.header.Get("Builder-Version"); bv != "" { + ping.BuilderVersion = types.BuilderVersion(bv) + } + err := cli.checkResponseErr(resp) + return ping, errdefs.FromStatusCode(err, resp.statusCode) +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 0000000000..b95dbaf686 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,23 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/x-tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go new file mode 100644 index 0000000000..01f6574f95 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginDisable disables a plugin +func (cli *Client) PluginDisable(ctx context.Context, name string, options types.PluginDisableOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/plugins/"+name+"/disable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go new file mode 100644 index 0000000000..736da48bd1 
--- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -0,0 +1,19 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" +) + +// PluginEnable enables a plugin +func (cli *Client) PluginEnable(ctx context.Context, name string, options types.PluginEnableOptions) error { + query := url.Values{} + query.Set("timeout", strconv.Itoa(options.Timeout)) + + resp, err := cli.post(ctx, "/plugins/"+name+"/enable", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go new file mode 100644 index 0000000000..81b89732b0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -0,0 +1,31 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types" +) + +// PluginInspectWithRaw inspects an existing plugin +func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } + resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, nil, wrapResponseError(err, resp, "plugin", name) + } + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return nil, nil, err + } + var p types.Plugin + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&p) + return &p, body, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go new file mode 100644 index 0000000000..012afe61ca --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -0,0 +1,113 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// PluginInstall installs a plugin +func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + // set name for plugin pull, if empty should default to remote reference + query.Set("name", name) + + resp, err := cli.tryPluginPull(ctx, query, privileges, options.RegistryAuth) + if err != nil { + return nil, err + } + + name = resp.header.Get("Docker-Plugin-Name") + + pr, pw := io.Pipe() + go func() { // todo: the client should probably be designed more around the actual api + _, err := io.Copy(pw, resp.body) + if err != nil { + pw.CloseWithError(err) + return + } + defer func() { + if err != nil { + delResp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) + ensureReaderClosed(delResp) + } + }() + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + pw.CloseWithError(err) + return + } + } + + if options.Disabled { + pw.Close() + return + } + + enableErr := cli.PluginEnable(ctx, 
name, types.PluginEnableOptions{Timeout: 0}) + pw.CloseWithError(enableErr) + }() + return pr, nil +} + +func (cli *Client) tryPluginPrivileges(ctx context.Context, query url.Values, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.get(ctx, "/plugins/privileges", query, headers) +} + +func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileges types.PluginPrivileges, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/pull", query, privileges, headers) +} + +func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { + resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { + // todo: do inspect before to check existing name before checking privileges + newAuthHeader, privilegeErr := options.PrivilegeFunc() + if privilegeErr != nil { + ensureReaderClosed(resp) + return nil, privilegeErr + } + options.RegistryAuth = newAuthHeader + resp, err = cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) + } + if err != nil { + ensureReaderClosed(resp) + return nil, err + } + + var privileges types.PluginPrivileges + if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { + ensureReaderClosed(resp) + return nil, err + } + ensureReaderClosed(resp) + + if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { + accept, err := options.AcceptPermissionsFunc(privileges) + if err != nil { + return nil, err + } + if !accept { + return nil, pluginPermissionDenied{options.RemoteRef} + } + } + return privileges, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go new file mode 100644 index 0000000000..cf1935e2f5 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// PluginList returns the installed plugins +func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.PluginsListResponse, error) { + var plugins types.PluginsListResponse + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return plugins, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/plugins", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return plugins, wrapResponseError(err, resp, "plugin", "") + } + + err = json.NewDecoder(resp.body).Decode(&plugins) + return plugins, err +} diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go new file mode 100644 index 0000000000..d20bfe8447 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" +) + +// PluginPush pushes a plugin to a registry +func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) (io.ReadCloser, error) { + headers := 
map[string][]string{"X-Registry-Auth": {registryAuth}} + resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go new file mode 100644 index 0000000000..51ca1040d6 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -0,0 +1,20 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types" +) + +// PluginRemove removes a plugin +func (cli *Client) PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error { + query := url.Values{} + if options.Force { + query.Set("force", "1") + } + + resp, err := cli.delete(ctx, "/plugins/"+name, query, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "plugin", name) +} diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go new file mode 100644 index 0000000000..dcf5752ca2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -0,0 +1,12 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" +) + +// PluginSet modifies settings for an existing plugin +func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error { + resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/docker/docker/client/plugin_upgrade.go new file mode 100644 index 0000000000..115cea945b --- /dev/null +++ b/vendor/github.com/docker/docker/client/plugin_upgrade.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/pkg/errors" +) + +// PluginUpgrade upgrades a plugin +func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) { + if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil { + return nil, err + } + query := url.Values{} + if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil { + return nil, errors.Wrap(err, "invalid remote reference") + } + query.Set("remote", options.RemoteRef) + + privileges, err := cli.checkPluginPermissions(ctx, query, options) + if err != nil { + return nil, err + } + + resp, err := cli.tryPluginUpgrade(ctx, query, privileges, name, options.RegistryAuth) + if err != nil { + return nil, err + } + return resp.body, nil +} + +func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) { + headers := map[string][]string{"X-Registry-Auth": {registryAuth}} + return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers) +} diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go new file mode 100644 index 0000000000..813eac2c9e --- /dev/null +++ b/vendor/github.com/docker/docker/client/request.go @@ -0,0 +1,269 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net" + "net/http" + 
"net/url" + "os" + "strings" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" +) + +// serverResponse is a wrapper for http API responses. +type serverResponse struct { + body io.ReadCloser + header http.Header + statusCode int + reqURL *url.URL +} + +// head sends an http request to the docker API using the method HEAD. +func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodHead, path, query, nil, headers) +} + +// get sends an http request to the docker API using the method GET with a specific Go context. +func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodGet, path, query, nil, headers) +} + +// post sends an http request to the docker API using the method POST with a specific Go context. +func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPost, path, query, body, headers) +} + +// putRaw sends an http request to the docker API using the method PUT. +func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodPut, path, query, body, headers) +} + +// delete sends an http request to the docker API using the method DELETE. +func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (serverResponse, error) { + return cli.sendRequest(ctx, http.MethodDelete, path, query, nil, headers) +} + +type headers map[string][]string + +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil + } + + body, err := encodeData(obj) + if err != nil { + return nil, headers, err + } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} + +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { + expectedPayload := (method == http.MethodPost || method == http.MethodPut) + if expectedPayload && body == nil { + body = bytes.NewReader([]byte{}) + } + + req, err := http.NewRequest(method, path, body) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, headers) + + if cli.proto == "unix" || cli.proto == "npipe" { + // For local communications, it doesn't matter what the host is. We just + // need a valid and meaningful host name. 
(See #189) + req.Host = "docker" + } + + req.URL.Host = cli.addr + req.URL.Scheme = cli.scheme + + if expectedPayload && req.Header.Get("Content-Type") == "" { + req.Header.Set("Content-Type", "text/plain") + } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + resp, err := cli.doRequest(ctx, req) + if err != nil { + return resp, errdefs.FromStatusCode(err, resp.statusCode) + } + err = cli.checkResponseErr(resp) + return resp, errdefs.FromStatusCode(err, resp.statusCode) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1, reqURL: req.URL} + + req = req.WithContext(ctx) + resp, err := cli.client.Do(req) + if err != nil { + if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") { + return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) + } + + if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") { + return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings") + } + + // Don't decorate context sentinel errors; users may be comparing to + // them directly. + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return serverResp, err + } + + if nErr, ok := err.(*url.Error); ok { + if nErr, ok := nErr.Err.(*net.OpError); ok { + if os.IsPermission(nErr.Err) { + return serverResp, errors.Wrapf(err, "Got permission denied while trying to connect to the Docker daemon socket at %v", cli.host) + } + } + } + + if err, ok := err.(net.Error); ok { + if err.Timeout() { + return serverResp, ErrorConnectionFailed(cli.host) + } + if !err.Temporary() { + if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { + return serverResp, ErrorConnectionFailed(cli.host) + } + } + } + + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. 
+		// Note we can't string compare "The system cannot find the file specified" as
+		// this is localised - for example in French the error would be
+		// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+		if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+			// Check whether the client is running with elevated privileges:
+			// opening \\.\PHYSICALDRIVE0 succeeds only for an elevated process.
+			if f, elevatedErr := os.Open("\\\\.\\PHYSICALDRIVE0"); elevatedErr != nil {
+				err = errors.Wrap(err, "In the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect.")
+			} else {
+				f.Close()
+				err = errors.Wrap(err, "This error may indicate that the docker daemon is not running.")
+			}
+		}
+
+		return serverResp, errors.Wrap(err, "error during connect")
+	}
+
+	if resp != nil {
+		serverResp.statusCode = resp.StatusCode
+		serverResp.body = resp.Body
+		serverResp.header = resp.Header
+	}
+	return serverResp, nil
+}
+
+func (cli *Client) checkResponseErr(serverResp serverResponse) error {
+	if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
+		return nil
+	}
+
+	var body []byte
+	var err error
+	if serverResp.body != nil {
+		bodyMax := 1 * 1024 * 1024 // 1 MiB
+		bodyR := &io.LimitedReader{
+			R: serverResp.body,
+			N: int64(bodyMax),
+		}
+		body, err = ioutil.ReadAll(bodyR)
+		if err != nil {
+			return err
+		}
+		if bodyR.N == 0 {
+			return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL)
+		}
+	}
+	if len(body) == 0 {
+		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+	}
+
+	var ct string
+	if serverResp.header != nil {
+		ct = serverResp.header.Get("Content-Type")
+	}
+
+	var errorMessage string
+	if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
+		var errorResponse types.ErrorResponse
+		if err := json.Unmarshal(body, &errorResponse); err != nil {
+			return errors.Wrap(err, "Error reading JSON")
+		}
+		errorMessage = strings.TrimSpace(errorResponse.Message)
+	} else {
+		errorMessage = strings.TrimSpace(string(body))
+	}
+
+	return errors.Wrap(errors.New(errorMessage), "Error response from daemon")
+}
+
+func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
+	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers,
+	// so that the user can't override OUR headers.
+	for k, v := range cli.customHTTPHeaders {
+		if versions.LessThan(cli.version, "1.25") && k == "User-Agent" {
+			continue
+		}
+		req.Header.Set(k, v)
+	}
+
+	if headers != nil {
+		for k, v := range headers {
+			req.Header[k] = v
+		}
+	}
+	return req
+}
+
+func encodeData(data interface{}) (*bytes.Buffer, error) {
+	params := bytes.NewBuffer(nil)
+	if data != nil {
+		if err := json.NewEncoder(params).Encode(data); err != nil {
+			return nil, err
+		}
+	}
+	return params, nil
+}
+
+func ensureReaderClosed(response serverResponse) {
+	if response.body != nil {
+		// Drain up to 512 bytes and close the body to let the Transport reuse the connection
+		io.CopyN(ioutil.Discard, response.body, 512)
+		response.body.Close()
+	}
+}
diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go
new file mode 100644
index 0000000000..fd5b914136
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/secret_create.go
@@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// SecretCreate creates a new Secret. +func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var response types.SecretCreateResponse + if err := cli.NewVersionError("1.25", "secret create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 0000000000..d093916c9a --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + if err := cli.NewVersionError("1.25", "secret inspect"); err != nil { + return swarm.Secret{}, nil, err + } + if id == "" { + return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id} + } + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id) + } + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 0000000000..a0289c9f44 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// SecretList returns the list of secrets. 
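+//
+// A minimal usage sketch (illustrative only, not part of the upstream
+// implementation); it assumes a client built with
+// NewClientWithOpts(client.FromEnv) and an existing context ctx:
+//
+//	cli, err := client.NewClientWithOpts(client.FromEnv)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	secrets, err := cli.SecretList(ctx, types.SecretListOptions{
+//		Filters: filters.NewArgs(filters.Arg("name", "my-secret")),
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, s := range secrets {
+//		fmt.Println(s.ID, s.Spec.Name)
+//	}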
+func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + if err := cli.NewVersionError("1.25", "secret list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 0000000000..c16f555804 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// SecretRemove removes a Secret. +func (cli *Client) SecretRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.25", "secret remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "secret", id) +} diff --git a/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/docker/docker/client/secret_update.go new file mode 100644 index 0000000000..164256bbc1 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SecretUpdate attempts to update a Secret +func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error { + if err := cli.NewVersionError("1.25", "secret update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/docker/docker/client/service_create.go new file mode 100644 index 0000000000..e0428bf98b --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_create.go @@ -0,0 +1,178 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/distribution/reference" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// ServiceCreate creates a new Service. 
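+//
+// A minimal usage sketch (illustrative only, not upstream code); the
+// service name and image are placeholders, and cli/ctx are assumed to
+// exist:
+//
+//	spec := swarm.ServiceSpec{
+//		Annotations: swarm.Annotations{Name: "web"},
+//		TaskTemplate: swarm.TaskSpec{
+//			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:alpine"},
+//		},
+//	}
+//	resp, err := cli.ServiceCreate(ctx, spec, types.ServiceCreateOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println("created service", resp.ID, "warnings:", resp.Warnings)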
+func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { + var response types.ServiceCreateResponse + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + // Make sure containerSpec is not nil when no runtime is set or the runtime is set to container + if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) { + service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{} + } + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/create", nil, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} + +func resolveContainerSpecImage(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.ContainerSpec.Image, encodedAuth); err != nil { + warning = digestWarning(taskSpec.ContainerSpec.Image) + } else { + taskSpec.ContainerSpec.Image = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func resolvePluginSpecRemote(ctx context.Context, cli DistributionAPIClient, taskSpec *swarm.TaskSpec, encodedAuth string) string { + var warning string + if img, imgPlatforms, err := imageDigestAndPlatforms(ctx, cli, taskSpec.PluginSpec.Remote, encodedAuth); err != nil { + warning = digestWarning(taskSpec.PluginSpec.Remote) + } else { + taskSpec.PluginSpec.Remote = img + if len(imgPlatforms) > 0 { + if taskSpec.Placement == nil { + taskSpec.Placement = &swarm.Placement{} + } + taskSpec.Placement.Platforms = imgPlatforms + } + } + return warning +} + +func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) { + distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth) + var platforms []swarm.Platform + if err != nil { + return "", nil, err + } + + imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest) + + if len(distributionInspect.Platforms) > 0 { + platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms)) + for _, p := range 
distributionInspect.Platforms {
+			// clear architecture field for arm. This is a temporary patch to address
+			// https://github.com/docker/swarmkit/issues/2294. The issue is that while
+			// image manifests report "arm" as the architecture, the node reports
+			// something like "armv7l" (includes the variant), which causes arm images
+			// to stop working with swarm mode. This patch removes the architecture
+			// constraint for arm images to ensure tasks get scheduled.
+			arch := p.Architecture
+			if strings.ToLower(arch) == "arm" {
+				arch = ""
+			}
+			platforms = append(platforms, swarm.Platform{
+				Architecture: arch,
+				OS:           p.OS,
+			})
+		}
+	}
+	return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// the image string unmodified in all other situations.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+			// ensure that image gets a default tag if none is provided
+			img, err := reference.WithDigest(namedRef, dgst)
+			if err == nil {
+				return reference.FamiliarString(img)
+			}
+		}
+	}
+	return image
+}
+
+// imageWithTagString takes an image string, and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if a canonical reference was provided.
+func imageWithTagString(image string) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		return reference.FamiliarString(reference.TagNameOnly(namedRef))
+	}
+	return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could be made smarter in the future.
+func digestWarning(image string) string {
+	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+	if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+		return errors.New("must not specify both a container spec and a plugin spec in the task template")
+	}
+	if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+		return errors.New("mismatched runtime with plugin spec")
+	}
+	if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+		return errors.New("mismatched runtime with container spec")
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/client/service_inspect.go b/vendor/github.com/docker/docker/client/service_inspect.go
new file mode 100644
index 0000000000..2801483b80
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/service_inspect.go
@@ -0,0 +1,37 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// ServiceInspectWithRaw returns the service information and the raw data.
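+//
+// A minimal usage sketch (illustrative only, not upstream code); cli, ctx
+// and serviceID are assumed to exist:
+//
+//	svc, raw, err := cli.ServiceInspectWithRaw(ctx, serviceID,
+//		types.ServiceInspectOptions{InsertDefaults: true})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(svc.Spec.Name, "raw bytes:", len(raw))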
+func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) { + if serviceID == "" { + return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID} + } + query := url.Values{} + query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults)) + serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID) + } + + body, err := ioutil.ReadAll(serverResp.body) + if err != nil { + return swarm.Service{}, nil, err + } + + var response swarm.Service + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&response) + return response, body, err +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go new file mode 100644 index 0000000000..f97ec75a5c --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -0,0 +1,39 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceList returns the list of services. +func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + if options.Status { + query.Set("status", "true") + } + + resp, err := cli.get(ctx, "/services", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var services []swarm.Service + err = json.NewDecoder(resp.body).Decode(&services) + return services, err +} diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 0000000000..906fd4059e --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "io" + "net/url" + "time" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. 
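+//
+// A minimal usage sketch (illustrative only, not upstream code); cli, ctx
+// and serviceID are assumed to exist. For services without a TTY the
+// stream is multiplexed and can be demultiplexed with pkg/stdcopy:
+//
+//	rc, err := cli.ServiceLogs(ctx, serviceID, types.ContainerLogsOptions{
+//		ShowStdout: true,
+//		ShowStderr: true,
+//		Tail:       "100",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer rc.Close()
+//	stdcopy.StdCopy(os.Stdout, os.Stderr, rc)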
+func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "since"`) + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/docker/docker/client/service_remove.go new file mode 100644 index 0000000000..953a2adf5a --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_remove.go @@ -0,0 +1,10 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ServiceRemove kills and removes a service. +func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { + resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "service", serviceID) +} diff --git a/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/docker/docker/client/service_update.go new file mode 100644 index 0000000000..c63895f74f --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_update.go @@ -0,0 +1,75 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + "strconv" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes. +// It should be the value as set *before* the update. You can find this value in the Meta field +// of swarm.Service, which can be found using ServiceInspectWithRaw. 
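+//
+// A sketch of the read-modify-write cycle the comment above describes
+// (illustrative only, not upstream code; cli, ctx and serviceID are
+// assumed to exist):
+//
+//	svc, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	svc.Spec.TaskTemplate.ForceUpdate++ // e.g. force a rolling restart
+//	resp, err := cli.ServiceUpdate(ctx, serviceID, svc.Version, svc.Spec,
+//		types.ServiceUpdateOptions{})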
+func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) { + var ( + query = url.Values{} + response = types.ServiceUpdateResponse{} + ) + + headers := map[string][]string{ + "version": {cli.version}, + } + + if options.EncodedRegistryAuth != "" { + headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth} + } + + if options.RegistryAuthFrom != "" { + query.Set("registryAuthFrom", options.RegistryAuthFrom) + } + + if options.Rollback != "" { + query.Set("rollback", options.Rollback) + } + + query.Set("version", strconv.FormatUint(version.Index, 10)) + + if err := validateServiceSpec(service); err != nil { + return response, err + } + + // ensure that the image is tagged + var resolveWarning string + switch { + case service.TaskTemplate.ContainerSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" { + service.TaskTemplate.ContainerSpec.Image = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolveContainerSpecImage(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + case service.TaskTemplate.PluginSpec != nil: + if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" { + service.TaskTemplate.PluginSpec.Remote = taggedImg + } + if options.QueryRegistry { + resolveWarning = resolvePluginSpecRemote(ctx, cli, &service.TaskTemplate, options.EncodedRegistryAuth) + } + } + + resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + if resolveWarning != "" { + response.Warnings = append(response.Warnings, resolveWarning) + } + + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 0000000000..19f59dd582 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/docker/docker/client/swarm_init.go new file mode 100644 index 0000000000..da3c1637ef --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_init.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInit initializes the swarm. 
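+//
+// A minimal sketch (illustrative only, not upstream code); the listen
+// address is a placeholder and cli/ctx are assumed to exist. The returned
+// string is the ID of the newly created manager node:
+//
+//	nodeID, err := cli.SwarmInit(ctx, swarm.InitRequest{
+//		ListenAddr: "0.0.0.0:2377",
+//	})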
+func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { + serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return "", err + } + + var response string + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/docker/docker/client/swarm_inspect.go new file mode 100644 index 0000000000..b52b67a884 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmInspect inspects the swarm. +func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { + serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Swarm{}, err + } + + var response swarm.Swarm + err = json.NewDecoder(serverResp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/docker/docker/client/swarm_join.go new file mode 100644 index 0000000000..a1cf0455d2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_join.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmJoin joins the swarm. +func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error { + resp, err := cli.post(ctx, "/swarm/join", nil, req, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/docker/docker/client/swarm_leave.go new file mode 100644 index 0000000000..90ca84b363 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_leave.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// SwarmLeave leaves the swarm. +func (cli *Client) SwarmLeave(ctx context.Context, force bool) error { + query := url.Values{} + if force { + query.Set("force", "1") + } + resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 0000000000..d2412f7d44 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,14 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUnlock unlocks locked swarm. +func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go new file mode 100644 index 0000000000..56a5bea761 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -0,0 +1,22 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "fmt" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// SwarmUpdate updates the swarm. 
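+//
+// A sketch (illustrative only, not upstream code) of the inspect-then-update
+// cycle with a worker-token rotation; cli and ctx are assumed to exist:
+//
+//	sw, err := cli.SwarmInspect(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	err = cli.SwarmUpdate(ctx, sw.Version, sw.Spec, swarm.UpdateFlags{
+//		RotateWorkerToken: true,
+//	})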
+func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
+	query := url.Values{}
+	query.Set("version", strconv.FormatUint(version.Index, 10))
+	query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
+	query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
+	query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey))
+	resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
+	ensureReaderClosed(resp)
+	return err
+}
diff --git a/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/docker/docker/client/task_inspect.go
new file mode 100644
index 0000000000..44d40ba5ae
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_inspect.go
@@ -0,0 +1,32 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"io/ioutil"
+
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// TaskInspectWithRaw returns the task information and its raw representation.
+func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
+	if taskID == "" {
+		return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID}
+	}
+	serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
+	defer ensureReaderClosed(serverResp)
+	if err != nil {
+		return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
+	}
+
+	body, err := ioutil.ReadAll(serverResp.body)
+	if err != nil {
+		return swarm.Task{}, nil, err
+	}
+
+	var response swarm.Task
+	rdr := bytes.NewReader(body)
+	err = json.NewDecoder(rdr).Decode(&response)
+	return response, body, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go
new file mode 100644
index 0000000000..4869b44493
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_list.go
@@ -0,0 +1,35 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"encoding/json"
+	"net/url"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/swarm"
+)
+
+// TaskList returns the list of tasks.
+func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) {
+	query := url.Values{}
+
+	if options.Filters.Len() > 0 {
+		filterJSON, err := filters.ToJSON(options.Filters)
+		if err != nil {
+			return nil, err
+		}
+
+		query.Set("filters", filterJSON)
+	}
+
+	resp, err := cli.get(ctx, "/tasks", query, nil)
+	defer ensureReaderClosed(resp)
+	if err != nil {
+		return nil, err
+	}
+
+	var tasks []swarm.Task
+	err = json.NewDecoder(resp.body).Decode(&tasks)
+	return tasks, err
+}
diff --git a/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/docker/docker/client/task_logs.go
new file mode 100644
index 0000000000..6222fab577
--- /dev/null
+++ b/vendor/github.com/docker/docker/client/task_logs.go
@@ -0,0 +1,51 @@
+package client // import "github.com/docker/docker/client"
+
+import (
+	"context"
+	"io"
+	"net/url"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	timetypes "github.com/docker/docker/api/types/time"
+)
+
+// TaskLogs returns the logs generated by a task in an io.ReadCloser.
+// It's up to the caller to close the stream.
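+//
+// A minimal usage sketch (illustrative only, not upstream code); cli, ctx
+// and taskID are assumed to exist, and the stream must be closed by the
+// caller:
+//
+//	rc, err := cli.TaskLogs(ctx, taskID, types.ContainerLogsOptions{ShowStdout: true})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	defer rc.Close()
+//	io.Copy(os.Stdout, rc)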
+func (cli *Client) TaskLogs(ctx context.Context, taskID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/tasks/"+taskID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/docker/docker/client/transport.go new file mode 100644 index 0000000000..5541344366 --- /dev/null +++ b/vendor/github.com/docker/docker/client/transport.go @@ -0,0 +1,17 @@ +package client // import "github.com/docker/docker/client" + +import ( + "crypto/tls" + "net/http" +) + +// resolveTLSConfig attempts to resolve the TLS configuration from the +// RoundTripper. +func resolveTLSConfig(transport http.RoundTripper) *tls.Config { + switch tr := transport.(type) { + case *http.Transport: + return tr.TLSClientConfig + default: + return nil + } +} diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 0000000000..7f3ff44eb8 --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,34 @@ +package client // import "github.com/docker/docker/client" + +import ( + "net/url" + "regexp" + + "github.com/docker/docker/api/types/filters" +) + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} + +// getFiltersQuery returns a url query with "filters" query term, based on the +// filters provided. +func getFiltersQuery(f filters.Args) (url.Values, error) { + query := url.Values{} + if f.Len() > 0 { + filterJSON, err := filters.ToJSON(f) + if err != nil { + return query, err + } + query.Set("filters", filterJSON) + } + return query, nil +} diff --git a/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/docker/docker/client/version.go new file mode 100644 index 0000000000..8f17ff4e87 --- /dev/null +++ b/vendor/github.com/docker/docker/client/version.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" +) + +// ServerVersion returns information of the docker client and server host. 
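+//
+// A minimal usage sketch (illustrative only, not upstream code); cli and
+// ctx are assumed to exist:
+//
+//	v, err := cli.ServerVersion(ctx)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Printf("server %s, API %s, OS %s\n", v.Version, v.APIVersion, v.Os)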
+func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { + resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return types.Version{}, err + } + + var server types.Version + err = json.NewDecoder(resp.body).Decode(&server) + return server, err +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go new file mode 100644 index 0000000000..92761b3c63 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" +) + +// VolumeCreate creates a volume in the docker host. +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { + var volume types.Volume + resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volume, err + } + err = json.NewDecoder(resp.body).Decode(&volume) + return volume, err +} diff --git a/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/docker/docker/client/volume_inspect.go new file mode 100644 index 0000000000..e20b2c67c7 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types" +) + +// VolumeInspect returns the information about a specific volume in the docker host. +func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { + volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) + return volume, err +} + +// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation +func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + if volumeID == "" { + return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + + var volume types.Volume + resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volume, nil, wrapResponseError(err, resp, "volume", volumeID) + } + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return volume, nil, err + } + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&volume) + return volume, body, err +} diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go new file mode 100644 index 0000000000..942498dde2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -0,0 +1,33 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" +) + +// VolumeList returns the volumes configured in the docker host. 
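+//
+// A minimal usage sketch (illustrative only, not upstream code); cli and
+// ctx are assumed to exist. Note that this method takes filters.Args
+// directly rather than an options struct:
+//
+//	list, err := cli.VolumeList(ctx, filters.NewArgs(filters.Arg("dangling", "true")))
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, v := range list.Volumes {
+//		fmt.Println(v.Name, v.Driver)
+//	}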
+func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody + query := url.Values{} + + if filter.Len() > 0 { + //nolint:staticcheck // ignore SA1019 for old code + filterJSON, err := filters.ToParamWithVersion(cli.version, filter) + if err != nil { + return volumes, err + } + query.Set("filters", filterJSON) + } + resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return volumes, err + } + + err = json.NewDecoder(resp.body).Decode(&volumes) + return volumes, err +} diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go new file mode 100644 index 0000000000..6e324708f2 --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" +) + +// VolumesPrune requests the daemon to delete unused data +func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) (types.VolumesPruneReport, error) { + var report types.VolumesPruneReport + + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + + query, err := getFiltersQuery(pruneFilters) + if err != nil { + return report, err + } + + serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + if err != nil { + return report, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving volume prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go new file mode 100644 index 0000000000..79decdafab --- /dev/null +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + + "github.com/docker/docker/api/types/versions" +) + +// VolumeRemove removes a volume from the docker host. +func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { + query := url.Values{} + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } + } + resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "volume", volumeID) +} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 0000000000..61e7456b4e --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,69 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. 
+type ErrConflict interface {
+	Conflict()
+}
+
+// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action
+type ErrUnauthorized interface {
+	Unauthorized()
+}
+
+// ErrUnavailable signals that the requested action/subsystem is not available.
+type ErrUnavailable interface {
+	Unavailable()
+}
+
+// ErrForbidden signals that the requested action cannot be performed under any circumstances.
+// When an ErrForbidden is returned, the caller should never retry the action.
+type ErrForbidden interface {
+	Forbidden()
+}
+
+// ErrSystem signals that some internal error occurred.
+// An example of this would be a failed mount request.
+type ErrSystem interface {
+	System()
+}
+
+// ErrNotModified signals that an action can't be performed because it's already in the desired state
+type ErrNotModified interface {
+	NotModified()
+}
+
+// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured.
+type ErrNotImplemented interface {
+	NotImplemented()
+}
+
+// ErrUnknown signals that the kind of error that occurred is not known.
+type ErrUnknown interface {
+	Unknown()
+}
+
+// ErrCancelled signals that the action was cancelled.
+type ErrCancelled interface {
+	Cancelled()
+}
+
+// ErrDeadline signals that the deadline was reached before the action completed.
+type ErrDeadline interface {
+	DeadlineExceeded()
+}
+
+// ErrDataLoss indicates that data was lost or there is data corruption.
+type ErrDataLoss interface {
+	DataLoss()
+}
diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go
new file mode 100644
index 0000000000..c211f174fc
--- /dev/null
+++ b/vendor/github.com/docker/docker/errdefs/doc.go
@@ -0,0 +1,8 @@
+// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors.
+// Errors that cross the package boundary should implement one (and only one) of these interfaces.
+//
+// Packages should not reference these interfaces directly, only implement them.
+// To check if a particular error implements one of these interfaces, there are helper
+// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly.
+// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`).
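+//
+// A sketch (illustrative only, not part of the upstream package) of how a
+// package would implement and consume one of these classes; the type and
+// function names are placeholders:
+//
+//	type notFoundError struct{ error }
+//
+//	func (notFoundError) NotFound() {}
+//
+//	func lookup(id string) error {
+//		return notFoundError{fmt.Errorf("no object with id %s", id)}
+//	}
+//
+//	// callers classify the error rather than type-asserting it:
+//	if errdefs.IsNotFound(lookup("abc")) {
+//		// map to a 404, retry with a different id, etc.
+//	}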
+package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 0000000000..fe06fb6f70 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,279 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +func (e errNotFound) Unwrap() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +func (e errInvalidParameter) Unwrap() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +func (e errConflict) Unwrap() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +func (e errUnauthorized) Unwrap() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +func (e errUnavailable) Unwrap() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +func (e errForbidden) Unwrap() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +func (e errSystem) Unwrap() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +func (e errNotModified) 
Unwrap() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +func (e errNotImplemented) Unwrap() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +func (e errUnknown) Unwrap() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +func (e errCancelled) Unwrap() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +func (e errDeadline) Unwrap() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +func (e errDataLoss) Unwrap() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil || IsDataLoss(err) { + return err + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 0000000000..07552f1cc1 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,191 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "fmt" + "net/http" + + containerderrors "github.com/containerd/containerd/errdefs" + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetHTTPErrorStatusCode retrieves status code from error message. 
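+//
+// A small sketch (illustrative only, not upstream code) of the mapping this
+// function implements:
+//
+//	err := errdefs.NotFound(errors.New("no such container"))
+//	code := errdefs.GetHTTPErrorStatusCode(err) // http.StatusNotFound (404)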
+func GetHTTPErrorStatusCode(err error) int {
+	if err == nil {
+		logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling")
+		return http.StatusInternalServerError
+	}
+
+	var statusCode int
+
+	// Stop right there
+	// Are you sure you should be adding a new error class here? Do one of the existing ones work?
+
+	// Note that the below functions are already checking the error causal chain for matches.
+	switch {
+	case IsNotFound(err):
+		statusCode = http.StatusNotFound
+	case IsInvalidParameter(err):
+		statusCode = http.StatusBadRequest
+	case IsConflict(err):
+		statusCode = http.StatusConflict
+	case IsUnauthorized(err):
+		statusCode = http.StatusUnauthorized
+	case IsUnavailable(err):
+		statusCode = http.StatusServiceUnavailable
+	case IsForbidden(err):
+		statusCode = http.StatusForbidden
+	case IsNotModified(err):
+		statusCode = http.StatusNotModified
+	case IsNotImplemented(err):
+		statusCode = http.StatusNotImplemented
+	case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err):
+		statusCode = http.StatusInternalServerError
+	default:
+		statusCode = statusCodeFromGRPCError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		statusCode = statusCodeFromContainerdError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		statusCode = statusCodeFromDistributionError(err)
+		if statusCode != http.StatusInternalServerError {
+			return statusCode
+		}
+		if e, ok := err.(causer); ok {
+			return GetHTTPErrorStatusCode(e.Cause())
+		}
+
+		logrus.WithFields(logrus.Fields{
+			"module":     "api",
+			"error_type": fmt.Sprintf("%T", err),
+		}).Debugf("FIXME: Got an API error which does not match any expected type!!!: %+v", err)
+	}
+
+	if statusCode == 0 {
+		statusCode = http.StatusInternalServerError
+	}
+
+	return statusCode
+}
+
+// FromStatusCode creates an errdef error, based on the provided HTTP status-code
+func FromStatusCode(err error, statusCode int) error {
+	if err == nil {
+		return err
+	}
+	switch statusCode {
+	case http.StatusNotFound:
+		err = NotFound(err)
+	case http.StatusBadRequest:
+		err = InvalidParameter(err)
+	case http.StatusConflict:
+		err = Conflict(err)
+	case http.StatusUnauthorized:
+		err = Unauthorized(err)
+	case http.StatusServiceUnavailable:
+		err = Unavailable(err)
+	case http.StatusForbidden:
+		err = Forbidden(err)
+	case http.StatusNotModified:
+		err = NotModified(err)
+	case http.StatusNotImplemented:
+		err = NotImplemented(err)
+	case http.StatusInternalServerError:
+		if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) {
+			err = System(err)
+		}
+	default:
+		logrus.WithFields(logrus.Fields{
+			"module":      "api",
+			"status_code": fmt.Sprintf("%d", statusCode),
+		}).Debugf("FIXME: Got a status-code for which error does not match any expected type!!!: %d", statusCode)
+
+		switch {
+		case statusCode >= 200 && statusCode < 400:
+			// success or redirect: leave the error as-is
+		case statusCode >= 400 && statusCode < 500:
+			// it's a client error
+			err = InvalidParameter(err)
+		case statusCode >= 500 && statusCode < 600:
+			err = System(err)
+		default:
+			err = Unknown(err)
+		}
+	}
+	return err
+}
+
+// statusCodeFromGRPCError returns status code according to gRPC error
+func statusCodeFromGRPCError(err error) int {
+	switch status.Code(err) {
+	case codes.InvalidArgument: // code 3
+		return http.StatusBadRequest
+	case codes.NotFound: // code 5
+		return http.StatusNotFound
+	case codes.AlreadyExists: // code 6
+		return http.StatusConflict
+	case
codes.PermissionDenied: // code 7 + return http.StatusForbidden + case codes.FailedPrecondition: // code 9 + return http.StatusBadRequest + case codes.Unauthenticated: // code 16 + return http.StatusUnauthorized + case codes.OutOfRange: // code 11 + return http.StatusBadRequest + case codes.Unimplemented: // code 12 + return http.StatusNotImplemented + case codes.Unavailable: // code 14 + return http.StatusServiceUnavailable + default: + // codes.Canceled(1) + // codes.Unknown(2) + // codes.DeadlineExceeded(4) + // codes.ResourceExhausted(8) + // codes.Aborted(10) + // codes.Internal(13) + // codes.DataLoss(15) + return http.StatusInternalServerError + } +} + +// statusCodeFromDistributionError returns status code according to registry errcode +// code is loosely based on errcode.ServeJSON() in docker/distribution +func statusCodeFromDistributionError(err error) int { + switch errs := err.(type) { + case errcode.Errors: + if len(errs) < 1 { + return http.StatusInternalServerError + } + if _, ok := errs[0].(errcode.ErrorCoder); ok { + return statusCodeFromDistributionError(errs[0]) + } + case errcode.ErrorCoder: + return errs.ErrorCode().Descriptor().HTTPStatusCode + } + return http.StatusInternalServerError +} + +// statusCodeFromContainerdError returns status code for containerd errors when +// consumed directly (not through gRPC) +func statusCodeFromContainerdError(err error) int { + switch { + case containerderrors.IsInvalidArgument(err): + return http.StatusBadRequest + case containerderrors.IsNotFound(err): + return http.StatusNotFound + case containerderrors.IsAlreadyExists(err): + return http.StatusConflict + case containerderrors.IsFailedPrecondition(err): + return http.StatusPreconditionFailed + case containerderrors.IsUnavailable(err): + return http.StatusServiceUnavailable + case containerderrors.IsNotImplemented(err): + return http.StatusNotImplemented + default: + return http.StatusInternalServerError + } +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 0000000000..3abf07d0c3 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,107 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok 
+}
+
+// IsForbidden returns if the passed in error is an ErrForbidden
+func IsForbidden(err error) bool {
+	_, ok := getImplementer(err).(ErrForbidden)
+	return ok
+}
+
+// IsSystem returns if the passed in error is an ErrSystem
+func IsSystem(err error) bool {
+	_, ok := getImplementer(err).(ErrSystem)
+	return ok
+}
+
+// IsNotModified returns if the passed in error is a NotModified error
+func IsNotModified(err error) bool {
+	_, ok := getImplementer(err).(ErrNotModified)
+	return ok
+}
+
+// IsNotImplemented returns if the passed in error is an ErrNotImplemented
+func IsNotImplemented(err error) bool {
+	_, ok := getImplementer(err).(ErrNotImplemented)
+	return ok
+}
+
+// IsUnknown returns if the passed in error is an ErrUnknown
+func IsUnknown(err error) bool {
+	_, ok := getImplementer(err).(ErrUnknown)
+	return ok
+}
+
+// IsCancelled returns if the passed in error is an ErrCancelled
+func IsCancelled(err error) bool {
+	_, ok := getImplementer(err).(ErrCancelled)
+	return ok
+}
+
+// IsDeadline returns if the passed in error is an ErrDeadline
+func IsDeadline(err error) bool {
+	_, ok := getImplementer(err).(ErrDeadline)
+	return ok
+}
+
+// IsDataLoss returns if the passed in error is an ErrDataLoss
+func IsDataLoss(err error) bool {
+	_, ok := getImplementer(err).(ErrDataLoss)
+	return ok
+}
diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
new file mode 100644
index 0000000000..5e6310fdcd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go
@@ -0,0 +1,93 @@
+package homedir // import "github.com/docker/docker/pkg/homedir"
+
+import (
+	"errors"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// GetRuntimeDir returns XDG_RUNTIME_DIR.
+// XDG_RUNTIME_DIR is typically configured via pam_systemd.
+// GetRuntimeDir returns a non-nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func GetRuntimeDir() (string, error) {
+	if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" {
+		return xdgRuntimeDir, nil
+	}
+	return "", errors.New("could not get XDG_RUNTIME_DIR")
+}
+
+// StickRuntimeDirContents sets the sticky bit on files that are under
+// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system.
+//
+// StickRuntimeDirContents returns the slice of files it modified.
+// StickRuntimeDirContents returns a nil error if XDG_RUNTIME_DIR is not set.
+//
+// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html
+func StickRuntimeDirContents(files []string) ([]string, error) {
+	runtimeDir, err := GetRuntimeDir()
+	if err != nil {
+		// ignore error if runtimeDir is empty
+		return nil, nil
+	}
+	runtimeDir, err = filepath.Abs(runtimeDir)
+	if err != nil {
+		return nil, err
+	}
+	var sticked []string
+	for _, f := range files {
+		f, err = filepath.Abs(f)
+		if err != nil {
+			return sticked, err
+		}
+		if strings.HasPrefix(f, runtimeDir+"/") {
+			if err = stick(f); err != nil {
+				return sticked, err
+			}
+			sticked = append(sticked, f)
+		}
+	}
+	return sticked, nil
+}
+
+func stick(f string) error {
+	st, err := os.Stat(f)
+	if err != nil {
+		return err
+	}
+	m := st.Mode()
+	m |= os.ModeSticky
+	return os.Chmod(f, m)
+}
+
+// GetDataHome returns XDG_DATA_HOME.
+// GetDataHome returns $HOME/.local/share and a nil error if XDG_DATA_HOME is not set.
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 0000000000..67ab9e9b31 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,27 @@ +// +build !linux + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" +) + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 0000000000..441bd727b6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,38 @@ +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + "os/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
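A short usage sketch for the XDG helpers above (Linux-only; on other systems the homedir_others.go stubs above return errors instead). The application directory and file name are hypothetical:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// Prefer $XDG_CONFIG_HOME, falling back to $HOME/.config.
	cfgDir, err := homedir.GetConfigHome()
	if err != nil {
		fmt.Println("neither XDG_CONFIG_HOME nor HOME is set:", err)
		return
	}
	fmt.Println(filepath.Join(cfgDir, "myapp", "config.yaml"))
}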
+func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 0000000000..2f81813b28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/docker/docker/pkg/idtools/idtools.go new file mode 100644 index 0000000000..25a57b231e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -0,0 +1,241 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// IDMap contains a single entry for user namespace range remapping. An array +// of IDMap entries represents the structure that will be provided to the Linux +// kernel for creating a user namespace. +type IDMap struct { + ContainerID int `json:"container_id"` + HostID int `json:"host_id"` + Size int `json:"size"` +} + +type subIDRange struct { + Start int + Length int +} + +type ranges []subIDRange + +func (e ranges) Len() int { return len(e) } +func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } +func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } + +const ( + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" +) + +// MkdirAllAndChown creates a directory (include any along the path) and then modifies +// ownership to the requested uid/gid. If the directory already exists, this +// function will still change ownership and permissions. +func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, true) +} + +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership and permissions. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies +// ownership ONLY of newly created directories to the requested uid/gid. If the +// directories along the path exist, no change of ownership or permissions will be performed +func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, false) +} + +// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
+// If the maps are empty, then the root uid/gid will default to "real" 0/0 +func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err + } + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err + } + return uid, gid, nil +} + +// toContainer takes an id mapping, and uses it to translate a +// host ID to the remapped ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toContainer(hostID int, idMap []IDMap) (int, error) { + if idMap == nil { + return hostID, nil + } + for _, m := range idMap { + if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { + contID := m.ContainerID + (hostID - m.HostID) + return contID, nil + } + } + return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) +} + +// toHost takes an id mapping and a remapped ID, and translates the +// ID to the mapped host ID. If no map is provided, then the translation +// assumes a 1-to-1 mapping and returns the passed in id +func toHost(contID int, idMap []IDMap) (int, error) { + if idMap == nil { + return contID, nil + } + for _, m := range idMap { + if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { + hostID := m.HostID + (contID - m.ContainerID) + return hostID, nil + } + } + return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) +} + +// Identity is either a UID and GID pair or a SID (but not both) +type Identity struct { + UID int + GID int + SID string +} + +// IdentityMapping contains the mappings of UIDs and GIDs +type IdentityMapping struct { + uids []IDMap + gids []IDMap +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { + return &IdentityMapping{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return Identity{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IdentityMapping) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs returns the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) UIDs() []IDMap { + return i.uids +} + +// GIDs returns the GID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) GIDs() []IDMap { + return i.gids +} + +func createIDMap(subidRanges ranges) []IDMap { + idMap := []IDMap{} + + containerID := 0 + for _, idrange := range subidRanges { + idMap = append(idMap, IDMap{ + ContainerID: containerID, + HostID: idrange.Start, + Size: idrange.Length, + }) + containerID = containerID + idrange.Length + } + return idMap +} + +func parseSubuid(username string) (ranges, error) { + return parseSubidFile(subuidFileName, username) +} + +func parseSubgid(username string) (ranges, error) { + return parseSubidFile(subgidFileName, username) +} + +// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid) +// and return all found ranges for a specified username.
If the special value +// "ALL" is supplied for username, then all ranges in the file will be returned +func parseSubidFile(path, username string) (ranges, error) { + var rangeList ranges + + subidFile, err := os.Open(path) + if err != nil { + return rangeList, err + } + defer subidFile.Close() + + s := bufio.NewScanner(subidFile) + for s.Scan() { + text := strings.TrimSpace(s.Text()) + if text == "" || strings.HasPrefix(text, "#") { + continue + } + parts := strings.Split(text, ":") + if len(parts) != 3 { + return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) + } + if parts[0] == username || username == "ALL" { + startid, err := strconv.Atoi(parts[1]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + length, err := strconv.Atoi(parts[2]) + if err != nil { + return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) + } + rangeList = append(rangeList, subIDRange{startid, length}) + } + } + + return rangeList, s.Err() +} + +// CurrentIdentity returns the identity of the current process +func CurrentIdentity() Identity { + return Identity{UID: os.Getuid(), GID: os.Getegid()} +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go new file mode 100644 index 0000000000..e7d25ee471 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -0,0 +1,295 @@ +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + "sync" + "syscall" + + "github.com/docker/docker/pkg/system" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +var ( + entOnce sync.Once + getentCmd string +) + +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + // make an array containing the original path asked for, plus (for mkAll == true) + // all path components leading up to the complete path that don't exist before we MkdirAll + // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't + // chown the full directory path if it exists + + var paths []string + + stat, err := system.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if !chownExisting { + return nil + } + + // short-circuit--we were called with an existing directory and chown was requested + return setPermissions(path, mode, owner.UID, owner.GID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} + } + + if mkAll { + // walk back to "/" looking for directories which do not exist + // and add them to the paths array for chown after creation + dirPath := path + for { + dirPath = filepath.Dir(dirPath) + if dirPath == "/" { + break + } + if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { + paths = append(paths, dirPath) + } + } + if err := system.MkdirAll(path, mode); err != nil { + return err + } + } else { + if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { + return err + } + } + // even if it existed, we will chown the requested path + any subpaths that + // didn't exist when we called MkdirAll + for _, pathComponent := range paths { + if err := setPermissions(pathComponent, mode, owner.UID, owner.GID, nil); err != nil { + return err + } + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +func CanAccess(path string, pair Identity) bool { + statInfo, err := system.Stat(path) + if err != nil { + return false + } + fileMode := os.FileMode(statInfo.Mode()) + permBits := fileMode.Perm() + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) +} + +func accessible(isOwner, isGroup bool, perms os.FileMode) bool { + if isOwner && (perms&0100 == 0100) { + return true + } + if isGroup && (perms&0010 == 0010) { + return true + } + if perms&0001 == 0001 { + return true + } + return false +} + +// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUser(name string) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUser(name) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + usr, err = getentUser(name) + if err != nil { + return user.User{}, err + } + return usr, nil +} + +// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupUID(uid int) (user.User, error) { + // first try a local system files lookup using existing capabilities + usr, err := user.LookupUid(uid) + if err == nil { + return usr, nil + } + // local files lookup failed; attempt to call `getent` to query configured passwd dbs + return getentUser(strconv.Itoa(uid)) +} + +func getentUser(name string) (user.User, error) { + reader, err := callGetent("passwd", name) + if err != nil { + return user.User{}, err + } + users, err := user.ParsePasswd(reader) + if err != nil { + return user.User{}, err + } + if len(users) == 0 { + return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", name) + } + return users[0], nil +} + +// LookupGroup uses traditional local system files lookup (from 
libcontainer/user) on a group name, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGroup(name string) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGroup(name) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(name) +} + +// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, +// followed by a call to `getent` for supporting host configured non-files passwd and group dbs +func LookupGID(gid int) (user.Group, error) { + // first try a local system files lookup using existing capabilities + group, err := user.LookupGid(gid) + if err == nil { + return group, nil + } + // local files lookup failed; attempt to call `getent` to query configured group dbs + return getentGroup(strconv.Itoa(gid)) +} + +func getentGroup(name string) (user.Group, error) { + reader, err := callGetent("group", name) + if err != nil { + return user.Group{}, err + } + groups, err := user.ParseGroup(reader) + if err != nil { + return user.Group{}, err + } + if len(groups) == 0 { + return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", name) + } + return groups[0], nil +} + +func callGetent(database, key string) (io.Reader, error) { + entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) + // if no `getent` command on host, can't do anything else + if getentCmd == "" { + return nil, fmt.Errorf("unable to find getent command") + } + out, err := execCmd(getentCmd, database, key) + if err != nil { + exitCode, errC := system.GetExitCode(err) + if errC != nil { + return nil, err + } + switch exitCode { + case 1: + return nil, fmt.Errorf("getent reported invalid parameters/database unknown") + case 2: + return nil, fmt.Errorf("getent unable to find entry %q in %s database", key, database) + case 3: + return nil, fmt.Errorf("getent database doesn't support enumeration") + default: + return nil, err + } + + } + return bytes.NewReader(out), nil +} + +// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. +// Likewise for setting permissions. 
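A sketch of how the lookup helpers above are typically combined into an idtools.Identity; the username is illustrative, and resolution tries local files first, then falls back to getent:

package idexample

import "github.com/docker/docker/pkg/idtools"

// identityFor resolves a username to an Identity using the
// files-then-getent lookups implemented above.
func identityFor(name string) (idtools.Identity, error) {
	usr, err := idtools.LookupUser(name) // e.g. "dockremap"
	if err != nil {
		return idtools.Identity{}, err
	}
	return idtools.Identity{UID: usr.Uid, GID: usr.Gid}, nil
}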
+func setPermissions(p string, mode os.FileMode, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if os.FileMode(stat.Mode()).Perm() != mode.Perm() { + if err := os.Chmod(p, mode.Perm()); err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} + +// NewIdentityMapping takes a requested username and +// using the data from /etc/sub{uid,gid} ranges, creates the +// proper uid and gid remapping ranges for that user/group pair +func NewIdentityMapping(name string) (*IdentityMapping, error) { + usr, err := LookupUser(name) + if err != nil { + return nil, fmt.Errorf("Could not get user for username %s: %v", name, err) + } + + subuidRanges, err := lookupSubUIDRanges(usr) + if err != nil { + return nil, err + } + subgidRanges, err := lookupSubGIDRanges(usr) + if err != nil { + return nil, err + } + + return &IdentityMapping{ + uids: subuidRanges, + gids: subgidRanges, + }, nil +} + +func lookupSubUIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubuid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubuid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, errors.Errorf("no subuid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} + +func lookupSubGIDRanges(usr user.User) ([]IDMap, error) { + rangeList, err := parseSubgid(strconv.Itoa(usr.Uid)) + if err != nil { + return nil, err + } + if len(rangeList) == 0 { + rangeList, err = parseSubgid(usr.Name) + if err != nil { + return nil, err + } + } + if len(rangeList) == 0 { + return nil, errors.Errorf("no subgid ranges found for user %q", usr.Name) + } + return createIDMap(rangeList), nil +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go new file mode 100644 index 0000000000..35ede0fffa --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -0,0 +1,25 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +// This is currently a wrapper around MkdirAll, however, since currently +// permissions aren't set through this path, the identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be supported here +// too.
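To make the remapping arithmetic concrete, a worked example with a single illustrative subordinate range, in which container IDs 0-65535 are backed by host IDs 100000-165535:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	maps := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	m := idtools.NewIDMappingsFromMaps(maps, maps)

	root := m.RootPair()
	fmt.Println(root.UID, root.GID) // 100000 100000

	// toHost computes 100000 + (1000 - 0) = 101000.
	host, err := m.ToHost(idtools.Identity{UID: 1000, GID: 1000})
	if err != nil {
		panic(err)
	}
	fmt.Println(host.UID, host.GID) // 101000 101000
}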
+func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode); err != nil { + return err + } + return nil +} + +// CanAccess takes a valid (existing) directory and a uid, gid pair and determines +// if that uid, gid pair has access (execute bit) to the directory +// Windows does not require/support this function, so always return true +func CanAccess(path string, identity Identity) bool { + return true +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go new file mode 100644 index 0000000000..bf7ae0564b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -0,0 +1,164 @@ +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "regexp" + "sort" + "strconv" + "strings" + "sync" +) + +// add a user and/or group to Linux /etc/passwd, /etc/group using standard +// Linux distribution commands: +// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group +// useradd -r -s /bin/false + +var ( + once sync.Once + userCommand string + idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) +) + +const ( + // default length for a UID/GID subordinate range + defaultRangeLen = 65536 + defaultRangeStart = 100000 +) + +// AddNamespaceRangesUser takes a username and uses the standard system +// utility to create a system user/group pair used to hold the +// /etc/sub{uid,gid} ranges which will be used for user namespace +// mapping ranges in containers. +func AddNamespaceRangesUser(name string) (int, int, error) { + if err := addUser(name); err != nil { + return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) + } + + // Query the system for the created uid and gid pair + out, err := execCmd("id", name) + if err != nil { + return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) + } + matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) + if len(matches) != 3 { + return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) + } + uid, err := strconv.Atoi(matches[1]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) + } + gid, err := strconv.Atoi(matches[2]) + if err != nil { + return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) + } + + // Now we need to create the subuid/subgid ranges for our new user/group (system users + // do not get auto-created ranges in subuid/subgid) + + if err := createSubordinateRanges(name); err != nil { + return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) + } + return uid, gid, nil +} + +func addUser(name string) error { + once.Do(func() { + // set up which commands are used for adding users/groups dependent on distro + if _, err := resolveBinary("adduser"); err == nil { + userCommand = "adduser" + } else if _, err := resolveBinary("useradd"); err == nil { + userCommand = "useradd" + } + }) + var args []string + switch userCommand { + case "adduser": + args = []string{"--system", "--shell", "/bin/false", "--no-create-home", "--disabled-login", "--disabled-password", "--group", name} + case "useradd": + args = []string{"-r", "-s", "/bin/false", name} + default: + return fmt.Errorf("cannot add user; no useradd/adduser binary found") + } + + if out, err := execCmd(userCommand, args...); err != nil { + return 
fmt.Errorf("failed to add user with error: %v; output: %q", err, string(out)) + } + return nil +} + +func createSubordinateRanges(name string) error { + + // first, we should verify that ranges weren't automatically created + // by the distro tooling + ranges, err := parseSubuid(name) + if err != nil { + return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no UID ranges; let's create one + startID, err := findNextUIDRange() + if err != nil { + return fmt.Errorf("Can't find available subuid range: %v", err) + } + out, err := execCmd("usermod", "-v", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) + if err != nil { + return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) + } + } + + ranges, err = parseSubgid(name) + if err != nil { + return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) + } + if len(ranges) == 0 { + // no GID ranges; let's create one + startID, err := findNextGIDRange() + if err != nil { + return fmt.Errorf("Can't find available subgid range: %v", err) + } + out, err := execCmd("usermod", "-w", fmt.Sprintf("%d-%d", startID, startID+defaultRangeLen-1), name) + if err != nil { + return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) + } + } + return nil +} + +func findNextUIDRange() (int, error) { + ranges, err := parseSubuid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextGIDRange() (int, error) { + ranges, err := parseSubgid("ALL") + if err != nil { + return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) + } + sort.Sort(ranges) + return findNextRangeStart(ranges) +} + +func findNextRangeStart(rangeList ranges) (int, error) { + startID := defaultRangeStart + for _, arange := range rangeList { + if wouldOverlap(arange, startID) { + startID = arange.Start + arange.Length + } + } + return startID, nil +} + +func wouldOverlap(arange subIDRange, ID int) bool { + low := ID + high := ID + defaultRangeLen + if (low >= arange.Start && low <= arange.Start+arange.Length) || + (high <= arange.Start+arange.Length && high >= arange.Start) { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go new file mode 100644 index 0000000000..e7c4d63118 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import "fmt" + +// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair +// and calls the appropriate helper function to add the group and then +// the user to the group in /etc/group and /etc/passwd respectively. 
+func AddNamespaceRangesUser(name string) (int, int, error) { + return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") +} diff --git a/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go new file mode 100644 index 0000000000..1e2d4a7a75 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +package idtools // import "github.com/docker/docker/pkg/idtools" + +import ( + "fmt" + "os/exec" + "path/filepath" +) + +func resolveBinary(binname string) (string, error) { + binaryPath, err := exec.LookPath(binname) + if err != nil { + return "", err + } + resolvedPath, err := filepath.EvalSymlinks(binaryPath) + if err != nil { + return "", err + } + // only return no error if the final resolved binary basename + // matches what was searched for + if filepath.Base(resolvedPath) == binname { + return resolvedPath, nil + } + return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) +} + +func execCmd(cmd string, arg ...string) ([]byte, error) { + execCmd := exec.Command(cmd, arg...) + return execCmd.CombinedOutput() +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 0000000000..466f79294b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000000..87514b643d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,187 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. 
Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. +type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond + buf []*fixedBuffer + bufLen int + closeErr error // error to return from next Read. set to nil if not closed. +} + +// NewBytesPipe creates new BytesPipe, initialized by specified slice. +// If buf is nil, then it will be initialized with slice which cap is 64. +// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). +func NewBytesPipe() *BytesPipe { + bp := &BytesPipe{} + bp.buf = append(bp.buf, getBuffer(minCap)) + bp.wait = sync.NewCond(&bp.mu) + return bp +} + +// Write writes p to BytesPipe. +// It can allocate new []byte slices in a process of writing. +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + + written := 0 +loop0: + for { + if bp.closeErr != nil { + bp.mu.Unlock() + return written, ErrClosed + } + + if len(bp.buf) == 0 { + bp.buf = append(bp.buf, getBuffer(64)) + } + // get the last buffer + b := bp.buf[len(bp.buf)-1] + + n, err := b.Write(p) + written += n + bp.bufLen += n + + // errBufferFull is an error we expect to get if the buffer is full + if err != nil && err != errBufferFull { + bp.wait.Broadcast() + bp.mu.Unlock() + return written, err + } + + // if there was enough room to write all then break + if len(p) == n { + break + } + + // more data: write to the next slice + p = p[n:] + + // make sure the buffer doesn't grow too big from this write + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + if bp.closeErr != nil { + continue loop0 + } + } + + // add new byte slice to the buffers slice and continue writing + nextCap := b.Cap() * 2 + if nextCap > maxCap { + nextCap = maxCap + } + bp.buf = append(bp.buf, getBuffer(nextCap)) + } + bp.wait.Broadcast() + bp.mu.Unlock() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) +} + +// Read reads bytes from BytesPipe. +// Data could be read only once. 
+func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 0000000000..534d66ac26 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. 
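Typical use of the single-file atomic writer defined above: data lands in a ".tmp-" sibling first, is synced and chmodded, and is then renamed over the destination, so readers never observe a partial file. The path and contents here are illustrative:

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// The parent directory must already exist; the temp file is created
	// next to the destination so the final rename stays on one filesystem.
	err := ioutils.AtomicWriteFile("/tmp/demo/app.conf", []byte("key=value\n"), 0644)
	if err != nil {
		log.Fatal(err)
	}
}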
+type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 0000000000..1f657bd3dc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,157 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "io" +) + +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { + io.Reader + closer func() error +} + +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &ReadCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. 
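For the multi-file AtomicWriteSet declared just above, the pattern is: stage files in the temporary set, then publish them all at once with a single directory rename. Paths are illustrative:

package main

import (
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	ws, err := ioutils.NewAtomicWriteSet("") // "" selects the system temp dir
	if err != nil {
		log.Fatal(err)
	}
	if err := ws.WriteFile("manifest.json", []byte("{}"), 0644); err != nil {
		ws.Cancel()
		log.Fatal(err)
	}
	// Commit renames the staging directory into place; the target must
	// not exist yet, though its parent must.
	if err := ws.Commit("/tmp/demo-bundle"); err != nil {
		ws.Cancel()
		log.Fatal(err)
	}
}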
+func HashData(src io.Reader) (string, error) { + h := sha256.New() + if _, err := io.Copy(h, src); err != nil { + return "", err + } + return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil +} + +// OnEOFReader wraps an io.ReadCloser with a function that runs +// either at the end of file or when the reader is closed. +type OnEOFReader struct { + Rc io.ReadCloser + Fn func() +} + +func (r *OnEOFReader) Read(p []byte) (n int, err error) { + n, err = r.Rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +// Close closes the file and runs the function. +func (r *OnEOFReader) Close() error { + err := r.Rc.Close() + r.runFunc() + return err +} + +func (r *OnEOFReader) runFunc() { + if fn := r.Fn; fn != nil { + fn() + r.Fn = nil + } +} + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. + default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper and its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go new file mode 100644 index 0000000000..dc894f9131 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -0,0 +1,10 @@ +// +build !windows + +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io/ioutil" + +// TempDir on Unix systems is equivalent to ioutil.TempDir. +func TempDir(dir, prefix string) (string, error) { + return ioutil.TempDir(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go new file mode 100644 index 0000000000..ecaba2e36d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -0,0 +1,16 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io/ioutil" + + "github.com/docker/docker/pkg/longpath" +) + +// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
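A sketch of bounding a blocking read with a context via NewCancelReadCloser above; the never-yielding pipe stands in for any slow io.ReadCloser, such as a network body:

package main

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	pr, _ := io.Pipe() // a ReadCloser that never produces data
	rc := ioutils.NewCancelReadCloser(ctx, pr)
	defer rc.Close()

	buf := make([]byte, 32)
	_, err := rc.Read(buf) // unblocks when the context expires
	fmt.Println(err)       // context deadline exceeded
}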
+func TempDir(dir, prefix string) (string, error) { + tempDir, err := ioutil.TempDir(dir, prefix) + if err != nil { + return "", err + } + return longpath.AddPrefix(tempDir), nil +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go new file mode 100644 index 0000000000..91b8d18266 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -0,0 +1,92 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "sync" +) + +// WriteFlusher wraps the Write and Flush operations, ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the target's lifecycle has already ended. +type WriteFlusher struct { + w io.Writer + flusher flusher + flushed chan struct{} + flushedOnce sync.Once + closed chan struct{} + closeLock sync.Mutex +} + +type flusher interface { + Flush() +} + +var errWriteFlusherClosed = io.EOF + +func (wf *WriteFlusher) Write(b []byte) (n int, err error) { + select { + case <-wf.closed: + return 0, errWriteFlusherClosed + default: + } + + n, err = wf.w.Write(b) + wf.Flush() // every write is a flush. + return n, err +} + +// Flush the stream immediately. +func (wf *WriteFlusher) Flush() { + select { + case <-wf.closed: + return + default: + } + + wf.flushedOnce.Do(func() { + close(wf.flushed) + }) + wf.flusher.Flush() +} + +// Flushed reports whether Flush has been called: true if it has, +// false otherwise. +func (wf *WriteFlusher) Flushed() bool { + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or not a response code has been issued. + // Another hook should be used instead. + var flushed bool + select { + case <-wf.flushed: + flushed = true + default: + } + return flushed +} + +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.closeLock.Lock() + defer wf.closeLock.Unlock() + + select { + case <-wf.closed: + return errWriteFlusherClosed + default: + close(wf.closed) + } + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. +func NewWriteFlusher(w io.Writer) *WriteFlusher { + var fl flusher + if f, ok := w.(flusher); ok { + fl = f + } else { + fl = &NopFlusher{} + } + return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go new file mode 100644 index 0000000000..61c679497d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -0,0 +1,66 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import "io" + +// NopWriter represents a type whose write operation is a no-op. +type NopWriter struct{} + +func (*NopWriter) Write(buf []byte) (int, error) { + return len(buf), nil +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { return nil } + +// NopWriteCloser returns a nopWriteCloser. +func NopWriteCloser(w io.Writer) io.WriteCloser { + return &nopWriteCloser{w} +} + +// NopFlusher represents a type whose flush operation is a no-op. +type NopFlusher struct{} + +// Flush is a no-op.
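A sketch of the intended streaming use: wrap an http.ResponseWriter (which normally implements Flush and therefore the flusher interface above) so every progress line is pushed to the client immediately. The handler and messages are illustrative:

package streamexample

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/docker/pkg/ioutils"
)

func handler(w http.ResponseWriter, r *http.Request) {
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()
	for i := 1; i <= 3; i++ {
		fmt.Fprintf(wf, "step %d\n", i) // written and flushed in one call
		time.Sleep(100 * time.Millisecond)
	}
}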
+func (f *NopFlusher) Flush() {} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (r *writeCloserWrapper) Close() error { + return r.closer() +} + +// NewWriteCloserWrapper returns a new io.WriteCloser. +func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { + return &writeCloserWrapper{ + Writer: r, + closer: closer, + } +} + +// WriteCounter wraps a concrete io.Writer and holds a count of the number +// of bytes written to the writer during a "session". +// This can be convenient when the write return value is masked +// (e.g., json.Encoder.Encode()) +type WriteCounter struct { + Count int64 + Writer io.Writer +} + +// NewWriteCounter returns a new WriteCounter. +func NewWriteCounter(w io.Writer) *WriteCounter { + return &WriteCounter{ + Writer: w, + } +} + +func (wc *WriteCounter) Write(p []byte) (count int, err error) { + count, err = wc.Writer.Write(p) + wc.Count += int64(count) + return +} diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go new file mode 100644 index 0000000000..cf8d04b1b2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go @@ -0,0 +1,283 @@ +package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage" + +import ( + "encoding/json" + "fmt" + "io" + "strings" + "time" + + units "github.com/docker/go-units" + "github.com/moby/term" + "github.com/morikuni/aec" +) + +// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to +// ensure the formatted time is always the same number of characters. +const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + +// JSONError wraps a concrete Code and Message, `Code` is +// an integer error code, `Message` is the error message. +type JSONError struct { + Code int `json:"code,omitempty"` + Message string `json:"message,omitempty"` +} + +func (e *JSONError) Error() string { + return e.Message +} + +// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, +// Start is the initial value for the operation. Current is the current status and +// value of the progress made towards Total. Total is the end value describing when +// we made 100% progress for an operation.
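For reference, the stream this package decodes is a sequence of JSON objects like the two illustrative ones below; DisplayJSONMessagesStream (defined further down) renders them, suppressing in-place progress bars when the output is not a terminal:

package main

import (
	"os"
	"strings"

	"github.com/docker/docker/pkg/jsonmessage"
)

func main() {
	in := strings.NewReader(
		`{"status":"Downloading","id":"a3ed95caeb02","progressDetail":{"current":512,"total":2048}}` + "\n" +
			`{"status":"Download complete","id":"a3ed95caeb02"}` + "\n")
	// isTerminal=false: plain status lines only, no cursor movement.
	if err := jsonmessage.DisplayJSONMessagesStream(in, os.Stdout, 0, false, nil); err != nil {
		panic(err)
	}
}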
+type JSONProgress struct { + terminalFd uintptr + Current int64 `json:"current,omitempty"` + Total int64 `json:"total,omitempty"` + Start int64 `json:"start,omitempty"` + // If true, don't show xB/yB + HideCounts bool `json:"hidecounts,omitempty"` + Units string `json:"units,omitempty"` + nowFunc func() time.Time + winSize int +} + +func (p *JSONProgress) String() string { + var ( + width = p.width() + pbBox string + numbersBox string + timeLeftBox string + ) + if p.Current <= 0 && p.Total <= 0 { + return "" + } + if p.Total <= 0 { + switch p.Units { + case "": + current := units.HumanSize(float64(p.Current)) + return fmt.Sprintf("%8v", current) + default: + return fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 + if percentage > 50 { + percentage = 50 + } + if width > 110 { + // this number can't be negative gh#7136 + numSpaces := 0 + if 50-percentage > 0 { + numSpaces = 50 - percentage + } + pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) + } + + switch { + case p.HideCounts: + case p.Units == "": // no units, use bytes + current := units.HumanSize(float64(p.Current)) + total := units.HumanSize(float64(p.Total)) + + numbersBox = fmt.Sprintf("%8v/%v", current, total) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%8v", current) + } + default: + numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units) + + if p.Current > p.Total { + // remove total display if the reported current is wonky. + numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units) + } + } + + if p.Current > 0 && p.Start > 0 && percentage < 50 { + fromStart := p.now().Sub(time.Unix(p.Start, 0)) + perEntry := fromStart / time.Duration(p.Current) + left := time.Duration(p.Total-p.Current) * perEntry + left = (left / time.Second) * time.Second + + if width > 50 { + timeLeftBox = " " + left.String() + } + } + return pbBox + numbersBox + timeLeftBox +} + +// shim for testing +func (p *JSONProgress) now() time.Time { + if p.nowFunc == nil { + p.nowFunc = func() time.Time { + return time.Now().UTC() + } + } + return p.nowFunc() +} + +// shim for testing +func (p *JSONProgress) width() int { + if p.winSize != 0 { + return p.winSize + } + ws, err := term.GetWinsize(p.terminalFd) + if err == nil { + return int(ws.Width) + } + return 200 +} + +// JSONMessage defines a message struct. It describes +// the created time, where it from, status, ID of the +// message. It's used for docker events. +type JSONMessage struct { + Stream string `json:"stream,omitempty"` + Status string `json:"status,omitempty"` + Progress *JSONProgress `json:"progressDetail,omitempty"` + ProgressMessage string `json:"progress,omitempty"` // deprecated + ID string `json:"id,omitempty"` + From string `json:"from,omitempty"` + Time int64 `json:"time,omitempty"` + TimeNano int64 `json:"timeNano,omitempty"` + Error *JSONError `json:"errorDetail,omitempty"` + ErrorMessage string `json:"error,omitempty"` // deprecated + // Aux contains out-of-band data, such as digests for push signing and image id after building. 
+ Aux *json.RawMessage `json:"aux,omitempty"` +} + +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) +} + +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) +} + +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) +} + +// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("authentication is required") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) + endl = "\r" + fmt.Fprint(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { // deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]uint) + ) + + for { + var diff uint + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = uint(len(ids)) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). 
+ ids = make(map[string]uint) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 0000000000..4177affba2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath // import "github.com/docker/docker/pkg/longpath" + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go new file mode 100644 index 0000000000..8f6e0a737a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -0,0 +1,190 @@ +package stdcopy // import "github.com/docker/docker/pkg/stdcopy" + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "sync" +) + +// StdType is the type of standard stream +// a writer can multiplex to. +type StdType byte + +const ( + // Stdin represents standard input stream type. + Stdin StdType = iota + // Stdout represents standard output stream type. + Stdout + // Stderr represents standard error stream type. + Stderr + // Systemerr represents errors originating from the system that make it + // into the multiplexed stream. + Systemerr + + stdWriterPrefixLen = 8 + stdWriterFdIndex = 0 + stdWriterSizeIndex = 4 + + startingBufLen = 32*1024 + stdWriterPrefixLen + 1 +) + +var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} + +// stdWriter is a wrapper of io.Writer with extra customized info. +type stdWriter struct { + io.Writer + prefix byte +} + +// Write sends the buffer to the underlying writer. +// It inserts the prefix header before the buffer, +// so stdcopy.StdCopy knows where to demultiplex the output. +// It makes stdWriter implement io.Writer.
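The framing is easiest to see round-tripped: each Write below emits an 8-byte header (one stream byte, three zero bytes, a big-endian uint32 payload length) followed by the payload, and StdCopy (defined next) splits the combined stream back apart. A minimal sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var muxed bytes.Buffer
	outW := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	outW.Write([]byte("to stdout\n"))
	errW.Write([]byte("to stderr\n"))

	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(stdout.String(), stderr.String())
}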
+func (w *stdWriter) Write(p []byte) (n int, err error) { + if w == nil || w.Writer == nil { + return 0, errors.New("Writer not instantiated") + } + if p == nil { + return 0, nil + } + + header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} + binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) + buf := bufPool.Get().(*bytes.Buffer) + buf.Write(header[:]) + buf.Write(p) + + n, err = w.Writer.Write(buf.Bytes()) + n -= stdWriterPrefixLen + if n < 0 { + n = 0 + } + + buf.Reset() + bufPool.Put(buf) + return +} + +// NewStdWriter instantiates a new Writer. +// Everything written to it will be encapsulated using a custom format, +// and written to the underlying `w` stream. +// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. +// `t` indicates the id of the stream to encapsulate. +// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. +func NewStdWriter(w io.Writer, t StdType) io.Writer { + return &stdWriter{ + Writer: w, + prefix: byte(t), + } +} + +// StdCopy is a modified version of io.Copy. +// +// StdCopy will demultiplex `src`, assuming that it contains two streams, +// previously multiplexed together using a StdWriter instance. +// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. +// +// StdCopy will read until it hits EOF on `src`. It will then return a nil error. +// In other words: if `err` is non nil, it indicates a real underlying error. +// +// `written` will hold the total number of bytes written to `dstout` and `dsterr`. +func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { + var ( + buf = make([]byte, startingBufLen) + bufLen = len(buf) + nr, nw int + er, ew error + out io.Writer + frameSize int + ) + + for { + // Make sure we have at least a full header + for nr < stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + stream := StdType(buf[stdWriterFdIndex]) + // Check the first byte to know where to write + switch stream { + case Stdin: + fallthrough + case Stdout: + // Write on stdout + out = dstout + case Stderr: + // Write on stderr + out = dsterr + case Systemerr: + // If we're on Systemerr, we won't write anywhere. + // NB: if this code changes later, make sure you don't try to write + // to outstream if Systemerr is the stream + out = nil + default: + return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) + } + + // Retrieve the size of the frame + frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) + + // Check if the buffer is big enough to read the frame. + // Extend it if necessary. + if frameSize+stdWriterPrefixLen > bufLen { + buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) + bufLen = len(buf) + } + + // While the amount of bytes read is less than the size of the frame + header, we keep reading + for nr < frameSize+stdWriterPrefixLen { + var nr2 int + nr2, er = src.Read(buf[nr:]) + nr += nr2 + if er == io.EOF { + if nr < frameSize+stdWriterPrefixLen { + return written, nil + } + break + } + if er != nil { + return 0, er + } + } + + // we might have an error from the source mixed up in our multiplexed + // stream. if we do, return it. 
+		if stream == Systemerr {
+			return written, fmt.Errorf("error from daemon in stream: %s", string(buf[stdWriterPrefixLen:frameSize+stdWriterPrefixLen]))
+		}
+
+		// Write the retrieved frame (without header)
+		nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
+		if ew != nil {
+			return 0, ew
+		}
+
+		// If the frame has not been fully written: error
+		if nw != frameSize {
+			return 0, io.ErrShortWrite
+		}
+		written += int64(nw)
+
+		// Move the rest of the buffer to the beginning
+		copy(buf, buf[frameSize+stdWriterPrefixLen:])
+		// Move the index
+		nr -= frameSize + stdWriterPrefixLen
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md
new file mode 100644
index 0000000000..37a5098fd9
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/README.md
@@ -0,0 +1 @@
+This package provides helper functions for dealing with string identifiers.
diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
new file mode 100644
index 0000000000..5fe071d628
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go
@@ -0,0 +1,63 @@
+// Package stringid provides helper functions for dealing with string identifiers
+package stringid // import "github.com/docker/docker/pkg/stringid"
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+const shortLen = 12
+
+var (
+	validShortID = regexp.MustCompile("^[a-f0-9]{12}$")
+	validHex     = regexp.MustCompile(`^[a-f0-9]{64}$`)
+)
+
+// IsShortID determines if an arbitrary string *looks like* a short ID.
+func IsShortID(id string) bool {
+	return validShortID.MatchString(id)
+}
+
+// TruncateID returns a shorthand version of a string identifier for convenience.
+// A collision with other shorthands is very unlikely, but possible.
+// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller
+// will need to use a longer prefix, or the full-length ID.
+func TruncateID(id string) string {
+	if i := strings.IndexRune(id, ':'); i >= 0 {
+		id = id[i+1:]
+	}
+	if len(id) > shortLen {
+		id = id[:shortLen]
+	}
+	return id
+}
+
+// GenerateRandomID returns a unique ID.
+func GenerateRandomID() string {
+	b := make([]byte, 32)
+	for {
+		if _, err := rand.Read(b); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		id := hex.EncodeToString(b)
+		// If we try to parse the truncated form as an int and we don't get
+		// an error, then the value is all numeric and causes issues when
+		// used as a hostname. ref #3869
+		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+			continue
+		}
+		return id
+	}
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
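+//
+// A quick sketch of the accepted shape (the inputs below are made up for
+// illustration):
+//
+//	ValidateID(strings.Repeat("a", 64)) // nil: 64 lowercase hex characters
+//	ValidateID("4e38e38c8ce0")          // error: truncated IDs do not validate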
+func ValidateID(id string) error { + if ok := validHex.MatchString(id); !ok { + return fmt.Errorf("image ID %q is invalid", id) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/docker/docker/pkg/system/args_windows.go new file mode 100644 index 0000000000..b7c9487a06 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/args_windows.go @@ -0,0 +1,16 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "strings" + + "golang.org/x/sys/windows" +) + +// EscapeArgs makes a Windows-style escaped command line from a set of arguments +func EscapeArgs(args []string) string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = windows.EscapeArg(a) + } + return strings.Join(escapedArgs, " ") +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/docker/docker/pkg/system/chtimes.go new file mode 100644 index 0000000000..c26a4e24b6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -0,0 +1,31 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// Chtimes changes the access time and modified time of a file at the given path +func Chtimes(name string, atime time.Time, mtime time.Time) error { + unixMinTime := time.Unix(0, 0) + unixMaxTime := maxTime + + // If the modified time is prior to the Unix Epoch, or after the + // end of Unix Time, os.Chtimes has undefined behavior + // default to Unix Epoch in this case, just in case + + if atime.Before(unixMinTime) || atime.After(unixMaxTime) { + atime = unixMinTime + } + + if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { + mtime = unixMinTime + } + + if err := os.Chtimes(name, atime, mtime); err != nil { + return err + } + + // Take platform specific action for setting create time. + return setCTime(name, mtime) +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go new file mode 100644 index 0000000000..d5fab96f9d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_nowindows.go @@ -0,0 +1,14 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" +) + +// setCTime will set the create time on a file. On Unix, the create +// time is updated as a side effect of setting the modified time, so +// no action is required. +func setCTime(path string, ctime time.Time) error { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go new file mode 100644 index 0000000000..6664b8bcad --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -0,0 +1,26 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "time" + + "golang.org/x/sys/windows" +) + +// setCTime will set the create time on a file. On Windows, this requires +// calling SetFileTime and explicitly including the create time. 
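+// The file is opened with FILE_FLAG_BACKUP_SEMANTICS, which is what allows
+// CreateFile to open directories as well as regular files.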
+func setCTime(path string, ctime time.Time) error { + ctimespec := windows.NsecToTimespec(ctime.UnixNano()) + pathp, e := windows.UTF16PtrFromString(path) + if e != nil { + return e + } + h, e := windows.CreateFile(pathp, + windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, + windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) + if e != nil { + return e + } + defer windows.Close(h) + c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) + return windows.SetFileTime(h, &c, nil, nil) +} diff --git a/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/docker/docker/pkg/system/errors.go new file mode 100644 index 0000000000..2573d71622 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/errors.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "errors" +) + +var ( + // ErrNotSupportedPlatform means the platform is not supported. + ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. + ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") +) diff --git a/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/docker/docker/pkg/system/exitcode.go new file mode 100644 index 0000000000..4ba8fe35bf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -0,0 +1,19 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "os/exec" + "syscall" +) + +// GetExitCode returns the ExitStatus of the specified error if its type is +// exec.ExitError, returns 0 and an error otherwise. +func GetExitCode(err error) (int, error) { + exitCode := 0 + if exiterr, ok := err.(*exec.ExitError); ok { + if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { + return procExit.ExitStatus(), nil + } + } + return exitCode, fmt.Errorf("failed to get exit code") +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_unix.go b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go new file mode 100644 index 0000000000..dcee3e9f98 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_unix.go @@ -0,0 +1,67 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "io/ioutil" + "os" + "path/filepath" +) + +// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return os.MkdirAll(path, perm) +} + +// MkdirAll creates a directory named path along with any necessary parents, +// with permission specified by attribute perm for all dir created. +func MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +// IsAbs is a platform-specific wrapper for filepath.IsAbs. +func IsAbs(path string) bool { + return filepath.IsAbs(path) +} + +// The functions below here are wrappers for the equivalents in the os and ioutils packages. +// They are passthrough on Unix platforms, and only relevant on Windows. + +// CreateSequential creates the named file with mode 0666 (before umask), truncating +// it if it already exists. If successful, methods on the returned +// File can be used for I/O; the associated file descriptor has mode +// O_RDWR. +// If there is an error, it will be of type *PathError. +func CreateSequential(name string) (*os.File, error) { + return os.Create(name) +} + +// OpenSequential opens the named file for reading. 
If successful, methods on +// the returned file can be used for reading; the associated file +// descriptor has mode O_RDONLY. +// If there is an error, it will be of type *PathError. +func OpenSequential(name string) (*os.File, error) { + return os.Open(name) +} + +// OpenFileSequential is the generalized open call; most users will use Open +// or Create instead. It opens the named file with specified flag +// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful, +// methods on the returned File can be used for I/O. +// If there is an error, it will be of type *PathError. +func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) { + return os.OpenFile(name, flag, perm) +} + +// TempFileSequential creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFileSequential(dir, prefix string) (f *os.File, err error) { + return ioutil.TempFile(dir, prefix) +} diff --git a/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go new file mode 100644 index 0000000000..b4646277ab --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -0,0 +1,292 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + "sync" + "syscall" + "time" + "unsafe" + + "golang.org/x/sys/windows" +) + +const ( + // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System + SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" +) + +// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory +// with an appropriate SDDL defined ACL. +func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error { + return mkdirall(path, true, sddl) +} + +// MkdirAll implementation that is volume path aware for Windows. It can be used +// as a drop-in replacement for os.MkdirAll() +func MkdirAll(path string, _ os.FileMode) error { + return mkdirall(path, false, "") +} + +// mkdirall is a custom version of os.MkdirAll modified for use on Windows +// so that it is both volume path aware, and can create a directory with +// a DACL. +func mkdirall(path string, applyACL bool, sddl string) error { + if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) { + return nil + } + + // The rest of this method is largely copied from os.MkdirAll and should be kept + // as-is to ensure compatibility. + + // Fast path: if we can tell whether path is a directory or file, stop with success or error. + dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{ + Op: "mkdir", + Path: path, + Err: syscall.ENOTDIR, + } + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. 
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false, sddl)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if applyACL {
+		err = mkdirWithACL(path, sddl)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and windows.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string, sddl string) error {
+	sa := windows.SecurityAttributes{Length: 0}
+	sd, err := windows.SecurityDescriptorFromString(sddl)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	sa.SecurityDescriptor = sd
+
+	namep, err := windows.UTF16PtrFromString(name)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+
+	e := windows.CreateDirectory(namep, &sa)
+	if e != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: e}
+	}
+	return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon). This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+	if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) {
+		return true
+	}
+	return false
+}
+
+// The functions below here originate from the golang os and windows packages,
+// slightly modified to only cope with files, not directories, due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (in particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
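+// It mirrors os.OpenFile, except that the underlying CreateFile call passes
+// FILE_FLAG_SEQUENTIAL_SCAN instead of FILE_ATTRIBUTE_NORMAL (see
+// windowsOpenSequential below).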
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) { + if name == "" { + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT} + } + r, errf := windowsOpenFileSequential(name, flag, 0) + if errf == nil { + return r, nil + } + return nil, &os.PathError{Op: "open", Path: name, Err: errf} +} + +func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) { + r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0) + if e != nil { + return nil, e + } + return os.NewFile(uintptr(r), name), nil +} + +func makeInheritSa() *windows.SecurityAttributes { + var sa windows.SecurityAttributes + sa.Length = uint32(unsafe.Sizeof(sa)) + sa.InheritHandle = 1 + return &sa +} + +func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) { + if len(path) == 0 { + return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND + } + pathp, err := windows.UTF16PtrFromString(path) + if err != nil { + return windows.InvalidHandle, err + } + var access uint32 + switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) { + case windows.O_RDONLY: + access = windows.GENERIC_READ + case windows.O_WRONLY: + access = windows.GENERIC_WRITE + case windows.O_RDWR: + access = windows.GENERIC_READ | windows.GENERIC_WRITE + } + if mode&windows.O_CREAT != 0 { + access |= windows.GENERIC_WRITE + } + if mode&windows.O_APPEND != 0 { + access &^= windows.GENERIC_WRITE + access |= windows.FILE_APPEND_DATA + } + sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE) + var sa *windows.SecurityAttributes + if mode&windows.O_CLOEXEC == 0 { + sa = makeInheritSa() + } + var createmode uint32 + switch { + case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL): + createmode = windows.CREATE_NEW + case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC): + createmode = windows.CREATE_ALWAYS + case mode&windows.O_CREAT == windows.O_CREAT: + createmode = windows.OPEN_ALWAYS + case mode&windows.O_TRUNC == windows.O_TRUNC: + createmode = windows.TRUNCATE_EXISTING + default: + createmode = windows.OPEN_EXISTING + } + // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang. + // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx + const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN + h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0) + return h, e +} + +// Helpers for TempFileSequential +var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFileSequential is a copy of ioutil.TempFile, modified to use sequential +// file access. Below is the original comment from golang: +// TempFile creates a new temporary file in the directory dir +// with a name beginning with prefix, opens the file for reading +// and writing, and returns the resulting *os.File. +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. 
It is the caller's responsibility
+// to remove the file when no longer needed.
+func TempFileSequential(dir, prefix string) (f *os.File, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		name := filepath.Join(dir, prefix+nextSuffix())
+		f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				rand = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		break
+	}
+	return
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/docker/docker/pkg/system/init.go
new file mode 100644
index 0000000000..a17597aaba
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init.go
@@ -0,0 +1,22 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+// Used by chtimes
+var maxTime time.Time
+
+func init() {
+	// chtimes initialization
+	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+		// This is a 64 bit timespec
+		// os.Chtimes limits time to the following
+		maxTime = time.Unix(0, 1<<63-1)
+	} else {
+		// This is a 32 bit timespec
+		maxTime = time.Unix(1<<31-1, 0)
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/docker/docker/pkg/system/init_windows.go
new file mode 100644
index 0000000000..a91288c60b
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/init_windows.go
@@ -0,0 +1,29 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"os"
+
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// containerdRuntimeSupported determines if ContainerD should be the runtime.
+	// As of March 2019, this is an experimental feature.
+	containerdRuntimeSupported = false
+)
+
+// InitContainerdRuntime sets whether to use ContainerD for runtime
+// on Windows. This is an experimental feature still in development, and
+// it also requires an environment variable to be set (so as not to turn
+// the feature on simply by enabling experimental mode, which would also
+// imply LCOW).
+func InitContainerdRuntime(experimental bool, cdPath string) {
+	if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 {
+		logrus.Warnf("Using ContainerD runtime. This feature is experimental")
+		containerdRuntimeSupported = true
+	}
+}
+
+// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported.
+func ContainerdRuntimeSupported() bool {
+	return containerdRuntimeSupported
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/docker/docker/pkg/system/lcow.go
new file mode 100644
index 0000000000..0f00028fbd
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/lcow.go
@@ -0,0 +1,48 @@
+// +build windows,!no_lcow
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"strings"
+
+	"github.com/Microsoft/hcsshim/osversion"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	// lcowSupported determines if Linux Containers on Windows are supported.
+	lcowSupported = false
+)
+
+// InitLCOW sets whether LCOW is supported or not. Requires RS5+.
+func InitLCOW(experimental bool) {
+	if experimental && osversion.Build() >= osversion.RS5 {
+		lcowSupported = true
+	}
+}
+
+// LCOWSupported returns true if Linux Containers on Windows are supported.
+func LCOWSupported() bool {
+	return lcowSupported
+}
+
+// ValidatePlatform determines if a platform structure is valid.
+// TODO This is a temporary windows-only function, should be replaced by +// comparison of worker capabilities +func ValidatePlatform(platform specs.Platform) error { + if !IsOSSupported(platform.OS) { + return errors.Errorf("unsupported os %s", platform.OS) + } + return nil +} + +// IsOSSupported determines if an operating system is supported by the host +func IsOSSupported(os string) bool { + if strings.EqualFold("windows", os) { + return true + } + if LCOWSupported() && strings.EqualFold(os, "linux") { + return true + } + return false +} diff --git a/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go new file mode 100644 index 0000000000..3d3cf775a7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lcow_unsupported.go @@ -0,0 +1,28 @@ +// +build !windows windows,no_lcow + +package system // import "github.com/docker/docker/pkg/system" +import ( + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(_ bool) {} + +// LCOWSupported returns true if Linux containers on Windows are supported. +func LCOWSupported() bool { + return false +} + +// ValidatePlatform determines if a platform structure is valid. This function +// is used for LCOW, and is a no-op on non-windows platforms. +func ValidatePlatform(_ specs.Platform) error { + return nil +} + +// IsOSSupported determines if an operating system is supported by the host. +func IsOSSupported(os string) bool { + return strings.EqualFold(runtime.GOOS, os) +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go new file mode 100644 index 0000000000..de5a1c0fb2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -0,0 +1,20 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// Lstat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Lstat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Lstat(path, s); err != nil { + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go new file mode 100644 index 0000000000..359c791d9b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -0,0 +1,14 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// Lstat calls os.Lstat to get a fileinfo interface back. +// This is then copied into our own locally defined structure. +func Lstat(path string) (*StatT, error) { + fi, err := os.Lstat(path) + if err != nil { + return nil, err + } + + return fromStatT(&fi) +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/docker/docker/pkg/system/meminfo.go new file mode 100644 index 0000000000..6667eb84dc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -0,0 +1,17 @@ +package system // import "github.com/docker/docker/pkg/system" + +// MemInfo contains memory statistics of the host system. +type MemInfo struct { + // Total usable RAM (i.e. physical RAM minus a few reserved bits and the + // kernel binary code). 
+ MemTotal int64 + + // Amount of free memory. + MemFree int64 + + // Total amount of swap space available. + SwapTotal int64 + + // Amount of swap space that is currently unused. + SwapFree int64 +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go new file mode 100644 index 0000000000..cd060eff24 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -0,0 +1,71 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "bufio" + "io" + "os" + "strconv" + "strings" + + units "github.com/docker/go-units" +) + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + file, err := os.Open("/proc/meminfo") + if err != nil { + return nil, err + } + defer file.Close() + return parseMemInfo(file) +} + +// parseMemInfo parses the /proc/meminfo file into +// a MemInfo object given an io.Reader to the file. +// Throws error if there are problems reading from the file +func parseMemInfo(reader io.Reader) (*MemInfo, error) { + meminfo := &MemInfo{} + scanner := bufio.NewScanner(reader) + memAvailable := int64(-1) + for scanner.Scan() { + // Expected format: ["MemTotal:", "1234", "kB"] + parts := strings.Fields(scanner.Text()) + + // Sanity checks: Skip malformed entries. + if len(parts) < 3 || parts[2] != "kB" { + continue + } + + // Convert to bytes. + size, err := strconv.Atoi(parts[1]) + if err != nil { + continue + } + bytes := int64(size) * units.KiB + + switch parts[0] { + case "MemTotal:": + meminfo.MemTotal = bytes + case "MemFree:": + meminfo.MemFree = bytes + case "MemAvailable:": + memAvailable = bytes + case "SwapTotal:": + meminfo.SwapTotal = bytes + case "SwapFree:": + meminfo.SwapFree = bytes + } + + } + if memAvailable != -1 { + meminfo.MemFree = memAvailable + } + + // Handle errors that may have occurred during the reading of the file. + if err := scanner.Err(); err != nil { + return nil, err + } + + return meminfo, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go new file mode 100644 index 0000000000..56f4494268 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -0,0 +1,8 @@ +// +build !linux,!windows + +package system // import "github.com/docker/docker/pkg/system" + +// ReadMemInfo is not supported on platforms other than linux and windows. 
+func ReadMemInfo() (*MemInfo, error) { + return nil, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go new file mode 100644 index 0000000000..6ed93f2fe2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -0,0 +1,45 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "unsafe" + + "golang.org/x/sys/windows" +) + +var ( + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + + procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") +) + +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx +// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx +type memorystatusex struct { + dwLength uint32 + dwMemoryLoad uint32 + ullTotalPhys uint64 + ullAvailPhys uint64 + ullTotalPageFile uint64 + ullAvailPageFile uint64 + ullTotalVirtual uint64 + ullAvailVirtual uint64 + ullAvailExtendedVirtual uint64 +} + +// ReadMemInfo retrieves memory statistics of the host system and returns a +// MemInfo type. +func ReadMemInfo() (*MemInfo, error) { + msi := &memorystatusex{ + dwLength: 64, + } + r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) + if r1 == 0 { + return &MemInfo{}, nil + } + return &MemInfo{ + MemTotal: int64(msi.ullTotalPhys), + MemFree: int64(msi.ullAvailPhys), + SwapTotal: int64(msi.ullTotalPageFile), + SwapFree: int64(msi.ullAvailPageFile), + }, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/docker/docker/pkg/system/mknod.go new file mode 100644 index 0000000000..b132482e03 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -0,0 +1,22 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Mknod creates a filesystem node (file, device special file or named pipe) named path +// with attributes specified by mode and dev. +func Mknod(path string, mode uint32, dev int) error { + return unix.Mknod(path, mode, dev) +} + +// Mkdev is used to build the value of linux devices (in /dev/) which specifies major +// and minor number of the newly created device special file. +// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. +// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, +// then the top 12 bits of the minor. +func Mkdev(major int64, minor int64) uint32 { + return uint32(unix.Mkdev(uint32(major), uint32(minor))) +} diff --git a/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go new file mode 100644 index 0000000000..ec89d7a15e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -0,0 +1,11 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Mknod is not implemented on Windows. +func Mknod(path string, mode uint32, dev int) error { + return ErrNotSupportedPlatform +} + +// Mkdev is not implemented on Windows. 
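+// Unlike Mknod above, it panics rather than returning an error, since its
+// uint32 return type leaves no way to signal failure to the caller.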
+func Mkdev(major int64, minor int64) uint32 {
+	panic("Mkdev not implemented on Windows.")
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/docker/docker/pkg/system/path.go
new file mode 100644
index 0000000000..64e892289a
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path.go
@@ -0,0 +1,64 @@
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+
+// DefaultPathEnv is a Unix-style list of directories to search for
+// executables. Each directory is separated from the next by a colon
+// ':' character.
+func DefaultPathEnv(os string) string {
+	if runtime.GOOS == "windows" {
+		if os != runtime.GOOS {
+			return defaultUnixPathEnv
+		}
+		// Deliberately empty on Windows containers on Windows as the default path will be set by
+		// the container. Docker has no context of what the default path should be.
+		return ""
+	}
+	return defaultUnixPathEnv
+}
+
+// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter
+// actually uses in order to avoid system depending on containerd/continuity.
+type PathVerifier interface {
+	IsAbs(string) bool
+}
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is on the system drive.
+// On Linux: this is a no-op.
+// On Windows: this verifies and manipulates the path.
+// This is used, for example, when validating a user-provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. The path is also translated to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C:   --> Fail
+// C:\  --> \
+// a    --> a
+// /a   --> \a
+// d:\  --> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver PathVerifier) (string, error) {
+	if runtime.GOOS != "windows" || LCOWSupported() {
+		return path, nil
+	}
+
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !driver.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/docker/docker/pkg/system/path_unix.go
new file mode 100644
index 0000000000..b0b93196a1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/path_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package system // import "github.com/docker/docker/pkg/system"
+
+// GetLongPathName converts Windows short pathnames to full pathnames.
+// For example C:\Users\ADMIN~1 --> C:\Users\Administrator.
+// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + return path, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 0000000000..22a56136c8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,27 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/windows" + +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p, err := windows.UTF16FromString(path) + if err != nil { + return "", err + } + b := p // GetLongPathName says we can reuse buffer + n, err := windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = windows.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } + } + return windows.UTF16ToString(b), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/docker/docker/pkg/system/process_unix.go new file mode 100644 index 0000000000..79aebb5272 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -0,0 +1,44 @@ +// +build linux freebsd darwin + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "fmt" + "io/ioutil" + "strings" + "syscall" + + "golang.org/x/sys/unix" +) + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + err := unix.Kill(pid, syscall.Signal(0)) + if err == nil || err == unix.EPERM { + return true + } + + return false +} + +// KillProcess force-stops a process. +func KillProcess(pid int) { + unix.Kill(pid, unix.SIGKILL) +} + +// IsProcessZombie return true if process has a state with "Z" +// http://man7.org/linux/man-pages/man5/proc.5.html +func IsProcessZombie(pid int) (bool, error) { + statPath := fmt.Sprintf("/proc/%d/stat", pid) + dataBytes, err := ioutil.ReadFile(statPath) + if err != nil { + return false, err + } + data := string(dataBytes) + sdata := strings.SplitN(data, " ", 4) + if len(sdata) >= 3 && sdata[2] == "Z" { + return true, nil + } + + return false, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 0000000000..09bdfa0ca0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. 
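+// Any error from os.FindProcess or Kill is deliberately ignored; like the
+// Unix variant above, it reports nothing back to the caller.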
+func KillProcess(pid int) {
+	p, err := os.FindProcess(pid)
+	if err == nil {
+		_ = p.Kill()
+	}
+}
diff --git a/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/docker/docker/pkg/system/rm.go
new file mode 100644
index 0000000000..c5d80ebda1
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/system/rm.go
@@ -0,0 +1,78 @@
+// +build !darwin,!windows
+
+package system // import "github.com/docker/docker/pkg/system"
+
+import (
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/moby/sys/mount"
+	"github.com/pkg/errors"
+)
+
+// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can
+// often be remedied.
+// Only use `EnsureRemoveAll` if you really want to make every effort to remove
+// a directory.
+//
+// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there
+// can be a race between reading directory entries and then actually attempting
+// to remove everything in the directory.
+// These types of errors do not need to be returned, since it's ok for the dir
+// to be gone; we can just retry the remove operation.
+//
+// This should not return an `os.ErrNotExist` kind of error under any circumstances.
+func EnsureRemoveAll(dir string) error {
+	notExistErr := make(map[string]bool)
+
+	// track retries
+	exitOnErr := make(map[string]int)
+	maxRetry := 50
+
+	// Attempt to unmount anything beneath this dir first
+	mount.RecursiveUnmount(dir)
+
+	for {
+		err := os.RemoveAll(dir)
+		if err == nil {
+			return nil
+		}
+
+		pe, ok := err.(*os.PathError)
+		if !ok {
+			return err
+		}
+
+		if os.IsNotExist(err) {
+			if notExistErr[pe.Path] {
+				return err
+			}
+			notExistErr[pe.Path] = true
+
+			// There is a race where some subdir can be removed after the parent
+			// dir entries have been read.
+			// So the path could be from `os.Remove(subdir)`.
+			// If the reported non-existent path is not the passed-in `dir`, we
+			// should just retry, but otherwise return with no error.
+ if pe.Path == dir { + return nil + } + continue + } + + if pe.Err != syscall.EBUSY { + return err + } + + if e := mount.Unmount(pe.Path); e != nil { + return errors.Wrapf(e, "error while removing %s", dir) + } + + if exitOnErr[pe.Path] == maxRetry { + return err + } + exitOnErr[pe.Path]++ + time.Sleep(100 * time.Millisecond) + } +} diff --git a/vendor/github.com/docker/docker/pkg/system/rm_windows.go b/vendor/github.com/docker/docker/pkg/system/rm_windows.go new file mode 100644 index 0000000000..ed9c5dcb8a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/rm_windows.go @@ -0,0 +1,6 @@ +package system + +import "os" + +// EnsureRemoveAll is an alias to os.RemoveAll on Windows +var EnsureRemoveAll = os.RemoveAll diff --git a/vendor/github.com/docker/docker/pkg/system/stat_bsd.go b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go new file mode 100644 index 0000000000..ea55c3dbb5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_bsd.go @@ -0,0 +1,15 @@ +// +build freebsd netbsd + +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go new file mode 100644 index 0000000000..c1c0ee9f38 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtimespec}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/docker/docker/pkg/system/stat_linux.go new file mode 100644 index 0000000000..17d5d131a3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -0,0 +1,20 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + // the type is 32bit on mips + rdev: uint64(s.Rdev), // nolint: unconvert + mtim: s.Mtim}, nil +} + +// FromStatT converts a syscall.Stat_t type to a system.Stat_t type +// This is exposed on Linux as pkg/archive/changes uses it. 
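+//
+// A minimal sketch of calling it directly (the path is an arbitrary example):
+//
+//	var st syscall.Stat_t
+//	if err := syscall.Stat("/tmp", &st); err == nil {
+//		if t, err := FromStatT(&st); err == nil {
+//			_ = t.Size()
+//		}
+//	}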
+func FromStatT(s *syscall.Stat_t) (*StatT, error) { + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go new file mode 100644 index 0000000000..756b92d1e6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 0000000000..6a51ccd642 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,13 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// fromStatT converts a syscall.Stat_t type to a system.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: s.Mode, + uid: s.Uid, + gid: s.Gid, + rdev: s.Rdev, + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/docker/docker/pkg/system/stat_unix.go new file mode 100644 index 0000000000..86bb6dd55e --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -0,0 +1,66 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "syscall" +) + +// StatT type contains status of a file. It contains metadata +// like permission, owner, group, size, etc about a file. +type StatT struct { + mode uint32 + uid uint32 + gid uint32 + rdev uint64 + size int64 + mtim syscall.Timespec +} + +// Mode returns file's permission mode. +func (s StatT) Mode() uint32 { + return s.mode +} + +// UID returns file's user id of owner. +func (s StatT) UID() uint32 { + return s.uid +} + +// GID returns file's group id of owner. +func (s StatT) GID() uint32 { + return s.gid +} + +// Rdev returns file's device ID (if it's special file). +func (s StatT) Rdev() uint64 { + return s.rdev +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() syscall.Timespec { + return s.mtim +} + +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + s := &syscall.Stat_t{} + if err := syscall.Stat(path, s); err != nil { + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} + } + return fromStatT(s) +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/docker/docker/pkg/system/stat_windows.go new file mode 100644 index 0000000000..b2456cb887 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -0,0 +1,49 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "os" + "time" +) + +// StatT type contains status of a file. It contains metadata +// like permission, size, etc about a file. 
+type StatT struct { + mode os.FileMode + size int64 + mtim time.Time +} + +// Size returns file's size. +func (s StatT) Size() int64 { + return s.size +} + +// Mode returns file's permission mode. +func (s StatT) Mode() os.FileMode { + return os.FileMode(s.mode) +} + +// Mtim returns file's last modification time. +func (s StatT) Mtim() time.Time { + return time.Time(s.mtim) +} + +// Stat takes a path to a file and returns +// a system.StatT type pertaining to that file. +// +// Throws an error if the file does not exist +func Stat(path string) (*StatT, error) { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + return fromStatT(&fi) +} + +// fromStatT converts a os.FileInfo type to a system.StatT type +func fromStatT(fi *os.FileInfo) (*StatT, error) { + return &StatT{ + size: (*fi).Size(), + mode: (*fi).Mode(), + mtim: (*fi).ModTime()}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go new file mode 100644 index 0000000000..905d10f153 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -0,0 +1,11 @@ +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Unmount is a platform-specific helper function to call +// the unmount syscall. +func Unmount(dest string) error { + return unix.Unmount(dest, 0) +} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go new file mode 100644 index 0000000000..1588aa3ef9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -0,0 +1,136 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + "unsafe" + + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" + "golang.org/x/sys/windows" +) + +const ( + OWNER_SECURITY_INFORMATION = windows.OWNER_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.OWNER_SECURITY_INFORMATION + GROUP_SECURITY_INFORMATION = windows.GROUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.GROUP_SECURITY_INFORMATION + DACL_SECURITY_INFORMATION = windows.DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.DACL_SECURITY_INFORMATION + SACL_SECURITY_INFORMATION = windows.SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SACL_SECURITY_INFORMATION + LABEL_SECURITY_INFORMATION = windows.LABEL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.LABEL_SECURITY_INFORMATION + ATTRIBUTE_SECURITY_INFORMATION = windows.ATTRIBUTE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.ATTRIBUTE_SECURITY_INFORMATION + SCOPE_SECURITY_INFORMATION = windows.SCOPE_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.SCOPE_SECURITY_INFORMATION + PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 + ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 + BACKUP_SECURITY_INFORMATION = windows.BACKUP_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.BACKUP_SECURITY_INFORMATION + PROTECTED_DACL_SECURITY_INFORMATION = windows.PROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_DACL_SECURITY_INFORMATION + PROTECTED_SACL_SECURITY_INFORMATION = windows.PROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.PROTECTED_SACL_SECURITY_INFORMATION + UNPROTECTED_DACL_SECURITY_INFORMATION = 
windows.UNPROTECTED_DACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_DACL_SECURITY_INFORMATION + UNPROTECTED_SACL_SECURITY_INFORMATION = windows.UNPROTECTED_SACL_SECURITY_INFORMATION // Deprecated: use golang.org/x/sys/windows.UNPROTECTED_SACL_SECURITY_INFORMATION +) + +const ( + SE_UNKNOWN_OBJECT_TYPE = windows.SE_UNKNOWN_OBJECT_TYPE // Deprecated: use golang.org/x/sys/windows.SE_UNKNOWN_OBJECT_TYPE + SE_FILE_OBJECT = windows.SE_FILE_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_FILE_OBJECT + SE_SERVICE = windows.SE_SERVICE // Deprecated: use golang.org/x/sys/windows.SE_SERVICE + SE_PRINTER = windows.SE_PRINTER // Deprecated: use golang.org/x/sys/windows.SE_PRINTER + SE_REGISTRY_KEY = windows.SE_REGISTRY_KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_KEY + SE_LMSHARE = windows.SE_LMSHARE // Deprecated: use golang.org/x/sys/windows.SE_LMSHARE + SE_KERNEL_OBJECT = windows.SE_KERNEL_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_KERNEL_OBJECT + SE_WINDOW_OBJECT = windows.SE_WINDOW_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WINDOW_OBJECT + SE_DS_OBJECT = windows.SE_DS_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT + SE_DS_OBJECT_ALL = windows.SE_DS_OBJECT_ALL // Deprecated: use golang.org/x/sys/windows.SE_DS_OBJECT_ALL + SE_PROVIDER_DEFINED_OBJECT = windows.SE_PROVIDER_DEFINED_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_PROVIDER_DEFINED_OBJECT + SE_WMIGUID_OBJECT = windows.SE_WMIGUID_OBJECT // Deprecated: use golang.org/x/sys/windows.SE_WMIGUID_OBJECT + SE_REGISTRY_WOW64_32KEY = windows.SE_REGISTRY_WOW64_32KEY // Deprecated: use golang.org/x/sys/windows.SE_REGISTRY_WOW64_32KEY +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + +var ( + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion = osversion.OSVersion + +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx +// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported) +type osVersionInfoEx struct { + OSVersionInfoSize uint32 + MajorVersion uint32 + MinorVersion uint32 + BuildNumber uint32 + PlatformID uint32 + CSDVersion [128]uint16 + ServicePackMajor uint16 + ServicePackMinor uint16 + SuiteMask uint16 + ProductType byte + Reserve byte +} + +// GetOSVersion gets the operating system version on Windows. Note that +// dockerd.exe must be manifested to get the correct version information. 
+// Deprecated: use github.com/Microsoft/hcsshim/osversion.Get() instead +func GetOSVersion() OSVersion { + return osversion.Get() +} + +// IsWindowsClient returns true if the SKU is client +func IsWindowsClient() bool { + osviex := &osVersionInfoEx{OSVersionInfoSize: 284} + r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) + if r1 == 0 { + logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) + return false + } + const verNTWorkstation = 0x00000001 + return osviex.ProductType == verNTWorkstation +} + +// Unmount is a platform-specific helper function to call +// the unmount syscall. Not supported on Windows +func Unmount(_ string) error { + return nil +} + +// HasWin32KSupport determines whether containers that depend on win32k can +// run on this machine. Win32k is the driver used to implement windowing. +func HasWin32KSupport() bool { + // For now, check for ntuser API support on the host. In the future, a host + // may support win32k in containers even if the host does not support ntuser + // APIs. + return ntuserApiset.Load() == nil +} + +// Deprecated: use golang.org/x/sys/windows.SetNamedSecurityInfo() +func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + result = syscall.Errno(r0) + } + return +} + +// Deprecated: uses golang.org/x/sys/windows.SecurityDescriptorFromString() and golang.org/x/sys/windows.SECURITY_DESCRIPTOR.DACL() +func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) + if r1 == 0 { + if e1 != 0 { + result = e1 + } else { + result = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/docker/docker/pkg/system/umask.go new file mode 100644 index 0000000000..9912a2babb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask.go @@ -0,0 +1,13 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "golang.org/x/sys/unix" +) + +// Umask sets current process's file mode creation mask to newmask +// and returns oldmask. +func Umask(newmask int) (oldmask int, err error) { + return unix.Umask(newmask), nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/docker/docker/pkg/system/umask_windows.go new file mode 100644 index 0000000000..fc62388c38 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -0,0 +1,7 @@ +package system // import "github.com/docker/docker/pkg/system" + +// Umask is not supported on the windows platform. 
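Before the Umask stub below, a short illustration of how a caller might drive the Windows-only helpers above. This is a minimal sketch, not part of the vendored patch; it assumes the vendored import path and a Windows build:

//go:build windows

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// IsWindowsClient calls GetVersionExW and falls back to the
	// server SKU when the syscall fails.
	fmt.Println("client SKU:", system.IsWindowsClient())

	// HasWin32KSupport probes the ntuser API set on the host.
	fmt.Println("win32k support:", system.HasWin32KSupport())
}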
+func Umask(newmask int) (oldmask int, err error) { + // should not be called on the CLI code path + return 0, ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unix.go b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go new file mode 100644 index 0000000000..61ba8c474c --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unix.go @@ -0,0 +1,24 @@ +// +build linux freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// LUtimesNano is used to change access and modification time of the specified path. +// It's used for symbolic link files because unix.UtimesNano doesn't support a NOFOLLOW flag at the moment. +func LUtimesNano(path string, ts []syscall.Timespec) error { + uts := []unix.Timespec{ + unix.NsecToTimespec(syscall.TimespecToNsec(ts[0])), + unix.NsecToTimespec(syscall.TimespecToNsec(ts[1])), + } + err := unix.UtimesNanoAt(unix.AT_FDCWD, path, uts, unix.AT_SYMLINK_NOFOLLOW) + if err != nil && err != unix.ENOSYS { + return err + } + + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go new file mode 100644 index 0000000000..095e072e1d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -0,0 +1,10 @@ +// +build !linux,!freebsd + +package system // import "github.com/docker/docker/pkg/system" + +import "syscall" + +// LUtimesNano is only supported on linux and freebsd. +func LUtimesNano(path string, ts []syscall.Timespec) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go new file mode 100644 index 0000000000..95b609fe7a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -0,0 +1,37 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "golang.org/x/sys/unix" + +// Lgetxattr retrieves the value of the extended attribute identified by attr +// and associated with the given path in the file system. +// It will return a nil slice and nil error if the xattr is not set. +func Lgetxattr(path string, attr string) ([]byte, error) { + // Start with a 128-byte buffer + dest := make([]byte, 128) + sz, errno := unix.Lgetxattr(path, attr, dest) + + for errno == unix.ERANGE { + // Buffer too small, use zero-sized buffer to get the actual size + sz, errno = unix.Lgetxattr(path, attr, []byte{}) + if errno != nil { + return nil, errno + } + dest = make([]byte, sz) + sz, errno = unix.Lgetxattr(path, attr, dest) + } + + switch { + case errno == unix.ENODATA: + return nil, nil + case errno != nil: + return nil, errno + } + + return dest[:sz], nil +} + +// Lsetxattr sets the value of the extended attribute identified by attr +// and associated with the given path in the file system. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return unix.Lsetxattr(path, attr, data, flags) +} diff --git a/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go new file mode 100644 index 0000000000..d780a90cd3 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -0,0 +1,13 @@ +// +build !linux + +package system // import "github.com/docker/docker/pkg/system" + +// Lgetxattr is not supported on platforms other than linux.
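Before the non-Linux stubs that follow, here is how the Linux-side helpers in pkg/system compose from a caller's perspective. A minimal sketch, assuming the vendored import path; the path and attribute name are hypothetical:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	// Stat wraps os.Stat and returns the platform-neutral StatT.
	st, err := system.Stat("/etc/hostname")
	if err != nil {
		panic(err)
	}
	fmt.Println(st.Size(), st.Mode(), st.Mtim())

	// Lgetxattr reports an unset attribute as (nil, nil) rather than an error.
	val, err := system.Lgetxattr("/etc/hostname", "user.example")
	if err != nil {
		panic(err)
	}
	fmt.Println("xattr set:", val != nil)
}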
+func Lgetxattr(path string, attr string) ([]byte, error) { + return nil, ErrNotSupportedPlatform +} + +// Lsetxattr is not supported on platforms other than linux. +func Lsetxattr(path string, attr string, data []byte, flags int) error { + return ErrNotSupportedPlatform +} diff --git a/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go b/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go new file mode 100644 index 0000000000..bafb7b07f8 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/lcow_parser.go @@ -0,0 +1,34 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "path" + + "github.com/docker/docker/api/types/mount" +) + +var lcowSpecificValidators mountValidator = func(m *mount.Mount) error { + if path.Clean(m.Target) == "/" { + return ErrVolumeTargetIsRoot + } + if m.Type == mount.TypeNamedPipe { + return errors.New("Linux containers on Windows do not support named pipe mounts") + } + return nil +} + +type lcowParser struct { + windowsParser +} + +func (p *lcowParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators) +} + +func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/linux_parser.go b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go new file mode 100644 index 0000000000..b5ed4af6a3 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/linux_parser.go @@ -0,0 +1,423 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "fmt" + "path" + "path/filepath" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" +) + +type linuxParser struct { +} + +func linuxSplitRawSpec(raw string) ([]string, error) { + if strings.Count(raw, ":") > 2 { + return nil, errInvalidSpec(raw) + } + + arr := strings.SplitN(raw, ":", 3) + if arr[0] == "" { + return nil, errInvalidSpec(raw) + } + return arr, nil +} + +func linuxValidateNotRoot(p string) error { + p = path.Clean(strings.Replace(p, `\`, `/`, -1)) + if p == "/" { + return ErrVolumeTargetIsRoot + } + return nil +} +func linuxValidateAbsolute(p string) error { + p = strings.Replace(p, `\`, `/`, -1) + if path.IsAbs(p) { + return nil + } + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) +} +func (p *linuxParser) ValidateMountConfig(mnt *mount.Mount) error { + // There appears to be a bug in the existing codebase: + // - validateMountConfig on linux was called with options skipping the bind source existence check when calling ParseMountRaw + // - but not when calling ParseMountSpec directly,
nor when the unit test called it directly + return p.validateMountConfigImpl(mnt, true) +} +func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error { + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := linuxValidateNotRoot(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + if err := linuxValidateAbsolute(mnt.Target); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 && len(linuxPropagationModes) > 0 { + if _, ok := linuxPropagationModes[opts.Propagation]; !ok { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := linuxValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + if validateBindSourceExists { + exists, _, err := currentFileInfoProvider.fileInfo(mnt.Source) + if err != nil { + return &errMountConfig{mnt, err} + } + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + case mount.TypeTmpfs: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + if len(mnt.Source) != 0 { + return &errMountConfig{mnt, errExtraField("Source")} + } + if _, err := p.ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil { + return &errMountConfig{mnt, err} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} + +// read-write modes +var rwModes = map[string]bool{ + "rw": true, + "ro": true, +} + +// label modes +var linuxLabelModes = map[string]bool{ + "Z": true, + "z": true, +} + +// consistency modes +var linuxConsistencyModes = map[mount.Consistency]bool{ + mount.ConsistencyFull: true, + mount.ConsistencyCached: true, + mount.ConsistencyDelegated: true, +} +var linuxPropagationModes = map[mount.Propagation]bool{ + mount.PropagationPrivate: true, + mount.PropagationRPrivate: true, + mount.PropagationSlave: true, + mount.PropagationRSlave: true, + mount.PropagationShared: true, + mount.PropagationRShared: true, +} + +const linuxDefaultPropagationMode = mount.PropagationRPrivate + +func linuxGetPropagation(mode string) mount.Propagation { + for _, o := range strings.Split(mode, ",") { + prop := mount.Propagation(o) + if linuxPropagationModes[prop] { + return prop + } + } + return linuxDefaultPropagationMode +} + +func linuxHasPropagation(mode string) bool { + for _, o := range strings.Split(mode, ",") { + if linuxPropagationModes[mount.Propagation(o)] { + return true + } + } + return false +} + +func linuxValidMountMode(mode string) bool { + if mode == "" { + return true + } + + rwModeCount := 0 + labelModeCount := 0 + propagationModeCount := 0 + copyModeCount := 0 + consistencyModeCount := 0 + + for _, o := range strings.Split(mode, ",") { + switch { + case rwModes[o]: + rwModeCount++ + case linuxLabelModes[o]: 
+ labelModeCount++ + case linuxPropagationModes[mount.Propagation(o)]: + propagationModeCount++ + case copyModeExists(o): + copyModeCount++ + case linuxConsistencyModes[mount.Consistency(o)]: + consistencyModeCount++ + default: + return false + } + } + + // Only one string for each mode is allowed. + if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 { + return false + } + return true +} + +func (p *linuxParser) ReadWrite(mode string) bool { + if !linuxValidMountMode(mode) { + return false + } + + for _, o := range strings.Split(mode, ",") { + if o == "ro" { + return false + } + } + return true +} + +func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + arr, err := linuxSplitRawSpec(raw) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if linuxValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = arr[0] + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = arr[0] + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + + if !linuxValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + if path.IsAbs(spec.Source) { + spec.Type = mount.TypeBind + } else { + spec.Type = mount.TypeVolume + } + + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + if linuxHasPropagation(mode) { + spec.BindOptions = &mount.BindOptions{ + Propagation: linuxGetPropagation(mode), + } + } + + mp, err := p.parseMountSpec(spec, false) + if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} +func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, true) +} +func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) { + if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: path.Clean(filepath.ToSlash(cfg.Target)), + Type: cfg.Type, + Spec: cfg, + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateRandomID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = path.Clean(filepath.ToSlash(cfg.Source)) + if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 { + mp.Propagation = cfg.BindOptions.Propagation + } else { + // If user did not specify a propagation mode, get + // default propagation mode. 
+ mp.Propagation = linuxDefaultPropagationMode + } + case mount.TypeTmpfs: + // NOP + } + return mp, nil +} + +func (p *linuxParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !linuxValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + // For now don't allow propagation properties while importing + // volumes from data container. These volumes will inherit + // the same propagation property as of the original volume + // in data container. This probably can be relaxed in future. + if linuxHasPropagation(mode) { + return "", "", errInvalidMode(mode) + } + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *linuxParser) DefaultPropagationMode() mount.Propagation { + return linuxDefaultPropagationMode +} + +func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + var rawOpts []string + if readOnly { + rawOpts = append(rawOpts, "ro") + } + + if opt != nil && opt.Mode != 0 { + rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode)) + } + + if opt != nil && opt.SizeBytes != 0 { + // calculate suffix here, making this linux specific, but that is + // okay, since API is that way anyways. + + // we do this by finding the suffix that divides evenly into the + // value, returning the value itself, with no suffix, if it fails. + // + // For the most part, we don't enforce any semantic to this values. + // The operating system will usually align this and enforce minimum + // and maximums. + var ( + size = opt.SizeBytes + suffix string + ) + for _, r := range []struct { + suffix string + divisor int64 + }{ + {"g", 1 << 30}, + {"m", 1 << 20}, + {"k", 1 << 10}, + } { + if size%r.divisor == 0 { + size = size / r.divisor + suffix = r.suffix + break + } + } + + rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix)) + } + return strings.Join(rawOpts, ","), nil +} + +func (p *linuxParser) DefaultCopyMode() bool { + return true +} +func (p *linuxParser) ValidateVolumeName(name string) error { + return nil +} + +func (p *linuxParser) IsBackwardCompatible(m *MountPoint) bool { + return len(m.Source) > 0 || m.Driver == volume.DefaultDriverName +} + +func (p *linuxParser) ValidateTmpfsMountDestination(dest string) error { + if err := linuxValidateNotRoot(dest); err != nil { + return err + } + return linuxValidateAbsolute(dest) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/mounts.go b/vendor/github.com/docker/docker/volume/mounts/mounts.go new file mode 100644 index 0000000000..c441e51ed9 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/mounts.go @@ -0,0 +1,181 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + mounttypes "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/stringid" + "github.com/docker/docker/volume" + "github.com/opencontainers/selinux/go-selinux/label" + "github.com/pkg/errors" +) + +// MountPoint is the intersection point between a volume and a container. It +// specifies which volume is to be used and where inside a container it should +// be mounted. 
+// +// Note that this type is embedded in the `container.Container` object and persisted to disk. +// Changes to this struct need to be synced with on-disk state. +type MountPoint struct { + // Source is the source path of the mount. + // E.g. `mount --bind /foo /bar`, `/foo` is the `Source`. + Source string + // Destination is the path relative to the container root (`/`) to the mount point. + // It is where the `Source` is mounted to + Destination string + // RW is set to true when the mountpoint should be mounted as read-write + RW bool + // Name is the name reference to the underlying data defined by `Source` + // e.g., the volume name + Name string + // Driver is the volume driver used to create the volume (if it is a volume) + Driver string + // Type of mount to use, see `Type` definitions in github.com/docker/docker/api/types/mount + Type mounttypes.Type `json:",omitempty"` + // Volume is the volume providing data to this mountpoint. + // This is nil unless `Type` is set to `TypeVolume` + Volume volume.Volume `json:"-"` + + // Mode is the comma separated list of options supplied by the user when creating + // the bind/volume mount. + // Note Mode is not used on Windows + Mode string `json:"Relabel,omitempty"` // Originally field was `Relabel` + + // Propagation describes how the mounts are propagated from the host into the + // mount point, and vice-versa. + // See https://www.kernel.org/doc/Documentation/filesystems/sharedsubtree.txt + // Note Propagation is not used on Windows + Propagation mounttypes.Propagation `json:",omitempty"` // Mount propagation string + + // Specifies if data should be copied from the container before the first mount + // Use a pointer here so we can tell if the user set this value explicitly + // This allows us to error out when the user explicitly enabled copy but we can't copy due to the volume being populated + CopyData bool `json:"-"` + // ID is the opaque ID used to pass to the volume driver. + // This should be set by calls to `Mount` and unset by calls to `Unmount` + ID string `json:",omitempty"` + + // Spec is a copy of the API request that created this mount. + Spec mounttypes.Mount + + // Some bind mounts should not be automatically created. + // (Some are auto-created for backwards-compatibility) + // This is checked on the API but setting this here prevents race conditions + // where a bind dir that existed during validation is removed before reaching the setup code. + SkipMountpointCreation bool + + // Track usage of this mountpoint + // Specifically needed for containers which are running and calls to `docker cp` + // because both these actions require mounting the volumes. + active int +} + +// Cleanup frees resources used by the mountpoint +func (m *MountPoint) Cleanup() error { + if m.Volume == nil || m.ID == "" { + return nil + } + + if err := m.Volume.Unmount(m.ID); err != nil { + return errors.Wrapf(err, "error unmounting volume %s", m.Volume.Name()) + } + + m.active-- + if m.active == 0 { + m.ID = "" + } + return nil +} + +// Setup sets up a mount point by either mounting the volume if it is +// configured, or creating the source directory if supplied. +// The optional checkFun parameter allows additional checking +// before creating the source directory on the host.
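Before the Setup implementation below, it helps to see how the linux parser above turns a raw -v spec into a MountPoint. A minimal sketch, assuming the vendored import path and a Linux host; NewParser is defined later in this patch (parser.go) and the paths are hypothetical:

package main

import (
	"fmt"

	"github.com/docker/docker/volume/mounts"
)

func main() {
	p := mounts.NewParser("linux")

	// "/src:/dst:ro,rslave" parses to a read-only bind mount with
	// rslave propagation; the bind source need not exist for ParseMountRaw.
	mp, err := p.ParseMountRaw("/src:/dst:ro,rslave", "")
	if err != nil {
		panic(err)
	}
	fmt.Println(mp.Source, mp.Destination, mp.RW, mp.Propagation)

	// volumes-from specs reject propagation and copy modes.
	id, mode, err := p.ParseVolumesFrom("parent:ro")
	fmt.Println(id, mode, err)
}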
+func (m *MountPoint) Setup(mountLabel string, rootIDs idtools.Identity, checkFun func(m *MountPoint) error) (path string, err error) { + if m.SkipMountpointCreation { + return m.Source, nil + } + + defer func() { + if err != nil || !label.RelabelNeeded(m.Mode) { + return + } + + var sourcePath string + sourcePath, err = filepath.EvalSymlinks(m.Source) + if err != nil { + path = "" + err = errors.Wrapf(err, "error evaluating symlinks from mount source %q", m.Source) + return + } + err = label.Relabel(sourcePath, mountLabel, label.IsShared(m.Mode)) + if errors.Is(err, syscall.ENOTSUP) { + err = nil + } + if err != nil { + path = "" + err = errors.Wrapf(err, "error setting label on mount source '%s'", sourcePath) + } + }() + + if m.Volume != nil { + id := m.ID + if id == "" { + id = stringid.GenerateRandomID() + } + path, err := m.Volume.Mount(id) + if err != nil { + return "", errors.Wrapf(err, "error while mounting volume '%s'", m.Source) + } + + m.ID = id + m.active++ + return path, nil + } + + if len(m.Source) == 0 { + return "", fmt.Errorf("Unable to setup mount point, neither source nor volume defined") + } + + if m.Type == mounttypes.TypeBind { + // Before creating the source directory on the host, invoke checkFun if it's not nil. One of + // the use cases is to forbid creating the daemon socket as a directory if the daemon is in + // the process of shutting down. + if checkFun != nil { + if err := checkFun(m); err != nil { + return "", err + } + } + + // idtools.MkdirAllAndChownNew() produces an error if m.Source exists and is a file (not a directory) + // it also makes sure that, if the directory is created, the correct remapped rootUID/rootGID will own it + if err := idtools.MkdirAllAndChownNew(m.Source, 0755, rootIDs); err != nil { + if perr, ok := err.(*os.PathError); ok { + if perr.Err != syscall.ENOTDIR { + return "", errors.Wrapf(err, "error while creating mount source path '%s'", m.Source) + } + } + } + } + return m.Source, nil +} + +// Path returns the path of a volume in a mount point. +func (m *MountPoint) Path() string { + if m.Volume != nil { + return m.Volume.Path() + } + return m.Source +} + +func errInvalidMode(mode string) error { + return errors.Errorf("invalid mode: %v", mode) +} + +func errInvalidSpec(spec string) error { + return errors.Errorf("invalid volume specification: '%s'", spec) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/parser.go b/vendor/github.com/docker/docker/volume/mounts/parser.go new file mode 100644 index 0000000000..73681750ea --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/parser.go @@ -0,0 +1,47 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "runtime" + + "github.com/docker/docker/api/types/mount" +) + +const ( + // OSLinux is the same as runtime.GOOS on linux + OSLinux = "linux" + // OSWindows is the same as runtime.GOOS on windows + OSWindows = "windows" +) + +// ErrVolumeTargetIsRoot is returned when the target destination is root. +// It's used by both LCOW and Linux parsers.
+var ErrVolumeTargetIsRoot = errors.New("invalid specification: destination can't be '/'") + +// Parser represents a platform-specific parser for mount expressions +type Parser interface { + ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) + ParseMountSpec(cfg mount.Mount) (*MountPoint, error) + ParseVolumesFrom(spec string) (string, string, error) + DefaultPropagationMode() mount.Propagation + ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) + DefaultCopyMode() bool + ValidateVolumeName(name string) error + ReadWrite(mode string) bool + IsBackwardCompatible(m *MountPoint) bool + HasResource(m *MountPoint, absPath string) bool + ValidateTmpfsMountDestination(dest string) error + ValidateMountConfig(mt *mount.Mount) error +} + +// NewParser creates a parser for a given container OS, depending on the current host OS (linux on a windows host will resolve to an lcowParser) +func NewParser(containerOS string) Parser { + switch containerOS { + case OSWindows: + return &windowsParser{} + } + if runtime.GOOS == OSWindows { + return &lcowParser{} + } + return &linuxParser{} +} diff --git a/vendor/github.com/docker/docker/volume/mounts/validate.go b/vendor/github.com/docker/docker/volume/mounts/validate.go new file mode 100644 index 0000000000..9fc9109021 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/validate.go @@ -0,0 +1,28 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + + "github.com/docker/docker/api/types/mount" + "github.com/pkg/errors" +) + +type errMountConfig struct { + mount *mount.Mount + err error +} + +func (e *errMountConfig) Error() string { + return fmt.Sprintf("invalid mount config for type %q: %v", e.mount.Type, e.err.Error()) +} + +func errBindSourceDoesNotExist(path string) error { + return errors.Errorf("bind source path does not exist: %s", path) +} + +func errExtraField(name string) error { + return errors.Errorf("field %s must not be specified", name) +} +func errMissingField(name string) error { + return errors.Errorf("field %s must not be empty", name) +} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_copy.go b/vendor/github.com/docker/docker/volume/mounts/volume_copy.go new file mode 100644 index 0000000000..04056fa50a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/volume_copy.go @@ -0,0 +1,23 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import "strings" + +// {<copy mode>=isEnabled} +var copyModes = map[string]bool{ + "nocopy": false, +} + +func copyModeExists(mode string) bool { + _, exists := copyModes[mode] + return exists +} + +// getCopyMode gets the copy mode from the mode string for mounts +func getCopyMode(mode string, def bool) (bool, bool) { + for _, o := range strings.Split(mode, ",") { + if isEnabled, exists := copyModes[o]; exists { + return isEnabled, true + } + } + return def, false +} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_unix.go b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go new file mode 100644 index 0000000000..c6d51e0710 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/volume_unix.go @@ -0,0 +1,18 @@ +// +build linux freebsd darwin + +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "fmt" + "path/filepath" + "strings" +) + +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { + relPath, err := filepath.Rel(m.Destination, absolutePath) + return err == nil && relPath != ".."
&& !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator)) +} + +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/mounts/volume_windows.go b/vendor/github.com/docker/docker/volume/mounts/volume_windows.go new file mode 100644 index 0000000000..773e7db88a --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/volume_windows.go @@ -0,0 +1,8 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} +func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool { + return false +} diff --git a/vendor/github.com/docker/docker/volume/mounts/windows_parser.go b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go new file mode 100644 index 0000000000..8f427d8c50 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/mounts/windows_parser.go @@ -0,0 +1,456 @@ +package mounts // import "github.com/docker/docker/volume/mounts" + +import ( + "errors" + "fmt" + "os" + "regexp" + "runtime" + "strings" + + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/pkg/stringid" +) + +type windowsParser struct { +} + +const ( + // Spec should be in the format [source:]destination[:mode] + // + // Examples: c:\foo bar:d:rw + // c:\foo:d:\bar + // myname:d: + // d:\ + // + // Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See + // https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to + // test is https://regex-golang.appspot.com/assets/html/index.html + // + // Useful link for referencing named capturing groups: + // http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex + // + // There are three match groups: source, destination and mode. + // + + // rxHostDir is the first option of a source + rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*` + // rxName is the second option of a source + rxName = `[^\\/:*?"<>|\r\n]+` + + // rxReservedNames are reserved names not possible on Windows + rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])` + + // rxPipe is a named pipe path (starts with `\\.\pipe\`, possibly with / instead of \) + rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+` + // rxSource is the combined possibilities for a source + rxSource = `((?P<source>((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?` + + // Source. Can be either a host directory, a name, or omitted: + // HostDir: + // - Essentially using the folder solution from + // https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html + // but adding case insensitivity.
+ // - Must be an absolute path such as c:\path + // - Can include spaces such as `c:\program files` + // - And then followed by a colon which is not in the capture group + // - And can be optional + // Name: + // - Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) + // - And then followed by a colon which is not in the capture group + // - And can be optional + + // rxDestination is the regex for the mount destination + rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))` + + rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)` + // Destination (aka container path): + // - Variation on hostdir but can be a drive followed by colon as well + // - If a path, must be absolute. Can include spaces + // - Drive cannot be c: (explicitly checked in code, not RegEx) + + // rxMode is the regex for the mode of the mount + // Mode (optional): + // - Hopefully self-explanatory in comparison to the regexes above. + // - Colon is not in the capture group + rxMode = `(:(?P<mode>(?i)ro|rw))?` +) + +type mountValidator func(mnt *mount.Mount) error + +func windowsSplitRawSpec(raw, destRegex string) ([]string, error) { + specExp := regexp.MustCompile(`^` + rxSource + destRegex + rxMode + `$`) + match := specExp.FindStringSubmatch(strings.ToLower(raw)) + + // Must have something back + if len(match) == 0 { + return nil, errInvalidSpec(raw) + } + + var split []string + matchgroups := make(map[string]string) + // Pull out the sub expressions from the named capture groups + for i, name := range specExp.SubexpNames() { + matchgroups[name] = strings.ToLower(match[i]) + } + if source, exists := matchgroups["source"]; exists { + if source != "" { + split = append(split, source) + } + } + if destination, exists := matchgroups["destination"]; exists { + if destination != "" { + split = append(split, destination) + } + } + if mode, exists := matchgroups["mode"]; exists { + if mode != "" { + split = append(split, mode) + } + } + // Fix #26329. If the destination appears to be a file, and the source is null, + // it may be because we've fallen through the possible naming regex and hit a + // situation where the user intention was to map a file into a container through + // a local volume, but this is not supported by the platform. + if matchgroups["source"] == "" && matchgroups["destination"] != "" { + volExp := regexp.MustCompile(`^` + rxName + `$`) + reservedNameExp := regexp.MustCompile(`^` + rxReservedNames + `$`) + + if volExp.MatchString(matchgroups["destination"]) { + if reservedNameExp.MatchString(matchgroups["destination"]) { + return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"]) + } + } else { + + exists, isDir, _ := currentFileInfoProvider.fileInfo(matchgroups["destination"]) + if exists && !isDir { + return nil, fmt.Errorf("file '%s' cannot be mapped.
Only directories can be mapped on this platform", matchgroups["destination"]) + + } + } + } + return split, nil +} + +func windowsValidMountMode(mode string) bool { + if mode == "" { + return true + } + return rwModes[strings.ToLower(mode)] +} +func windowsValidateNotRoot(p string) error { + p = strings.ToLower(strings.Replace(p, `/`, `\`, -1)) + if p == "c:" || p == `c:\` { + return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p) + } + return nil +} + +var windowsSpecificValidators mountValidator = func(mnt *mount.Mount) error { + return windowsValidateNotRoot(mnt.Target) +} + +func windowsValidateRegex(p, r string) error { + if regexp.MustCompile(`^` + r + `$`).MatchString(strings.ToLower(p)) { + return nil + } + return fmt.Errorf("invalid mount path: '%s'", p) +} +func windowsValidateAbsolute(p string) error { + if err := windowsValidateRegex(p, rxDestination); err != nil { + return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p) + } + return nil +} + +func windowsDetectMountType(p string) mount.Type { + if strings.HasPrefix(p, `\\.\pipe\`) { + return mount.TypeNamedPipe + } else if regexp.MustCompile(`^` + rxHostDir + `$`).MatchString(p) { + return mount.TypeBind + } else { + return mount.TypeVolume + } +} + +func (p *windowsParser) ReadWrite(mode string) bool { + return strings.ToLower(mode) != "ro" +} + +// ValidateVolumeName checks a volume name in a platform-specific manner. +func (p *windowsParser) ValidateVolumeName(name string) error { + nameExp := regexp.MustCompile(`^` + rxName + `$`) + if !nameExp.MatchString(name) { + return errors.New("invalid volume name") + } + nameExp = regexp.MustCompile(`^` + rxReservedNames + `$`) + if nameExp.MatchString(name) { + return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name) + } + return nil +} +func (p *windowsParser) ValidateMountConfig(mnt *mount.Mount) error { + return p.validateMountConfigReg(mnt, rxDestination, windowsSpecificValidators) +} + +type fileInfoProvider interface { + fileInfo(path string) (exist, isDir bool, err error) +} + +type defaultFileInfoProvider struct { +} + +func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) { + fi, err := os.Stat(path) + if err != nil { + if !os.IsNotExist(err) { + return false, false, err + } + return false, false, nil + } + return true, fi.IsDir(), nil +} + +var currentFileInfoProvider fileInfoProvider = defaultFileInfoProvider{} + +func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex string, additionalValidators ...mountValidator) error { + + for _, v := range additionalValidators { + if err := v(mnt); err != nil { + return &errMountConfig{mnt, err} + } + } + if len(mnt.Target) == 0 { + return &errMountConfig{mnt, errMissingField("Target")} + } + + if err := windowsValidateRegex(mnt.Target, destRegex); err != nil { + return &errMountConfig{mnt, err} + } + + switch mnt.Type { + case mount.TypeBind: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + // Don't error out just because the propagation mode is not supported on the platform + if opts := mnt.BindOptions; opts != nil { + if len(opts.Propagation) > 0 { + return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)} + } + } + if mnt.VolumeOptions != nil { + return &errMountConfig{mnt, errExtraField("VolumeOptions")} + } + + if err := windowsValidateAbsolute(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + + exists, isdir, err :=
currentFileInfoProvider.fileInfo(mnt.Source) + if err != nil { + return &errMountConfig{mnt, err} + } + if !exists { + return &errMountConfig{mnt, errBindSourceDoesNotExist(mnt.Source)} + } + if !isdir { + return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")} + } + + case mount.TypeVolume: + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if len(mnt.Source) == 0 && mnt.ReadOnly { + return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")} + } + + if len(mnt.Source) != 0 { + if err := p.ValidateVolumeName(mnt.Source); err != nil { + return &errMountConfig{mnt, err} + } + } + case mount.TypeNamedPipe: + if len(mnt.Source) == 0 { + return &errMountConfig{mnt, errMissingField("Source")} + } + + if mnt.BindOptions != nil { + return &errMountConfig{mnt, errExtraField("BindOptions")} + } + + if mnt.ReadOnly { + return &errMountConfig{mnt, errExtraField("ReadOnly")} + } + + if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)} + } + + if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe { + return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)} + } + default: + return &errMountConfig{mnt, errors.New("mount type unknown")} + } + return nil +} +func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) { + return p.parseMountRaw(raw, volumeDriver, rxDestination, true, windowsSpecificValidators) +} + +func (p *windowsParser) parseMountRaw(raw, volumeDriver, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + arr, err := windowsSplitRawSpec(raw, destRegex) + if err != nil { + return nil, err + } + + var spec mount.Mount + var mode string + switch len(arr) { + case 1: + // Just a destination path in the container + spec.Target = arr[0] + case 2: + if windowsValidMountMode(arr[1]) { + // Destination + Mode is not a valid volume - volumes + // cannot include a mode. e.g. /foo:rw + return nil, errInvalidSpec(raw) + } + // Host Source Path or Name + Destination + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + case 3: + // HostSourcePath+DestinationPath+Mode + spec.Source = strings.Replace(arr[0], `/`, `\`, -1) + spec.Target = arr[1] + mode = arr[2] + default: + return nil, errInvalidSpec(raw) + } + if convertTargetToBackslash { + spec.Target = strings.Replace(spec.Target, `/`, `\`, -1) + } + + if !windowsValidMountMode(mode) { + return nil, errInvalidMode(mode) + } + + spec.Type = windowsDetectMountType(spec.Source) + spec.ReadOnly = !p.ReadWrite(mode) + + // cannot assume that if a volume driver is passed in that we should set it + if volumeDriver != "" && spec.Type == mount.TypeVolume { + spec.VolumeOptions = &mount.VolumeOptions{ + DriverConfig: &mount.Driver{Name: volumeDriver}, + } + } + + if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + if spec.VolumeOptions == nil { + spec.VolumeOptions = &mount.VolumeOptions{} + } + spec.VolumeOptions.NoCopy = !copyData + } + + mp, err := p.parseMountSpec(spec, destRegex, convertTargetToBackslash, additionalValidators...) 
+ if mp != nil { + mp.Mode = mode + } + if err != nil { + err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err) + } + return mp, err +} + +func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) { + return p.parseMountSpec(cfg, rxDestination, true, windowsSpecificValidators) +} +func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) { + if err := p.validateMountConfigReg(&cfg, destRegex, additionalValidators...); err != nil { + return nil, err + } + mp := &MountPoint{ + RW: !cfg.ReadOnly, + Destination: cfg.Target, + Type: cfg.Type, + Spec: cfg, + } + if convertTargetToBackslash { + mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1) + } + + switch cfg.Type { + case mount.TypeVolume: + if cfg.Source == "" { + mp.Name = stringid.GenerateRandomID() + } else { + mp.Name = cfg.Source + } + mp.CopyData = p.DefaultCopyMode() + + if cfg.VolumeOptions != nil { + if cfg.VolumeOptions.DriverConfig != nil { + mp.Driver = cfg.VolumeOptions.DriverConfig.Name + } + if cfg.VolumeOptions.NoCopy { + mp.CopyData = false + } + } + case mount.TypeBind: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + case mount.TypeNamedPipe: + mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1) + } + // cleanup trailing `\` except for paths like `c:\` + if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' { + mp.Source = mp.Source[:len(mp.Source)-1] + } + if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' { + mp.Destination = mp.Destination[:len(mp.Destination)-1] + } + return mp, nil +} + +func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) { + if len(spec) == 0 { + return "", "", fmt.Errorf("volumes-from specification cannot be an empty string") + } + + specParts := strings.SplitN(spec, ":", 2) + id := specParts[0] + mode := "rw" + + if len(specParts) == 2 { + mode = specParts[1] + if !windowsValidMountMode(mode) { + return "", "", errInvalidMode(mode) + } + + // Do not allow copy modes on volumes-from + if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet { + return "", "", errInvalidMode(mode) + } + } + return id, mode, nil +} + +func (p *windowsParser) DefaultPropagationMode() mount.Propagation { + return mount.Propagation("") +} + +func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) { + return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS) +} +func (p *windowsParser) DefaultCopyMode() bool { + return false +} +func (p *windowsParser) IsBackwardCompatible(m *MountPoint) bool { + return false +} + +func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error { + return errors.New("Platform does not support tmpfs") +} diff --git a/vendor/github.com/docker/docker/volume/volume.go b/vendor/github.com/docker/docker/volume/volume.go new file mode 100644 index 0000000000..61c8243979 --- /dev/null +++ b/vendor/github.com/docker/docker/volume/volume.go @@ -0,0 +1,69 @@ +package volume // import "github.com/docker/docker/volume" + +import ( + "time" +) + +// DefaultDriverName is the driver name used for the driver +// implemented in the local package. +const DefaultDriverName = "local" + +// Scopes define whether a volume is cluster-wide (global) or local only.
+// Scopes are returned by the volume driver when it is queried for capabilities and then set on a volume +const ( + LocalScope = "local" + GlobalScope = "global" +) + +// Driver is for creating and removing volumes. +type Driver interface { + // Name returns the name of the volume driver. + Name() string + // Create makes a new volume with the given name. + Create(name string, opts map[string]string) (Volume, error) + // Remove deletes the volume. + Remove(vol Volume) (err error) + // List lists all the volumes the driver has + List() ([]Volume, error) + // Get retrieves the volume with the requested name + Get(name string) (Volume, error) + // Scope returns the scope of the driver (e.g. `global` or `local`). + // Scope determines how the driver is handled at a cluster level + Scope() string +} + +// Capability defines a set of capabilities that a driver is able to handle. +type Capability struct { + // Scope is the scope of the driver, `global` or `local` + // A `global` scope indicates that the driver manages volumes across the cluster + // A `local` scope indicates that the driver only manages volumes resources local to the host + // Scope is declared by the driver + Scope string +} + +// Volume is a place to store data. It is backed by a specific driver, and can be mounted. +type Volume interface { + // Name returns the name of the volume + Name() string + // DriverName returns the name of the driver which owns this volume. + DriverName() string + // Path returns the absolute path to the volume. + Path() string + // Mount mounts the volume and returns the absolute path to + // where it can be consumed. + Mount(id string) (string, error) + // Unmount unmounts the volume when it is no longer in use. + Unmount(id string) error + // CreatedAt returns Volume Creation time + CreatedAt() (time.Time, error) + // Status returns low-level status information about a volume + Status() map[string]interface{} +} + +// DetailedVolume wraps a Volume with user-defined labels, options, and cluster scope (e.g., `local` or `global`) +type DetailedVolume interface { + Labels() map[string]string + Options() map[string]string + Scope() string + Volume +} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 0000000000..b55b37bc31 --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 0000000000..bb7e4e3369 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. 
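A usage sketch for the nat package that follows; this is illustrative only, assuming the vendored import path, with hypothetical addresses and ports:

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// NewPort normalizes a proto/port pair into the "8080/tcp" form.
	p, err := nat.NewPort("tcp", "8080")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Proto(), p.Port(), p.Int())

	// ParsePortSpecs expands "ip:hostPort:containerPort" specs into
	// exposed ports and host bindings.
	exposed, bindings, err := nat.ParsePortSpecs([]string{"127.0.0.1:80:8080/tcp"})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(exposed), bindings["8080/tcp"])
}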
+package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. + + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + 
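Editor's aside, not part of the vendored file: the `Port` helpers above compose as in the following minimal sketch, which assumes only the import path this patch vendors. The body of `ParsePortSpecs` continues in the diff right after.

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// NewPort validates the number and normalizes it into "80/tcp" form.
	p, err := nat.NewPort("tcp", "80")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Proto(), p.Port(), p.Int()) // tcp 80 80

	// A bare port string defaults to the tcp protocol when split.
	proto, port := nat.SplitProtoPort("8080")
	fmt.Println(proto, port) // tcp 8080
}
```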
} + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 0000000000..892adf8c66 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. 
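As an illustration of the `ip:hostPort:containerPort/proto` format that `splitParts` and `ParsePortSpec` above accept, a short sketch (illustrative only, not part of the patch); the `PartParser` comment resumes directly below with its own example.

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// ip:hostPort:containerPort/proto; a port range yields one mapping per port.
	mappings, err := nat.ParsePortSpec("127.0.0.1:8080:80/tcp")
	if err != nil {
		panic(err)
	}
	for _, m := range mappings {
		fmt.Printf("%s -> %s:%s\n", m.Port, m.Binding.HostIP, m.Binding.HostPort)
		// 80/tcp -> 127.0.0.1:8080
	}
}
```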
ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version +func PartParser(template, data string) (map[string]string, error) { + // ip:public:private + var ( + templateParts = strings.Split(template, ":") + parts = strings.Split(data, ":") + out = make(map[string]string, len(templateParts)) + ) + if len(parts) != len(templateParts) { + return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template) + } + + for i, t := range templateParts { + value := "" + if len(parts) > i { + value = parts[i] + } + out[t] = value + } + return out, nil +} + +// ParsePortRange parses and validates the specified string as a port-range (8000-9000) +func ParsePortRange(ports string) (uint64, uint64, error) { + if ports == "" { + return 0, 0, fmt.Errorf("Empty string specified for ports.") + } + if !strings.Contains(ports, "-") { + start, err := strconv.ParseUint(ports, 10, 16) + end := start + return start, end, err + } + + parts := strings.Split(ports, "-") + start, err := strconv.ParseUint(parts[0], 10, 16) + if err != nil { + return 0, 0, err + } + end, err := strconv.ParseUint(parts[1], 10, 16) + if err != nil { + return 0, 0, err + } + if end < start { + return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) + } + return start, end, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go new file mode 100644 index 0000000000..ce950171e3 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/sort.go @@ -0,0 +1,96 @@ +package nat + +import ( + "sort" + "strings" +) + +type portSorter struct { + ports []Port + by func(i, j Port) bool +} + +func (s *portSorter) Len() int { + return len(s.ports) +} + +func (s *portSorter) Swap(i, j int) { + s.ports[i], s.ports[j] = s.ports[j], s.ports[i] +} + +func (s *portSorter) Less(i, j int) bool { + ip := s.ports[i] + jp := s.ports[j] + + return s.by(ip, jp) +} + +// Sort sorts a list of ports using the provided predicate +// This function should compare `i` and `j`, returning true if `i` is +// considered to be less than `j` +func Sort(ports []Port, predicate func(i, j Port) bool) { + s := &portSorter{ports, predicate} + sort.Sort(s) +} + +type portMapEntry struct { + port Port + binding PortBinding +} + +type portMapSorter []portMapEntry + +func (s portMapSorter) Len() int { return len(s) } +func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// sort the ports so that the order is: +// 1. port with larger specified bindings +// 2. larger port +// 3. port with tcp protocol +func (s portMapSorter) Less(i, j int) bool { + pi, pj := s[i].port, s[j].port + hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) + return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") +} + +// SortPortMap sorts the list of ports and their respective mappings. The ports +// with an explicit HostPort will be placed first.
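A hedged example of the predicate-driven `Sort` just shown (not part of the vendored file; assumes the vendored import path). The `SortPortMap` implementation follows in the diff.

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	ports := []nat.Port{"8080/tcp", "22/tcp", "443/udp"}

	// The predicate defines "less"; here, ascending by numeric port.
	nat.Sort(ports, func(i, j nat.Port) bool { return i.Int() < j.Int() })
	fmt.Println(ports) // [22/tcp 443/udp 8080/tcp]
}
```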
+func SortPortMap(ports []Port, bindings PortMap) { + s := portMapSorter{} + for _, p := range ports { + if binding, ok := bindings[p]; ok { + for _, b := range binding { + s = append(s, portMapEntry{port: p, binding: b}) + } + bindings[p] = []PortBinding{} + } else { + s = append(s, portMapEntry{port: p}) + } + } + + sort.Sort(s) + var ( + i int + pm = make(map[Port]struct{}) + ) + // reorder ports + for _, entry := range s { + if _, ok := pm[entry.port]; !ok { + ports[i] = entry.port + pm[entry.port] = struct{}{} + i++ + } + // reorder bindings for this port + if _, ok := bindings[entry.port]; ok { + bindings[entry.port] = append(bindings[entry.port], entry.binding) + } + } +} + +func toInt(s string) uint64 { + i, _, err := ParsePortRange(s) + if err != nil { + i = 0 + } + return i +} diff --git a/vendor/github.com/docker/go-connections/sockets/README.md b/vendor/github.com/docker/go-connections/sockets/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go new file mode 100644 index 0000000000..99846ffddb --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/inmem_socket.go @@ -0,0 +1,81 @@ +package sockets + +import ( + "errors" + "net" + "sync" +) + +var errClosed = errors.New("use of closed network connection") + +// InmemSocket implements net.Listener using in-memory only connections. +type InmemSocket struct { + chConn chan net.Conn + chClose chan struct{} + addr string + mu sync.Mutex +} + +// dummyAddr is used to satisfy net.Addr for the in-mem socket +// it is just stored as a string and returns the string for all calls +type dummyAddr string + +// NewInmemSocket creates an in-memory only net.Listener +// The addr argument can be any string, but is used to satisfy the `Addr()` part +// of the net.Listener interface +func NewInmemSocket(addr string, bufSize int) *InmemSocket { + return &InmemSocket{ + chConn: make(chan net.Conn, bufSize), + chClose: make(chan struct{}), + addr: addr, + } +} + +// Addr returns the socket's addr string to satisfy net.Listener +func (s *InmemSocket) Addr() net.Addr { + return dummyAddr(s.addr) +} + +// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. +func (s *InmemSocket) Accept() (net.Conn, error) { + select { + case conn := <-s.chConn: + return conn, nil + case <-s.chClose: + return nil, errClosed + } +} + +// Close closes the listener. It will be unavailable for use once closed. 
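A sketch of `SortPortMap`'s effect, illustrative only: a port carrying an explicit host binding sorts ahead of one without. The `Close` implementation continues in the diff below.

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	ports := []nat.Port{"80/tcp", "443/tcp"}
	bindings := nat.PortMap{
		"443/tcp": {{HostIP: "0.0.0.0", HostPort: "8443"}},
	}

	// Sorts ports in place and reorders each port's bindings to match.
	nat.SortPortMap(ports, bindings)
	fmt.Println(ports) // [443/tcp 80/tcp]: the explicit HostPort sorts first
}
```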
+func (s *InmemSocket) Close() error { + s.mu.Lock() + defer s.mu.Unlock() + select { + case <-s.chClose: + default: + close(s.chClose) + } + return nil +} + +// Dial is used to establish a connection with the in-mem server +func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { + srvConn, clientConn := net.Pipe() + select { + case s.chConn <- srvConn: + case <-s.chClose: + return nil, errClosed + } + + return clientConn, nil +} + +// Network returns the addr string, satisfies net.Addr +func (a dummyAddr) Network() string { + return string(a) +} + +// String returns the string form +func (a dummyAddr) String() string { + return string(a) +} diff --git a/vendor/github.com/docker/go-connections/sockets/proxy.go b/vendor/github.com/docker/go-connections/sockets/proxy.go new file mode 100644 index 0000000000..98e9a1dc61 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/proxy.go @@ -0,0 +1,51 @@ +package sockets + +import ( + "net" + "net/url" + "os" + "strings" + + "golang.org/x/net/proxy" +) + +// GetProxyEnv allows access to the uppercase and the lowercase forms of +// proxy-related variables. See the Go specification for details on these +// variables. https://golang.org/pkg/net/http/ +func GetProxyEnv(key string) string { + proxyValue := os.Getenv(strings.ToUpper(key)) + if proxyValue == "" { + return os.Getenv(strings.ToLower(key)) + } + return proxyValue +} + +// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a +// proxy.Dialer which will route the connections through the proxy using the +// given dialer. +func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { + allProxy := GetProxyEnv("all_proxy") + if len(allProxy) == 0 { + return direct, nil + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return direct, err + } + + proxyFromURL, err := proxy.FromURL(proxyURL, direct) + if err != nil { + return direct, err + } + + noProxy := GetProxyEnv("no_proxy") + if len(noProxy) == 0 { + return proxyFromURL, nil + } + + perHost := proxy.NewPerHost(proxyFromURL, direct) + perHost.AddFromString(noProxy) + + return perHost, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets.go b/vendor/github.com/docker/go-connections/sockets/sockets.go new file mode 100644 index 0000000000..a1d7beb4d8 --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets.go @@ -0,0 +1,38 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "errors" + "net" + "net/http" + "time" +) + +// Why 32? See https://github.com/docker/docker/pull/8035. +const defaultTimeout = 32 * time.Second + +// ErrProtocolNotAvailable is returned when a given transport protocol is not provided by the operating system. +var ErrProtocolNotAvailable = errors.New("protocol not available") + +// ConfigureTransport configures the specified Transport according to the +// specified proto and addr. +// If the proto is unix (using a unix socket to communicate) or npipe the +// compression is disabled. 
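The in-memory listener above can be exercised end to end; a minimal round-trip sketch under the same import-path assumption as the earlier asides. The `ConfigureTransport` body follows in the diff.

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/sockets"
)

func main() {
	l := sockets.NewInmemSocket("inmem-test", 1)
	defer l.Close()

	go func() {
		conn, err := l.Accept() // receives the server end of the pipe from Dial
		if err != nil {
			return
		}
		defer conn.Close()
		conn.Write([]byte("hello"))
	}()

	conn, err := l.Dial("inmem", "inmem-test")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	buf := make([]byte, 5)
	if _, err := conn.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // hello
}
```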
+func ConfigureTransport(tr *http.Transport, proto, addr string) error { + switch proto { + case "unix": + return configureUnixTransport(tr, proto, addr) + case "npipe": + return configureNpipeTransport(tr, proto, addr) + default: + tr.Proxy = http.ProxyFromEnvironment + dialer, err := DialerFromEnvironment(&net.Dialer{ + Timeout: defaultTimeout, + }) + if err != nil { + return err + } + tr.Dial = dialer.Dial + } + return nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go new file mode 100644 index 0000000000..386cf0dbbd --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package sockets + +import ( + "fmt" + "net" + "net/http" + "syscall" + "time" +) + +const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + if len(addr) > maxUnixSocketPathSize { + return fmt.Errorf("Unix socket path %q is too long", addr) + } + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return net.DialTimeout(proto, addr, defaultTimeout) + } + return nil +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +// DialPipe connects to a Windows named pipe. +// This is not supported on other OSes. +func DialPipe(_ string, _ time.Duration) (net.Conn, error) { + return nil, syscall.EAFNOSUPPORT +} diff --git a/vendor/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go new file mode 100644 index 0000000000..5c21644e1f --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/sockets_windows.go @@ -0,0 +1,27 @@ +package sockets + +import ( + "net" + "net/http" + "time" + + "github.com/Microsoft/go-winio" +) + +func configureUnixTransport(tr *http.Transport, proto, addr string) error { + return ErrProtocolNotAvailable +} + +func configureNpipeTransport(tr *http.Transport, proto, addr string) error { + // No need for compression in local communications. + tr.DisableCompression = true + tr.Dial = func(_, _ string) (net.Conn, error) { + return DialPipe(addr, defaultTimeout) + } + return nil +} + +// DialPipe connects to a Windows named pipe. +func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { + return winio.DialPipe(addr, &timeout) +} diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go new file mode 100644 index 0000000000..53cbb6c79e --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -0,0 +1,22 @@ +// Package sockets provides helper functions to create and configure Unix or TCP sockets. +package sockets + +import ( + "crypto/tls" + "net" +) + +// NewTCPSocket creates a TCP socket listener with the specified address and +// the specified tls configuration. If TLSConfig is set, will encapsulate the +// TCP listener inside a TLS one. 
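To make the transport wiring above concrete, a usage sketch (not part of the patch; the socket path is only an example). The `NewTCPSocket` implementation follows in the diff.

```go
package main

import (
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}

	// After this call, every request on the transport is dialed over the
	// unix socket regardless of the URL's host, and compression is off.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}

	client := &http.Client{Transport: tr}
	_ = client // requests now travel over the socket
}
```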
+func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { + l, err := net.Listen("tcp", addr) + if err != nil { + return nil, err + } + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"http/1.1"} + l = tls.NewListener(l, tlsConfig) + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/github.com/docker/go-connections/sockets/unix_socket.go new file mode 100644 index 0000000000..a8b5dbb6fd --- /dev/null +++ b/vendor/github.com/docker/go-connections/sockets/unix_socket.go @@ -0,0 +1,32 @@ +// +build !windows + +package sockets + +import ( + "net" + "os" + "syscall" +) + +// NewUnixSocket creates a unix socket with the specified path and group. +func NewUnixSocket(path string, gid int) (net.Listener, error) { + if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { + return nil, err + } + mask := syscall.Umask(0777) + defer syscall.Umask(mask) + + l, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + if err := os.Chown(path, 0, gid); err != nil { + l.Close() + return nil, err + } + if err := os.Chmod(path, 0660); err != nil { + l.Close() + return nil, err + } + return l, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 0000000000..1ca0965e06 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,18 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" +) + +// SystemCertPool returns a copy of the system cert pool; it returns an error +// if the pool failed to load, or an empty pool on windows. +func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go new file mode 100644 index 0000000000..1ff81c333c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -0,0 +1,13 @@ +// +build !go1.7 + +package tlsconfig + +import ( + "crypto/x509" +) + +// SystemCertPool returns a new empty cert pool; +// accessing the system cert pool requires go 1.7 or later. +func SystemCertPool() (*x509.CertPool, error) { + return x509.NewCertPool(), nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go new file mode 100644 index 0000000000..0ef3fdcb46 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -0,0 +1,254 @@ +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +// As a reminder from https://golang.org/pkg/crypto/tls/#Config: +// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. +// A Config may be reused; the tls package will also not modify it. +package tlsconfig + +import ( + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + + "github.com/pkg/errors" +) + +// Options represents the information needed to create client and server TLS configurations.
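A quick sketch of the listener helpers above (illustrative only; the address is an example, not a recommendation). The `Options` struct follows in the diff.

```go
package main

import (
	"github.com/docker/go-connections/sockets"
)

func main() {
	// Plain TCP listener; pass a *tls.Config instead of nil to get a TLS
	// listener negotiating http/1.1.
	l, err := sockets.NewTCPSocket("127.0.0.1:2376", nil)
	if err != nil {
		panic(err)
	}
	defer l.Close()
}
```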
+type Options struct { + CAFile string + + // If either CertFile or KeyFile is empty, Client() will not load them, + // preventing the client from authenticating to the server. + // However, Server() requires them and will error out if they are empty. + CertFile string + KeyFile string + + // client-only option + InsecureSkipVerify bool + // server-only option + ClientAuth tls.ClientAuthType + // If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS + // creds will include exclusively the roots in that CA file. If no CA file is provided, + // the system pool will be used. + ExclusiveRootPools bool + MinVersion uint16 + // If Passphrase is set, it will be used to decrypt a TLS private key + // if the key is encrypted + Passphrase string +} + +// Extra (server-side) accepted CBC cipher suites - will be phased out in the future +var acceptedCBCCiphers = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, +} + +// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls +// options struct but wants to use a commonly accepted set of TLS cipher suites, with +// known weak algorithms removed. +var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) + +// allTLSVersions lists all the TLS versions and is used by the code that validates +// a uint16 value as a TLS version. +var allTLSVersions = map[uint16]struct{}{ + tls.VersionSSL30: {}, + tls.VersionTLS10: {}, + tls.VersionTLS11: {}, + tls.VersionTLS12: {}, +} + +// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. +func ServerDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Avoid fallback by default to SSL protocols < TLS1.2 + MinVersion: tls.VersionTLS12, + PreferServerCipherSuites: true, + CipherSuites: DefaultServerAcceptedCiphers, + } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig +} + +// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. +func ClientDefault(ops ...func(*tls.Config)) *tls.Config { + tlsconfig := &tls.Config{ + // Prefer TLS1.2 as the client minimum + MinVersion: tls.VersionTLS12, + CipherSuites: clientCipherSuites, + } + + for _, op := range ops { + op(tlsconfig) + } + + return tlsconfig +} + +// certPool returns an X.509 certificate pool from `caFile`, the certificate file. +func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) { + // If we should verify the server, we need to load a trusted ca + var ( + certPool *x509.CertPool + err error + ) + if exclusivePool { + certPool = x509.NewCertPool() + } else { + certPool, err = SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to read system certificates: %v", err) + } + } + pem, err := ioutil.ReadFile(caFile) + if err != nil { + return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) + } + if !certPool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) + } + return certPool, nil +} + +// isValidMinVersion checks that the input value is a valid tls minimum version +func isValidMinVersion(version uint16) bool { + _, ok := allTLSVersions[version] + return ok +} + +// adjustMinVersion sets the MinVersion on `config`, the input configuration. +// It assumes the current MinVersion on the `config` is the lowest allowed.
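For orientation, a minimal sketch of the default-config constructors above and their variadic functional options (not part of the vendored file). `adjustMinVersion` itself follows in the diff.

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	cli := tlsconfig.ClientDefault() // TLS 1.2 minimum, GCM-only client suites

	// The functional options can tighten the defaults further.
	srv := tlsconfig.ServerDefault(func(c *tls.Config) {
		c.MinVersion = tls.VersionTLS13
	})

	fmt.Printf("client min %x, server min %x\n", cli.MinVersion, srv.MinVersion)
}
```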
+func adjustMinVersion(options Options, config *tls.Config) error { + if options.MinVersion > 0 { + if !isValidMinVersion(options.MinVersion) { + return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion) + } + if options.MinVersion < config.MinVersion { + return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion) + } + config.MinVersion = options.MinVersion + } + + return nil +} + +// IsErrEncryptedKey returns true if the 'err' is an error of incorrect +// password when trying to decrypt a TLS private key +func IsErrEncryptedKey(err error) bool { + return errors.Cause(err) == x509.IncorrectPasswordError +} + +// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format. +// If the private key is encrypted, 'passphrase' is used to decrypt the +// private key. +func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) { + // this section makes some small changes to code from notary/tuf/utils/x509.go + pemBlock, _ := pem.Decode(keyBytes) + if pemBlock == nil { + return nil, fmt.Errorf("no valid private key found") + } + + var err error + if x509.IsEncryptedPEMBlock(pemBlock) { + keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase)) + if err != nil { + return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it") + } + keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes}) + } + + return keyBytes, nil +} + +// getCert returns a Certificate from the CertFile and KeyFile in 'options'. +// If the key is encrypted, the Passphrase in 'options' will be used to +// decrypt it. +func getCert(options Options) ([]tls.Certificate, error) { + if options.CertFile == "" && options.KeyFile == "" { + return nil, nil + } + + errMessage := "Could not load X509 key pair" + + cert, err := ioutil.ReadFile(options.CertFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err := ioutil.ReadFile(options.KeyFile) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + tlsCert, err := tls.X509KeyPair(cert, prKeyBytes) + if err != nil { + return nil, errors.Wrap(err, errMessage) + } + + return []tls.Certificate{tlsCert}, nil +} + +// Client returns a TLS configuration meant to be used by a client. +func Client(options Options) (*tls.Config, error) { + tlsConfig := ClientDefault() + tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify + if !options.InsecureSkipVerify && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.RootCAs = CAs + } + + tlsCerts, err := getCert(options) + if err != nil { + return nil, err + } + tlsConfig.Certificates = tlsCerts + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} + +// Server returns a TLS configuration meant to be used by a server. +func Server(options Options) (*tls.Config, error) { + tlsConfig := ServerDefault() + tlsConfig.ClientAuth = options.ClientAuth + tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) + } + return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v.
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 0000000000..6b4c6a7c0d --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 0000000000..ee22df47cb --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 0000000000..9ea86d784e --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
+660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE new file mode 100644 index 0000000000..b55b37bc31 --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 0000000000..4aac7c7411 --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,46 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "akihirosuda", + "dnephin", + "thajeztah", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "akihiro.suda.cz@hco.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 0000000000..4f70a4e134 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 0000000000..af9d605529 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get golang.org/x/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 0000000000..48dd8744d4 --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
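A reviewer's sketch of the thresholds encoded by `HumanDuration`, with outputs traced from the implementation that follows in the diff (illustrative only, not part of the vendored file).

```go
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(47 * time.Second)) // 47 seconds
	fmt.Println(units.HumanDuration(90 * time.Second)) // About a minute
	fmt.Println(units.HumanDuration(72 * time.Hour))   // 3 days
}
```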
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 0000000000..85f6ab0715 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,108 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. 
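The decimal/binary split above is easy to trip over, so a brief sketch of the size helpers (not part of the vendored file); the `RAMInBytes` doc comment resumes directly below.

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanSize(1000000)) // 1MB (decimal, base 1000)
	fmt.Println(units.BytesSize(2048))    // 2KiB (binary, base 1024)

	n, _ := units.FromHumanSize("17MB") // SI units via the decimal map
	fmt.Println(n)                      // 17000000

	ram, _ := units.RAMInBytes("64m") // binary map: 64 MiB
	fmt.Println(ram)                  // 67108864
}
```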
+// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 0000000000..fca0400cc8 --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,123 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. 
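Before the implementation below, a minimal sketch of the `soft[:hard]` syntax that `ParseUlimit` accepts and the `Rlimit` it maps to (illustrative only).

```go
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	// soft:hard; a single value would set both limits.
	u, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // nofile=1024:2048

	r, err := u.GetRlimit()
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Type, r.Soft, r.Hard) // 7 1024 2048
}
```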
+func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if *hard != -1 { + if soft == -1 { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) + } + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/emirpasic/gods/LICENSE b/vendor/github.com/emirpasic/gods/LICENSE new file mode 100644 index 0000000000..e5e449b6ec --- /dev/null +++ b/vendor/github.com/emirpasic/gods/LICENSE @@ -0,0 +1,41 @@ +Copyright (c) 2015, Emir Pasic +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +------------------------------------------------------------------------------- + +AVL Tree: + +Copyright (c) 2017 Benjamin Scher Purcell + +Permission to use, copy, modify, and distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/emirpasic/gods/containers/containers.go b/vendor/github.com/emirpasic/gods/containers/containers.go
new file mode 100644
index 0000000000..c35ab36d2c
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/containers.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package containers provides core interfaces and functions for data structures.
+//
+// Container is the base interface for all data structures to implement.
+//
+// Iterators provide stateful iterators.
+//
+// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
+//
+// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
+package containers
+
+import "github.com/emirpasic/gods/utils"
+
+// Container is the base interface that all data structures implement.
+type Container interface {
+	Empty() bool
+	Size() int
+	Clear()
+	Values() []interface{}
+}
+
+// GetSortedValues returns the container's elements sorted with respect to the passed comparator.
+// Does not affect the ordering of elements within the container.
+func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
+	values := container.Values()
+	if len(values) < 2 {
+		return values
+	}
+	utils.Sort(values, comparator)
+	return values
+}
diff --git a/vendor/github.com/emirpasic/gods/containers/enumerable.go b/vendor/github.com/emirpasic/gods/containers/enumerable.go
new file mode 100644
index 0000000000..ac48b54531
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/enumerable.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package containers
+
+// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
+type EnumerableWithIndex interface {
+	// Each calls the given function once for each element, passing that element's index and value.
+	Each(func(index int, value interface{}))
+
+	// Map invokes the given function once for each element and returns a
+	// container containing the values returned by the given function.
+	// TODO would appreciate help on how to enforce this in containers (don't want to type assert when chaining)
+	// Map(func(index int, value interface{}) interface{}) Container
+
+	// Select returns a new container containing all elements for which the given function returns a true value.
+	// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
+	// Select(func(index int, value interface{}) bool) Container
+
+	// Any passes each element of the container to the given function and
+	// returns true if the function ever returns true for any element.
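+	// For example (an illustrative sketch, not upstream documentation):
+	// list.Any(func(_ int, v interface{}) bool { return v == nil })
+	// reports whether the container holds a nil element.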
+	Any(func(index int, value interface{}) bool) bool
+
+	// All passes each element of the container to the given function and
+	// returns true if the function returns true for all elements.
+	All(func(index int, value interface{}) bool) bool
+
+	// Find passes each element of the container to the given function and returns
+	// the first (index,value) for which the function is true, or (-1,nil) if no
+	// element matches the criteria.
+	Find(func(index int, value interface{}) bool) (int, interface{})
+}
+
+// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs.
+type EnumerableWithKey interface {
+	// Each calls the given function once for each element, passing that element's key and value.
+	Each(func(key interface{}, value interface{}))
+
+	// Map invokes the given function once for each element and returns a container
+	// containing the values returned by the given function as key/value pairs.
+	// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
+	// Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
+
+	// Select returns a new container containing all elements for which the given function returns a true value.
+	// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
+	// Select(func(key interface{}, value interface{}) bool) Container
+
+	// Any passes each element of the container to the given function and
+	// returns true if the function ever returns true for any element.
+	Any(func(key interface{}, value interface{}) bool) bool
+
+	// All passes each element of the container to the given function and
+	// returns true if the function returns true for all elements.
+	All(func(key interface{}, value interface{}) bool) bool
+
+	// Find passes each element of the container to the given function and returns
+	// the first (key,value) for which the function is true, or (nil,nil) if no
+	// element matches the criteria.
+	Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
+}
diff --git a/vendor/github.com/emirpasic/gods/containers/iterator.go b/vendor/github.com/emirpasic/gods/containers/iterator.go
new file mode 100644
index 0000000000..f1a52a365a
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/iterator.go
@@ -0,0 +1,109 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package containers
+
+// IteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
+type IteratorWithIndex interface {
+	// Next moves the iterator to the next element and returns true if there was a next element in the container.
+	// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
+	// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
+	// Modifies the state of the iterator.
+	Next() bool
+
+	// Value returns the current element's value.
+	// Does not modify the state of the iterator.
+	Value() interface{}
+
+	// Index returns the current element's index.
+	// Does not modify the state of the iterator.
+	Index() int
+
+	// Begin resets the iterator to its initial state (one-before-first).
+	// Call Next() to fetch the first element if any.
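+	//
+	// A typical traversal, sketched here for illustration only:
+	//
+	//	it.Begin()
+	//	for it.Next() {
+	//		index, value := it.Index(), it.Value()
+	//		_, _ = index, value
+	//	}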
+	Begin()
+
+	// First moves the iterator to the first element and returns true if there was a first element in the container.
+	// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
+	// Modifies the state of the iterator.
+	First() bool
+}
+
+// IteratorWithKey is a stateful iterator for ordered containers whose elements are key/value pairs.
+type IteratorWithKey interface {
+	// Next moves the iterator to the next element and returns true if there was a next element in the container.
+	// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
+	// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
+	// Modifies the state of the iterator.
+	Next() bool
+
+	// Value returns the current element's value.
+	// Does not modify the state of the iterator.
+	Value() interface{}
+
+	// Key returns the current element's key.
+	// Does not modify the state of the iterator.
+	Key() interface{}
+
+	// Begin resets the iterator to its initial state (one-before-first).
+	// Call Next() to fetch the first element if any.
+	Begin()
+
+	// First moves the iterator to the first element and returns true if there was a first element in the container.
+	// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
+	// Modifies the state of the iterator.
+	First() bool
+}
+
+// ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
+//
+// Essentially it is the same as IteratorWithIndex, but provides additional:
+//
+// Prev() function to enable traversal in reverse
+//
+// Last() function to move the iterator to the last element.
+//
+// End() function to move the iterator past the last element (one-past-the-end).
+type ReverseIteratorWithIndex interface {
+	// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
+	// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
+	// Modifies the state of the iterator.
+	Prev() bool
+
+	// End moves the iterator past the last element (one-past-the-end).
+	// Call Prev() to fetch the last element if any.
+	End()
+
+	// Last moves the iterator to the last element and returns true if there was a last element in the container.
+	// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
+	// Modifies the state of the iterator.
+	Last() bool
+
+	IteratorWithIndex
+}
+
+// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key/value pairs.
+//
+// Essentially it is the same as IteratorWithKey, but provides additional:
+//
+// Prev() function to enable traversal in reverse
+//
+// Last() function to move the iterator to the last element.
+type ReverseIteratorWithKey interface {
+	// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
+	// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
+	// Modifies the state of the iterator.
+	Prev() bool
+
+	// End moves the iterator past the last element (one-past-the-end).
+	// Call Prev() to fetch the last element if any.
+	End()
+
+	// Last moves the iterator to the last element and returns true if there was a last element in the container.
+	// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
+	// Modifies the state of the iterator.
+	Last() bool
+
+	IteratorWithKey
+}
diff --git a/vendor/github.com/emirpasic/gods/containers/serialization.go b/vendor/github.com/emirpasic/gods/containers/serialization.go
new file mode 100644
index 0000000000..d7c90c83a0
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/containers/serialization.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package containers
+
+// JSONSerializer provides JSON serialization
+type JSONSerializer interface {
+	// ToJSON outputs the JSON representation of the container's elements.
+	ToJSON() ([]byte, error)
+}
+
+// JSONDeserializer provides JSON deserialization
+type JSONDeserializer interface {
+	// FromJSON populates the container's elements from the input JSON representation.
+	FromJSON([]byte) error
+}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
new file mode 100644
index 0000000000..bfedac9eef
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/arraylist/arraylist.go
@@ -0,0 +1,228 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package arraylist implements the array list.
+//
+// Structure is not thread safe.
+//
+// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29
+package arraylist
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/emirpasic/gods/lists"
+	"github.com/emirpasic/gods/utils"
+)
+
+func assertListImplementation() {
+	var _ lists.List = (*List)(nil)
+}
+
+// List holds the elements in a slice
+type List struct {
+	elements []interface{}
+	size     int
+}
+
+const (
+	growthFactor = float32(2.0)  // growth by 100%
+	shrinkFactor = float32(0.25) // shrink when size is 25% of capacity (0 means never shrink)
+)
+
+// New instantiates a new list and adds the passed values, if any, to the list
+func New(values ...interface{}) *List {
+	list := &List{}
+	if len(values) > 0 {
+		list.Add(values...)
+	}
+	return list
+}
+
+// Add appends a value at the end of the list
+func (list *List) Add(values ...interface{}) {
+	list.growBy(len(values))
+	for _, value := range values {
+		list.elements[list.size] = value
+		list.size++
+	}
+}
+
+// Get returns the element at index.
+// Second return parameter is true if index is within bounds of the array and array is not empty, otherwise false.
+func (list *List) Get(index int) (interface{}, bool) {
+
+	if !list.withinRange(index) {
+		return nil, false
+	}
+
+	return list.elements[index], true
+}
+
+// Remove removes the element at the given index from the list.
+func (list *List) Remove(index int) {
+
+	if !list.withinRange(index) {
+		return
+	}
+
+	list.elements[index] = nil                                    // cleanup reference
+	copy(list.elements[index:], list.elements[index+1:list.size]) // shift to the left by one (slow operation, need ways to optimize this)
+	list.size--
+
+	list.shrink()
+}
+
+// Contains checks if elements (one or more) are present in the list.
+// All elements have to be present in the list for the method to return true.
+// Time complexity is O(n^2).
+// Returns true if no arguments are passed at all, i.e. the list is always a superset of the empty list.
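+// For example (illustrative): New(1, 2, 3).Contains(2, 3) is true,
+// New(1, 2, 3).Contains(4) is false, and New().Contains() is true.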
+func (list *List) Contains(values ...interface{}) bool {
+
+	for _, searchValue := range values {
+		found := false
+		for _, element := range list.elements {
+			if element == searchValue {
+				found = true
+				break
+			}
+		}
+		if !found {
+			return false
+		}
+	}
+	return true
+}
+
+// Values returns all elements in the list.
+func (list *List) Values() []interface{} {
+	newElements := make([]interface{}, list.size, list.size)
+	copy(newElements, list.elements[:list.size])
+	return newElements
+}
+
+// IndexOf returns the index of the provided element, or -1 if not found.
+func (list *List) IndexOf(value interface{}) int {
+	if list.size == 0 {
+		return -1
+	}
+	for index, element := range list.elements {
+		if element == value {
+			return index
+		}
+	}
+	return -1
+}
+
+// Empty returns true if list does not contain any elements.
+func (list *List) Empty() bool {
+	return list.size == 0
+}
+
+// Size returns number of elements within the list.
+func (list *List) Size() int {
+	return list.size
+}
+
+// Clear removes all elements from the list.
+func (list *List) Clear() {
+	list.size = 0
+	list.elements = []interface{}{}
+}
+
+// Sort sorts values (in-place) using the given comparator.
+func (list *List) Sort(comparator utils.Comparator) {
+	if len(list.elements) < 2 {
+		return
+	}
+	utils.Sort(list.elements[:list.size], comparator)
+}
+
+// Swap swaps the two values at the specified positions.
+func (list *List) Swap(i, j int) {
+	if list.withinRange(i) && list.withinRange(j) {
+		list.elements[i], list.elements[j] = list.elements[j], list.elements[i]
+	}
+}
+
+// Insert inserts values at specified index position shifting the value at that position (if any) and any subsequent elements to the right.
+// Does not do anything if position is negative or bigger than list's size
+// Note: position equal to list's size is valid, i.e. append.
+func (list *List) Insert(index int, values ...interface{}) {
+
+	if !list.withinRange(index) {
+		// Append
+		if index == list.size {
+			list.Add(values...)
+		}
+		return
+	}
+
+	l := len(values)
+	list.growBy(l)
+	list.size += l
+	copy(list.elements[index+l:], list.elements[index:list.size-l])
+	copy(list.elements[index:], values)
+}
+
+// Set the value at specified index
+// Does not do anything if position is negative or bigger than list's size
+// Note: position equal to list's size is valid, i.e. append.
+func (list *List) Set(index int, value interface{}) {
+
+	if !list.withinRange(index) {
+		// Append
+		if index == list.size {
+			list.Add(value)
+		}
+		return
+	}
+
+	list.elements[index] = value
+}
+
+// String returns a string representation of the container
+func (list *List) String() string {
+	str := "ArrayList\n"
+	values := []string{}
+	for _, value := range list.elements[:list.size] {
+		values = append(values, fmt.Sprintf("%v", value))
+	}
+	str += strings.Join(values, ", ")
+	return str
+}
+
+// Check that the index is within bounds of the list
+func (list *List) withinRange(index int) bool {
+	return index >= 0 && index < list.size
+}
+
+func (list *List) resize(cap int) {
+	newElements := make([]interface{}, cap, cap)
+	copy(newElements, list.elements)
+	list.elements = newElements
+}
+
+// Expand the array if necessary, i.e. capacity will be reached if we add n elements
+func (list *List) growBy(n int) {
+	// When capacity is reached, grow by a factor of growthFactor and add number of elements
+	currentCapacity := cap(list.elements)
+	if list.size+n >= currentCapacity {
+		newCapacity := int(growthFactor * float32(currentCapacity+n))
+		list.resize(newCapacity)
+	}
+}
+
+// Shrink the array if necessary, i.e. when size is shrinkFactor percent of current capacity
+func (list *List) shrink() {
+	if shrinkFactor == 0.0 {
+		return
+	}
+	// Shrink when size is at shrinkFactor * capacity
+	currentCapacity := cap(list.elements)
+	if list.size <= int(float32(currentCapacity)*shrinkFactor) {
+		list.resize(list.size)
+	}
+}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
new file mode 100644
index 0000000000..b3a8738825
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/arraylist/enumerable.go
@@ -0,0 +1,79 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arraylist
+
+import "github.com/emirpasic/gods/containers"
+
+func assertEnumerableImplementation() {
+	var _ containers.EnumerableWithIndex = (*List)(nil)
+}
+
+// Each calls the given function once for each element, passing that element's index and value.
+func (list *List) Each(f func(index int, value interface{})) {
+	iterator := list.Iterator()
+	for iterator.Next() {
+		f(iterator.Index(), iterator.Value())
+	}
+}
+
+// Map invokes the given function once for each element and returns a
+// container containing the values returned by the given function.
+func (list *List) Map(f func(index int, value interface{}) interface{}) *List {
+	newList := &List{}
+	iterator := list.Iterator()
+	for iterator.Next() {
+		newList.Add(f(iterator.Index(), iterator.Value()))
+	}
+	return newList
+}
+
+// Select returns a new container containing all elements for which the given function returns a true value.
+func (list *List) Select(f func(index int, value interface{}) bool) *List {
+	newList := &List{}
+	iterator := list.Iterator()
+	for iterator.Next() {
+		if f(iterator.Index(), iterator.Value()) {
+			newList.Add(iterator.Value())
+		}
+	}
+	return newList
+}
+
+// Any passes each element of the collection to the given function and
+// returns true if the function ever returns true for any element.
+func (list *List) Any(f func(index int, value interface{}) bool) bool {
+	iterator := list.Iterator()
+	for iterator.Next() {
+		if f(iterator.Index(), iterator.Value()) {
+			return true
+		}
+	}
+	return false
+}
+
+// All passes each element of the collection to the given function and
+// returns true if the function returns true for all elements.
+func (list *List) All(f func(index int, value interface{}) bool) bool {
+	iterator := list.Iterator()
+	for iterator.Next() {
+		if !f(iterator.Index(), iterator.Value()) {
+			return false
+		}
+	}
+	return true
+}
+
+// Find passes each element of the container to the given function and returns
+// the first (index,value) for which the function is true, or (-1,nil) if no
+// element matches the criteria.
+func (list *List) Find(f func(index int, value interface{}) bool) (int, interface{}) {
+	iterator := list.Iterator()
+	for iterator.Next() {
+		if f(iterator.Index(), iterator.Value()) {
+			return iterator.Index(), iterator.Value()
+		}
+	}
+	return -1, nil
+}
diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
new file mode 100644
index 0000000000..38a93f3a8f
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/lists/arraylist/iterator.go
@@ -0,0 +1,83 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package arraylist + +import "github.com/emirpasic/gods/containers" + +func assertIteratorImplementation() { + var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil) +} + +// Iterator holding the iterator's state +type Iterator struct { + list *List + index int +} + +// Iterator returns a stateful iterator whose values can be fetched by an index. +func (list *List) Iterator() Iterator { + return Iterator{list: list, index: -1} +} + +// Next moves the iterator to the next element and returns true if there was a next element in the container. +// If Next() returns true, then next element's index and value can be retrieved by Index() and Value(). +// If Next() was called for the first time, then it will point the iterator to the first element if it exists. +// Modifies the state of the iterator. +func (iterator *Iterator) Next() bool { + if iterator.index < iterator.list.size { + iterator.index++ + } + return iterator.list.withinRange(iterator.index) +} + +// Prev moves the iterator to the previous element and returns true if there was a previous element in the container. +// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) Prev() bool { + if iterator.index >= 0 { + iterator.index-- + } + return iterator.list.withinRange(iterator.index) +} + +// Value returns the current element's value. +// Does not modify the state of the iterator. +func (iterator *Iterator) Value() interface{} { + return iterator.list.elements[iterator.index] +} + +// Index returns the current element's index. +// Does not modify the state of the iterator. +func (iterator *Iterator) Index() int { + return iterator.index +} + +// Begin resets the iterator to its initial state (one-before-first) +// Call Next() to fetch the first element if any. +func (iterator *Iterator) Begin() { + iterator.index = -1 +} + +// End moves the iterator past the last element (one-past-the-end). +// Call Prev() to fetch the last element if any. +func (iterator *Iterator) End() { + iterator.index = iterator.list.size +} + +// First moves the iterator to the first element and returns true if there was a first element in the container. +// If First() returns true, then first element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) First() bool { + iterator.Begin() + return iterator.Next() +} + +// Last moves the iterator to the last element and returns true if there was a last element in the container. +// If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) Last() bool { + iterator.End() + return iterator.Prev() +} diff --git a/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go new file mode 100644 index 0000000000..2f283fb97d --- /dev/null +++ b/vendor/github.com/emirpasic/gods/lists/arraylist/serialization.go @@ -0,0 +1,29 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package arraylist + +import ( + "encoding/json" + "github.com/emirpasic/gods/containers" +) + +func assertSerializationImplementation() { + var _ containers.JSONSerializer = (*List)(nil) + var _ containers.JSONDeserializer = (*List)(nil) +} + +// ToJSON outputs the JSON representation of list's elements. +func (list *List) ToJSON() ([]byte, error) { + return json.Marshal(list.elements[:list.size]) +} + +// FromJSON populates list's elements from the input JSON representation. +func (list *List) FromJSON(data []byte) error { + err := json.Unmarshal(data, &list.elements) + if err == nil { + list.size = len(list.elements) + } + return err +} diff --git a/vendor/github.com/emirpasic/gods/lists/lists.go b/vendor/github.com/emirpasic/gods/lists/lists.go new file mode 100644 index 0000000000..1f6bb08e94 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/lists/lists.go @@ -0,0 +1,33 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package lists provides an abstract List interface. +// +// In computer science, a list or sequence is an abstract data type that represents an ordered sequence of values, where the same value may occur more than once. An instance of a list is a computer representation of the mathematical concept of a finite sequence; the (potentially) infinite analog of a list is a stream. Lists are a basic example of containers, as they contain other values. If the same value occurs multiple times, each occurrence is considered a distinct item. +// +// Reference: https://en.wikipedia.org/wiki/List_%28abstract_data_type%29 +package lists + +import ( + "github.com/emirpasic/gods/containers" + "github.com/emirpasic/gods/utils" +) + +// List interface that all lists implement +type List interface { + Get(index int) (interface{}, bool) + Remove(index int) + Add(values ...interface{}) + Contains(values ...interface{}) bool + Sort(comparator utils.Comparator) + Swap(index1, index2 int) + Insert(index int, values ...interface{}) + Set(index int, value interface{}) + + containers.Container + // Empty() bool + // Size() int + // Clear() + // Values() []interface{} +} diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go new file mode 100644 index 0000000000..70b28cf52d --- /dev/null +++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/binaryheap.go @@ -0,0 +1,163 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package binaryheap implements a binary heap backed by array list. +// +// Comparator defines this heap as either min or max heap. +// +// Structure is not thread safe. +// +// References: http://en.wikipedia.org/wiki/Binary_heap +package binaryheap + +import ( + "fmt" + "github.com/emirpasic/gods/lists/arraylist" + "github.com/emirpasic/gods/trees" + "github.com/emirpasic/gods/utils" + "strings" +) + +func assertTreeImplementation() { + var _ trees.Tree = (*Heap)(nil) +} + +// Heap holds elements in an array-list +type Heap struct { + list *arraylist.List + Comparator utils.Comparator +} + +// NewWith instantiates a new empty heap tree with the custom comparator. 
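+// A hedged usage sketch (editorial, not upstream documentation):
+// NewWith(utils.IntComparator) yields a min-heap of ints, while inverting
+// the comparator yields a max-heap, e.g.
+//
+//	maxHeap := NewWith(func(a, b interface{}) int { return -utils.IntComparator(a, b) })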
+func NewWith(comparator utils.Comparator) *Heap { + return &Heap{list: arraylist.New(), Comparator: comparator} +} + +// NewWithIntComparator instantiates a new empty heap with the IntComparator, i.e. elements are of type int. +func NewWithIntComparator() *Heap { + return &Heap{list: arraylist.New(), Comparator: utils.IntComparator} +} + +// NewWithStringComparator instantiates a new empty heap with the StringComparator, i.e. elements are of type string. +func NewWithStringComparator() *Heap { + return &Heap{list: arraylist.New(), Comparator: utils.StringComparator} +} + +// Push adds a value onto the heap and bubbles it up accordingly. +func (heap *Heap) Push(values ...interface{}) { + if len(values) == 1 { + heap.list.Add(values[0]) + heap.bubbleUp() + } else { + // Reference: https://en.wikipedia.org/wiki/Binary_heap#Building_a_heap + for _, value := range values { + heap.list.Add(value) + } + size := heap.list.Size()/2 + 1 + for i := size; i >= 0; i-- { + heap.bubbleDownIndex(i) + } + } +} + +// Pop removes top element on heap and returns it, or nil if heap is empty. +// Second return parameter is true, unless the heap was empty and there was nothing to pop. +func (heap *Heap) Pop() (value interface{}, ok bool) { + value, ok = heap.list.Get(0) + if !ok { + return + } + lastIndex := heap.list.Size() - 1 + heap.list.Swap(0, lastIndex) + heap.list.Remove(lastIndex) + heap.bubbleDown() + return +} + +// Peek returns top element on the heap without removing it, or nil if heap is empty. +// Second return parameter is true, unless the heap was empty and there was nothing to peek. +func (heap *Heap) Peek() (value interface{}, ok bool) { + return heap.list.Get(0) +} + +// Empty returns true if heap does not contain any elements. +func (heap *Heap) Empty() bool { + return heap.list.Empty() +} + +// Size returns number of elements within the heap. +func (heap *Heap) Size() int { + return heap.list.Size() +} + +// Clear removes all elements from the heap. +func (heap *Heap) Clear() { + heap.list.Clear() +} + +// Values returns all elements in the heap. +func (heap *Heap) Values() []interface{} { + return heap.list.Values() +} + +// String returns a string representation of container +func (heap *Heap) String() string { + str := "BinaryHeap\n" + values := []string{} + for _, value := range heap.list.Values() { + values = append(values, fmt.Sprintf("%v", value)) + } + str += strings.Join(values, ", ") + return str +} + +// Performs the "bubble down" operation. This is to place the element that is at the root +// of the heap in its correct place so that the heap maintains the min/max-heap order property. +func (heap *Heap) bubbleDown() { + heap.bubbleDownIndex(0) +} + +// Performs the "bubble down" operation. This is to place the element that is at the index +// of the heap in its correct place so that the heap maintains the min/max-heap order property. 
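+// (A worked illustration, not upstream documentation: in the min-heap [5 1 2],
+// bubbleDownIndex(0) compares 5 with its children 1 and 2, swaps it with the
+// smaller child 1 to give [1 5 2], and stops; each pass descends one level,
+// so the operation costs O(log n) comparisons.)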
+func (heap *Heap) bubbleDownIndex(index int) {
+	size := heap.list.Size()
+	for leftIndex := index<<1 + 1; leftIndex < size; leftIndex = index<<1 + 1 {
+		rightIndex := index<<1 + 2
+		smallerIndex := leftIndex
+		leftValue, _ := heap.list.Get(leftIndex)
+		rightValue, _ := heap.list.Get(rightIndex)
+		if rightIndex < size && heap.Comparator(leftValue, rightValue) > 0 {
+			smallerIndex = rightIndex
+		}
+		indexValue, _ := heap.list.Get(index)
+		smallerValue, _ := heap.list.Get(smallerIndex)
+		if heap.Comparator(indexValue, smallerValue) > 0 {
+			heap.list.Swap(index, smallerIndex)
+		} else {
+			break
+		}
+		index = smallerIndex
+	}
+}
+
+// Performs the "bubble up" operation. This is to place a newly inserted
+// element (i.e. last element in the list) in its correct place so that
+// the heap maintains the min/max-heap order property.
+func (heap *Heap) bubbleUp() {
+	index := heap.list.Size() - 1
+	for parentIndex := (index - 1) >> 1; index > 0; parentIndex = (index - 1) >> 1 {
+		indexValue, _ := heap.list.Get(index)
+		parentValue, _ := heap.list.Get(parentIndex)
+		if heap.Comparator(parentValue, indexValue) <= 0 {
+			break
+		}
+		heap.list.Swap(index, parentIndex)
+		index = parentIndex
+	}
+}
+
+// Check that the index is within bounds of the list
+func (heap *Heap) withinRange(index int) bool {
+	return index >= 0 && index < heap.list.Size()
+}
diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go
new file mode 100644
index 0000000000..beeb8d7013
--- /dev/null
+++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/iterator.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2015, Emir Pasic. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package binaryheap
+
+import "github.com/emirpasic/gods/containers"
+
+func assertIteratorImplementation() {
+	var _ containers.ReverseIteratorWithIndex = (*Iterator)(nil)
+}
+
+// Iterator holds the iterator's state
+type Iterator struct {
+	heap  *Heap
+	index int
+}
+
+// Iterator returns a stateful iterator whose values can be fetched by an index.
+func (heap *Heap) Iterator() Iterator {
+	return Iterator{heap: heap, index: -1}
+}
+
+// Next moves the iterator to the next element and returns true if there was a next element in the container.
+// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
+// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
+// Modifies the state of the iterator.
+func (iterator *Iterator) Next() bool {
+	if iterator.index < iterator.heap.Size() {
+		iterator.index++
+	}
+	return iterator.heap.withinRange(iterator.index)
+}
+
+// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
+// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
+// Modifies the state of the iterator.
+func (iterator *Iterator) Prev() bool {
+	if iterator.index >= 0 {
+		iterator.index--
+	}
+	return iterator.heap.withinRange(iterator.index)
+}
+
+// Value returns the current element's value.
+// Does not modify the state of the iterator.
+func (iterator *Iterator) Value() interface{} {
+	value, _ := iterator.heap.list.Get(iterator.index)
+	return value
+}
+
+// Index returns the current element's index.
+// Does not modify the state of the iterator.
+func (iterator *Iterator) Index() int { + return iterator.index +} + +// Begin resets the iterator to its initial state (one-before-first) +// Call Next() to fetch the first element if any. +func (iterator *Iterator) Begin() { + iterator.index = -1 +} + +// End moves the iterator past the last element (one-past-the-end). +// Call Prev() to fetch the last element if any. +func (iterator *Iterator) End() { + iterator.index = iterator.heap.Size() +} + +// First moves the iterator to the first element and returns true if there was a first element in the container. +// If First() returns true, then first element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) First() bool { + iterator.Begin() + return iterator.Next() +} + +// Last moves the iterator to the last element and returns true if there was a last element in the container. +// If Last() returns true, then last element's index and value can be retrieved by Index() and Value(). +// Modifies the state of the iterator. +func (iterator *Iterator) Last() bool { + iterator.End() + return iterator.Prev() +} diff --git a/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go b/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go new file mode 100644 index 0000000000..00d0c7719c --- /dev/null +++ b/vendor/github.com/emirpasic/gods/trees/binaryheap/serialization.go @@ -0,0 +1,22 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package binaryheap + +import "github.com/emirpasic/gods/containers" + +func assertSerializationImplementation() { + var _ containers.JSONSerializer = (*Heap)(nil) + var _ containers.JSONDeserializer = (*Heap)(nil) +} + +// ToJSON outputs the JSON representation of the heap. +func (heap *Heap) ToJSON() ([]byte, error) { + return heap.list.ToJSON() +} + +// FromJSON populates the heap from the input JSON representation. +func (heap *Heap) FromJSON(data []byte) error { + return heap.list.FromJSON(data) +} diff --git a/vendor/github.com/emirpasic/gods/trees/trees.go b/vendor/github.com/emirpasic/gods/trees/trees.go new file mode 100644 index 0000000000..a5a7427d34 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/trees/trees.go @@ -0,0 +1,21 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package trees provides an abstract Tree interface. +// +// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes. +// +// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29 +package trees + +import "github.com/emirpasic/gods/containers" + +// Tree interface that all trees implement +type Tree interface { + containers.Container + // Empty() bool + // Size() int + // Clear() + // Values() []interface{} +} diff --git a/vendor/github.com/emirpasic/gods/utils/comparator.go b/vendor/github.com/emirpasic/gods/utils/comparator.go new file mode 100644 index 0000000000..6a9afbf346 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/utils/comparator.go @@ -0,0 +1,251 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import "time" + +// Comparator will make type assertion (see IntComparator for example), +// which will panic if a or b are not of the asserted type. +// +// Should return a number: +// negative , if a < b +// zero , if a == b +// positive , if a > b +type Comparator func(a, b interface{}) int + +// StringComparator provides a fast comparison on strings +func StringComparator(a, b interface{}) int { + s1 := a.(string) + s2 := b.(string) + min := len(s2) + if len(s1) < len(s2) { + min = len(s1) + } + diff := 0 + for i := 0; i < min && diff == 0; i++ { + diff = int(s1[i]) - int(s2[i]) + } + if diff == 0 { + diff = len(s1) - len(s2) + } + if diff < 0 { + return -1 + } + if diff > 0 { + return 1 + } + return 0 +} + +// IntComparator provides a basic comparison on int +func IntComparator(a, b interface{}) int { + aAsserted := a.(int) + bAsserted := b.(int) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int8Comparator provides a basic comparison on int8 +func Int8Comparator(a, b interface{}) int { + aAsserted := a.(int8) + bAsserted := b.(int8) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int16Comparator provides a basic comparison on int16 +func Int16Comparator(a, b interface{}) int { + aAsserted := a.(int16) + bAsserted := b.(int16) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int32Comparator provides a basic comparison on int32 +func Int32Comparator(a, b interface{}) int { + aAsserted := a.(int32) + bAsserted := b.(int32) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Int64Comparator provides a basic comparison on int64 +func Int64Comparator(a, b interface{}) int { + aAsserted := a.(int64) + bAsserted := b.(int64) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UIntComparator provides a basic comparison on uint +func UIntComparator(a, b interface{}) int { + aAsserted := a.(uint) + bAsserted := b.(uint) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt8Comparator provides a basic comparison on uint8 +func UInt8Comparator(a, b interface{}) int { + aAsserted := a.(uint8) + bAsserted := b.(uint8) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt16Comparator provides a basic comparison on uint16 +func UInt16Comparator(a, b interface{}) int { + aAsserted := a.(uint16) + bAsserted := b.(uint16) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt32Comparator provides a basic comparison on uint32 +func UInt32Comparator(a, b interface{}) int { + aAsserted := a.(uint32) + bAsserted := b.(uint32) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// UInt64Comparator provides a basic comparison on uint64 +func UInt64Comparator(a, b interface{}) int { + aAsserted := a.(uint64) + bAsserted := b.(uint64) + switch { + case aAsserted > bAsserted: + return 1 + 
case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Float32Comparator provides a basic comparison on float32 +func Float32Comparator(a, b interface{}) int { + aAsserted := a.(float32) + bAsserted := b.(float32) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// Float64Comparator provides a basic comparison on float64 +func Float64Comparator(a, b interface{}) int { + aAsserted := a.(float64) + bAsserted := b.(float64) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// ByteComparator provides a basic comparison on byte +func ByteComparator(a, b interface{}) int { + aAsserted := a.(byte) + bAsserted := b.(byte) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// RuneComparator provides a basic comparison on rune +func RuneComparator(a, b interface{}) int { + aAsserted := a.(rune) + bAsserted := b.(rune) + switch { + case aAsserted > bAsserted: + return 1 + case aAsserted < bAsserted: + return -1 + default: + return 0 + } +} + +// TimeComparator provides a basic comparison on time.Time +func TimeComparator(a, b interface{}) int { + aAsserted := a.(time.Time) + bAsserted := b.(time.Time) + + switch { + case aAsserted.After(bAsserted): + return 1 + case aAsserted.Before(bAsserted): + return -1 + default: + return 0 + } +} diff --git a/vendor/github.com/emirpasic/gods/utils/sort.go b/vendor/github.com/emirpasic/gods/utils/sort.go new file mode 100644 index 0000000000..79ced1f5d2 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/utils/sort.go @@ -0,0 +1,29 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package utils + +import "sort" + +// Sort sorts values (in-place) with respect to the given comparator. +// +// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices). +func Sort(values []interface{}, comparator Comparator) { + sort.Sort(sortable{values, comparator}) +} + +type sortable struct { + values []interface{} + comparator Comparator +} + +func (s sortable) Len() int { + return len(s.values) +} +func (s sortable) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] +} +func (s sortable) Less(i, j int) bool { + return s.comparator(s.values[i], s.values[j]) < 0 +} diff --git a/vendor/github.com/emirpasic/gods/utils/utils.go b/vendor/github.com/emirpasic/gods/utils/utils.go new file mode 100644 index 0000000000..1ad49cbc07 --- /dev/null +++ b/vendor/github.com/emirpasic/gods/utils/utils.go @@ -0,0 +1,47 @@ +// Copyright (c) 2015, Emir Pasic. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package utils provides common utility functions. +// +// Provided functionalities: +// - sorting +// - comparators +package utils + +import ( + "fmt" + "strconv" +) + +// ToString converts a value to string. 
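+// For example (illustrative): ToString(int64(42)) == "42", ToString(3.5) == "3.5",
+// and ToString(true) == "true"; types without a dedicated case (including plain
+// int) fall back to fmt.Sprintf("%+v", value).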
+func ToString(value interface{}) string { + switch value.(type) { + case string: + return value.(string) + case int8: + return strconv.FormatInt(int64(value.(int8)), 10) + case int16: + return strconv.FormatInt(int64(value.(int16)), 10) + case int32: + return strconv.FormatInt(int64(value.(int32)), 10) + case int64: + return strconv.FormatInt(int64(value.(int64)), 10) + case uint8: + return strconv.FormatUint(uint64(value.(uint8)), 10) + case uint16: + return strconv.FormatUint(uint64(value.(uint16)), 10) + case uint32: + return strconv.FormatUint(uint64(value.(uint32)), 10) + case uint64: + return strconv.FormatUint(uint64(value.(uint64)), 10) + case float32: + return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 64) + case float64: + return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64) + case bool: + return strconv.FormatBool(value.(bool)) + default: + return fmt.Sprintf("%+v", value) + } +} diff --git a/vendor/github.com/evanphx/json-patch/.travis.yml b/vendor/github.com/evanphx/json-patch/.travis.yml deleted file mode 100644 index 50e4afd19a..0000000000 --- a/vendor/github.com/evanphx/json-patch/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -language: go - -go: - - 1.14 - - 1.13 - -install: - - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi - - go get github.com/jessevdk/go-flags - -script: - - go get - - go test -cover ./... - - cd ./v5 - - go get - - go test -cover ./... - -notifications: - email: false diff --git a/vendor/github.com/evanphx/json-patch/README.md b/vendor/github.com/evanphx/json-patch/README.md index 121b039dba..28e3516937 100644 --- a/vendor/github.com/evanphx/json-patch/README.md +++ b/vendor/github.com/evanphx/json-patch/README.md @@ -39,6 +39,25 @@ go get -u github.com/evanphx/json-patch/v5 which limits the total size increase in bytes caused by "copy" operations in a patch. It defaults to 0, which means there is no limit. +These global variables control the behavior of `jsonpatch.Apply`. + +An alternative to `jsonpatch.Apply` is `jsonpatch.ApplyWithOptions` whose behavior +is controlled by an `options` parameter of type `*jsonpatch.ApplyOptions`. + +Structure `jsonpatch.ApplyOptions` includes the configuration options above +and adds two new options: `AllowMissingPathOnRemove` and `EnsurePathExistsOnAdd`. + +When `AllowMissingPathOnRemove` is set to `true`, `jsonpatch.ApplyWithOptions` will ignore +`remove` operations whose `path` points to a non-existent location in the JSON document. +`AllowMissingPathOnRemove` defaults to `false` which will lead to `jsonpatch.ApplyWithOptions` +returning an error when hitting a missing `path` on `remove`. + +When `EnsurePathExistsOnAdd` is set to `true`, `jsonpatch.ApplyWithOptions` will make sure +that `add` operations produce all the `path` elements that are missing from the target object. + +Use `jsonpatch.NewApplyOptions` to create an instance of `jsonpatch.ApplyOptions` +whose values are populated from the global configuration variables. + ## Create and apply a merge patch Given both an original JSON document and a modified JSON document, you can create a [Merge Patch](https://tools.ietf.org/html/rfc7396) document. 
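+A hedged sketch of that flow (assuming the package-level helpers
+`CreateMergePatch` and `MergePatch` from this library):
+
+```go
+// patch describes how to turn original into modified (RFC 7396)
+patch, err := jsonpatch.CreateMergePatch(original, modified)
+if err != nil {
+	panic(err)
+}
+// applying the patch to original yields modified again
+merged, err := jsonpatch.MergePatch(original, patch)
+```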
diff --git a/vendor/github.com/evanphx/json-patch/merge.go b/vendor/github.com/evanphx/json-patch/merge.go index 14e8bb5ce3..ad88d40181 100644 --- a/vendor/github.com/evanphx/json-patch/merge.go +++ b/vendor/github.com/evanphx/json-patch/merge.go @@ -38,7 +38,10 @@ func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { cur, ok := (*doc)[k] if !ok || cur == nil { - pruneNulls(v) + if !mergeMerge { + pruneNulls(v) + } + (*doc)[k] = v } else { (*doc)[k] = merge(cur, v, mergeMerge) @@ -79,8 +82,8 @@ func pruneAryNulls(ary *partialArray) *partialArray { for _, v := range *ary { if v != nil { pruneNulls(v) - newAry = append(newAry, v) } + newAry = append(newAry, v) } *ary = newAry @@ -88,8 +91,8 @@ func pruneAryNulls(ary *partialArray) *partialArray { return ary } -var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") -var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var ErrBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var ErrBadJSONPatch = fmt.Errorf("Invalid JSON Patch") var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") // MergeMergePatches merges two merge patches together, such that @@ -114,19 +117,19 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { patchErr := json.Unmarshal(patchData, patch) if _, ok := docErr.(*json.SyntaxError); ok { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } if _, ok := patchErr.(*json.SyntaxError); ok { - return nil, errBadJSONPatch + return nil, ErrBadJSONPatch } if docErr == nil && *doc == nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } if patchErr == nil && *patch == nil { - return nil, errBadJSONPatch + return nil, ErrBadJSONPatch } if docErr != nil || patchErr != nil { @@ -142,7 +145,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { patchErr = json.Unmarshal(patchData, patchAry) if patchErr != nil { - return nil, errBadJSONPatch + return nil, ErrBadJSONPatch } pruneAryNulls(patchAry) @@ -150,7 +153,7 @@ func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { out, patchErr := json.Marshal(patchAry) if patchErr != nil { - return nil, errBadJSONPatch + return nil, ErrBadJSONPatch } return out, nil @@ -207,12 +210,12 @@ func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { err := json.Unmarshal(originalJSON, &originalDoc) if err != nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } err = json.Unmarshal(modifiedJSON, &modifiedDoc) if err != nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } dest, err := getDiff(originalDoc, modifiedDoc) @@ -233,17 +236,17 @@ func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { err := json.Unmarshal(originalJSON, &originalDocs) if err != nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } err = json.Unmarshal(modifiedJSON, &modifiedDocs) if err != nil { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } total := len(originalDocs) if len(modifiedDocs) != total { - return nil, errBadJSONDoc + return nil, ErrBadJSONDoc } result := []json.RawMessage{} diff --git a/vendor/github.com/evanphx/json-patch/patch.go b/vendor/github.com/evanphx/json-patch/patch.go index f185a45b2c..1829854907 100644 --- a/vendor/github.com/evanphx/json-patch/patch.go +++ b/vendor/github.com/evanphx/json-patch/patch.go @@ -721,6 +721,10 @@ func (p Patch) Apply(doc []byte) ([]byte, error) { // ApplyIndent mutates a JSON document according to the patch, and returns the new // document indented. 
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + var pd container if doc[0] == '[' { pd = &partialArray{} diff --git a/vendor/github.com/fatih/color/LICENSE.md b/vendor/github.com/fatih/color/LICENSE.md new file mode 100644 index 0000000000..25fdaf639d --- /dev/null +++ b/vendor/github.com/fatih/color/LICENSE.md @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2013 Fatih Arslan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/fatih/color/README.md b/vendor/github.com/fatih/color/README.md new file mode 100644 index 0000000000..5152bf59bf --- /dev/null +++ b/vendor/github.com/fatih/color/README.md @@ -0,0 +1,178 @@ +# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color) + +Color lets you use colorized outputs in terms of [ANSI Escape +Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It +has support for Windows too! The API can be used in several ways, pick one that +suits you. + +![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg) + + +## Install + +```bash +go get github.com/fatih/color +``` + +## Examples + +### Standard colors + +```go +// Print with default helper functions +color.Cyan("Prints text in cyan.") + +// A newline will be appended automatically +color.Blue("Prints %s in blue.", "text") + +// These are using the default foreground colors +color.Red("We have red") +color.Magenta("And many others ..") + +``` + +### Mix and reuse colors + +```go +// Create a new color object +c := color.New(color.FgCyan).Add(color.Underline) +c.Println("Prints cyan text with an underline.") + +// Or just add them to New() +d := color.New(color.FgCyan, color.Bold) +d.Printf("This prints bold cyan %s\n", "too!.") + +// Mix up foreground and background colors, create new mixes! 
+red := color.New(color.FgRed)
+
+boldRed := red.Add(color.Bold)
+boldRed.Println("This will print text in bold red.")
+
+whiteBackground := red.Add(color.BgWhite)
+whiteBackground.Println("Red text with white background.")
+```
+
+### Use your own output (io.Writer)
+
+```go
+// Use your own io.Writer output
+color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
+
+blue := color.New(color.FgBlue)
+blue.Fprint(writer, "This will print text in blue.")
+```
+
+### Custom print functions (PrintFunc)
+
+```go
+// Create a custom print function for convenience
+red := color.New(color.FgRed).PrintfFunc()
+red("Warning")
+red("Error: %s", err)
+
+// Mix up multiple attributes
+notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
+notice("Don't forget this...")
+```
+
+### Custom fprint functions (FprintFunc)
+
+```go
+blue := color.New(color.FgBlue).FprintfFunc()
+blue(myWriter, "important notice: %s", stars)
+
+// Mix up with multiple attributes
+success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
+success(myWriter, "Don't forget this...")
+```
+
+### Insert into non-color strings (SprintFunc)
+
+```go
+// Create SprintXxx functions to mix strings with other non-colorized strings:
+yellow := color.New(color.FgYellow).SprintFunc()
+red := color.New(color.FgRed).SprintFunc()
+fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
+
+info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
+fmt.Printf("This %s rocks!\n", info("package"))
+
+// Use helper functions
+fmt.Println("This", color.RedString("warning"), "should not be neglected.")
+fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
+
+// Windows supported too! Just don't forget to change the output to color.Output
+fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
+```
+
+### Plug into existing code
+
+```go
+// Use handy standard colors
+color.Set(color.FgYellow)
+
+fmt.Println("Existing text will now be in yellow")
+fmt.Printf("This one %s\n", "too")
+
+color.Unset() // Don't forget to unset
+
+// You can mix up parameters
+color.Set(color.FgMagenta, color.Bold)
+defer color.Unset() // Use it in your function
+
+fmt.Println("All text will now be bold magenta.")
+```
+
+### Disable/Enable color
+
+There might be a case where you want to explicitly disable/enable color output. The
+`go-isatty` package will automatically disable color output for non-tty output streams
+(for example if the output were piped directly to `less`).
+
+The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
+variable is set (regardless of its value).
+
+`Color` has support to disable/enable colors programmatically both globally and
+for single color definitions. For example, suppose you have a CLI app and a
+`--no-color` bool flag. You can easily disable the color output with:
+
+```go
+var flagNoColor = flag.Bool("no-color", false, "Disable color output")
+
+if *flagNoColor {
+	color.NoColor = true // disables colorized output
+}
+```
+
+It also has support for single color definitions (local).
You can +disable/enable color output on the fly: + +```go +c := color.New(color.FgCyan) +c.Println("Prints cyan text") + +c.DisableColor() +c.Println("This is printed without any color") + +c.EnableColor() +c.Println("This prints again cyan...") +``` + +## GitHub Actions + +To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams. + +## Todo + +* Save/Return previous values +* Evaluate fmt.Formatter interface + + +## Credits + + * [Fatih Arslan](https://github.com/fatih) + * Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable) + +## License + +The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details diff --git a/vendor/github.com/fatih/color/color.go b/vendor/github.com/fatih/color/color.go new file mode 100644 index 0000000000..98a60f3c88 --- /dev/null +++ b/vendor/github.com/fatih/color/color.go @@ -0,0 +1,618 @@ +package color + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + + "github.com/mattn/go-colorable" + "github.com/mattn/go-isatty" +) + +var ( + // NoColor defines if the output is colorized or not. It's dynamically set to + // false or true based on the stdout's file descriptor referring to a terminal + // or not. It's also set to true if the NO_COLOR environment variable is + // set (regardless of its value). This is a global option and affects all + // colors. For more control over each color block use the methods + // DisableColor() individually. + NoColor = noColorExists() || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd())) + + // Output defines the standard output of the print functions. By default + // os.Stdout is used. + Output = colorable.NewColorableStdout() + + // Error defines a color supporting writer for os.Stderr. + Error = colorable.NewColorableStderr() + + // colorsCache is used to reduce the count of created Color objects and + // allows to reuse already created objects with required Attribute. + colorsCache = make(map[Attribute]*Color) + colorsCacheMu sync.Mutex // protects colorsCache +) + +// noColorExists returns true if the environment variable NO_COLOR exists. +func noColorExists() bool { + _, exists := os.LookupEnv("NO_COLOR") + return exists +} + +// Color defines a custom color object which is defined by SGR parameters. +type Color struct { + params []Attribute + noColor *bool +} + +// Attribute defines a single SGR Code +type Attribute int + +const escape = "\x1b" + +// Base attributes +const ( + Reset Attribute = iota + Bold + Faint + Italic + Underline + BlinkSlow + BlinkRapid + ReverseVideo + Concealed + CrossedOut +) + +// Foreground text colors +const ( + FgBlack Attribute = iota + 30 + FgRed + FgGreen + FgYellow + FgBlue + FgMagenta + FgCyan + FgWhite +) + +// Foreground Hi-Intensity text colors +const ( + FgHiBlack Attribute = iota + 90 + FgHiRed + FgHiGreen + FgHiYellow + FgHiBlue + FgHiMagenta + FgHiCyan + FgHiWhite +) + +// Background text colors +const ( + BgBlack Attribute = iota + 40 + BgRed + BgGreen + BgYellow + BgBlue + BgMagenta + BgCyan + BgWhite +) + +// Background Hi-Intensity text colors +const ( + BgHiBlack Attribute = iota + 100 + BgHiRed + BgHiGreen + BgHiYellow + BgHiBlue + BgHiMagenta + BgHiCyan + BgHiWhite +) + +// New returns a newly created color object. 
+func New(value ...Attribute) *Color { + c := &Color{ + params: make([]Attribute, 0), + } + + if noColorExists() { + c.noColor = boolPtr(true) + } + + c.Add(value...) + return c +} + +// Set sets the given parameters immediately. It will change the color of +// output with the given SGR parameters until color.Unset() is called. +func Set(p ...Attribute) *Color { + c := New(p...) + c.Set() + return c +} + +// Unset resets all escape attributes and clears the output. Usually should +// be called after Set(). +func Unset() { + if NoColor { + return + } + + fmt.Fprintf(Output, "%s[%dm", escape, Reset) +} + +// Set sets the SGR sequence. +func (c *Color) Set() *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(Output, c.format()) + return c +} + +func (c *Color) unset() { + if c.isNoColorSet() { + return + } + + Unset() +} + +func (c *Color) setWriter(w io.Writer) *Color { + if c.isNoColorSet() { + return c + } + + fmt.Fprintf(w, c.format()) + return c +} + +func (c *Color) unsetWriter(w io.Writer) { + if c.isNoColorSet() { + return + } + + if NoColor { + return + } + + fmt.Fprintf(w, "%s[%dm", escape, Reset) +} + +// Add is used to chain SGR parameters. Use as many as parameters to combine +// and create custom color objects. Example: Add(color.FgRed, color.Underline). +func (c *Color) Add(value ...Attribute) *Color { + c.params = append(c.params, value...) + return c +} + +func (c *Color) prepend(value Attribute) { + c.params = append(c.params, 0) + copy(c.params[1:], c.params[0:]) + c.params[0] = value +} + +// Fprint formats using the default formats for its operands and writes to w. +// Spaces are added between operands when neither is a string. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprint(w, a...) +} + +// Print formats using the default formats for its operands and writes to +// standard output. Spaces are added between operands when neither is a +// string. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Print(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprint(Output, a...) +} + +// Fprintf formats according to a format specifier and writes to w. +// It returns the number of bytes written and any write error encountered. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. +func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintf(w, format, a...) +} + +// Printf formats according to a format specifier and writes to standard output. +// It returns the number of bytes written and any write error encountered. +// This is the standard fmt.Printf() method wrapped with the given color. +func (c *Color) Printf(format string, a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintf(Output, format, a...) +} + +// Fprintln formats using the default formats for its operands and writes to w. +// Spaces are always added between operands and a newline is appended. +// On Windows, users should wrap w with colorable.NewColorable() if w is of +// type *os.File. 
+func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + c.setWriter(w) + defer c.unsetWriter(w) + + return fmt.Fprintln(w, a...) +} + +// Println formats using the default formats for its operands and writes to +// standard output. Spaces are always added between operands and a newline is +// appended. It returns the number of bytes written and any write error +// encountered. This is the standard fmt.Print() method wrapped with the given +// color. +func (c *Color) Println(a ...interface{}) (n int, err error) { + c.Set() + defer c.unset() + + return fmt.Fprintln(Output, a...) +} + +// Sprint is just like Print, but returns a string instead of printing it. +func (c *Color) Sprint(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) +} + +// Sprintln is just like Println, but returns a string instead of printing it. +func (c *Color) Sprintln(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) +} + +// Sprintf is just like Printf, but returns a string instead of printing it. +func (c *Color) Sprintf(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) +} + +// FprintFunc returns a new function that prints the passed arguments as +// colorized with color.Fprint(). +func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprint(w, a...) + } +} + +// PrintFunc returns a new function that prints the passed arguments as +// colorized with color.Print(). +func (c *Color) PrintFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Print(a...) + } +} + +// FprintfFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintf(). +func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) { + return func(w io.Writer, format string, a ...interface{}) { + c.Fprintf(w, format, a...) + } +} + +// PrintfFunc returns a new function that prints the passed arguments as +// colorized with color.Printf(). +func (c *Color) PrintfFunc() func(format string, a ...interface{}) { + return func(format string, a ...interface{}) { + c.Printf(format, a...) + } +} + +// FprintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Fprintln(). +func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) { + return func(w io.Writer, a ...interface{}) { + c.Fprintln(w, a...) + } +} + +// PrintlnFunc returns a new function that prints the passed arguments as +// colorized with color.Println(). +func (c *Color) PrintlnFunc() func(a ...interface{}) { + return func(a ...interface{}) { + c.Println(a...) + } +} + +// SprintFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprint(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output, example: +// +// put := New(FgYellow).SprintFunc() +// fmt.Fprintf(color.Output, "This is a %s", put("warning")) +func (c *Color) SprintFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprint(a...)) + } +} + +// SprintfFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintf(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. 
+func (c *Color) SprintfFunc() func(format string, a ...interface{}) string { + return func(format string, a ...interface{}) string { + return c.wrap(fmt.Sprintf(format, a...)) + } +} + +// SprintlnFunc returns a new function that returns colorized strings for the +// given arguments with fmt.Sprintln(). Useful to put into or mix into other +// string. Windows users should use this in conjunction with color.Output. +func (c *Color) SprintlnFunc() func(a ...interface{}) string { + return func(a ...interface{}) string { + return c.wrap(fmt.Sprintln(a...)) + } +} + +// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m" +// an example output might be: "1;36" -> bold cyan +func (c *Color) sequence() string { + format := make([]string, len(c.params)) + for i, v := range c.params { + format[i] = strconv.Itoa(int(v)) + } + + return strings.Join(format, ";") +} + +// wrap wraps the s string with the colors attributes. The string is ready to +// be printed. +func (c *Color) wrap(s string) string { + if c.isNoColorSet() { + return s + } + + return c.format() + s + c.unformat() +} + +func (c *Color) format() string { + return fmt.Sprintf("%s[%sm", escape, c.sequence()) +} + +func (c *Color) unformat() string { + return fmt.Sprintf("%s[%dm", escape, Reset) +} + +// DisableColor disables the color output. Useful to not change any existing +// code and still being able to output. Can be used for flags like +// "--no-color". To enable back use EnableColor() method. +func (c *Color) DisableColor() { + c.noColor = boolPtr(true) +} + +// EnableColor enables the color output. Use it in conjunction with +// DisableColor(). Otherwise this method has no side effects. +func (c *Color) EnableColor() { + c.noColor = boolPtr(false) +} + +func (c *Color) isNoColorSet() bool { + // check first if we have user set action + if c.noColor != nil { + return *c.noColor + } + + // if not return the global option, which is disabled by default + return NoColor +} + +// Equals returns a boolean value indicating whether two colors are equal. +func (c *Color) Equals(c2 *Color) bool { + if len(c.params) != len(c2.params) { + return false + } + + for _, attr := range c.params { + if !c2.attrExists(attr) { + return false + } + } + + return true +} + +func (c *Color) attrExists(a Attribute) bool { + for _, attr := range c.params { + if attr == a { + return true + } + } + + return false +} + +func boolPtr(v bool) *bool { + return &v +} + +func getCachedColor(p Attribute) *Color { + colorsCacheMu.Lock() + defer colorsCacheMu.Unlock() + + c, ok := colorsCache[p] + if !ok { + c = New(p) + colorsCache[p] = c + } + + return c +} + +func colorPrint(format string, p Attribute, a ...interface{}) { + c := getCachedColor(p) + + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + if len(a) == 0 { + c.Print(format) + } else { + c.Printf(format, a...) + } +} + +func colorString(format string, p Attribute, a ...interface{}) string { + c := getCachedColor(p) + + if len(a) == 0 { + return c.SprintFunc()(format) + } + + return c.SprintfFunc()(format, a...) +} + +// Black is a convenient helper function to print with black foreground. A +// newline is appended to format by default. +func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) } + +// Red is a convenient helper function to print with red foreground. A +// newline is appended to format by default. +func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) 
} + +// Green is a convenient helper function to print with green foreground. A +// newline is appended to format by default. +func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) } + +// Yellow is a convenient helper function to print with yellow foreground. +// A newline is appended to format by default. +func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) } + +// Blue is a convenient helper function to print with blue foreground. A +// newline is appended to format by default. +func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) } + +// Magenta is a convenient helper function to print with magenta foreground. +// A newline is appended to format by default. +func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) } + +// Cyan is a convenient helper function to print with cyan foreground. A +// newline is appended to format by default. +func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) } + +// White is a convenient helper function to print with white foreground. A +// newline is appended to format by default. +func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) } + +// BlackString is a convenient helper function to return a string with black +// foreground. +func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) } + +// RedString is a convenient helper function to return a string with red +// foreground. +func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) } + +// GreenString is a convenient helper function to return a string with green +// foreground. +func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) } + +// YellowString is a convenient helper function to return a string with yellow +// foreground. +func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) } + +// BlueString is a convenient helper function to return a string with blue +// foreground. +func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) } + +// MagentaString is a convenient helper function to return a string with magenta +// foreground. +func MagentaString(format string, a ...interface{}) string { + return colorString(format, FgMagenta, a...) +} + +// CyanString is a convenient helper function to return a string with cyan +// foreground. +func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) } + +// WhiteString is a convenient helper function to return a string with white +// foreground. +func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) } + +// HiBlack is a convenient helper function to print with hi-intensity black foreground. A +// newline is appended to format by default. +func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) } + +// HiRed is a convenient helper function to print with hi-intensity red foreground. A +// newline is appended to format by default. +func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) } + +// HiGreen is a convenient helper function to print with hi-intensity green foreground. A +// newline is appended to format by default. +func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) 
} + +// HiYellow is a convenient helper function to print with hi-intensity yellow foreground. +// A newline is appended to format by default. +func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) } + +// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A +// newline is appended to format by default. +func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) } + +// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground. +// A newline is appended to format by default. +func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) } + +// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A +// newline is appended to format by default. +func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) } + +// HiWhite is a convenient helper function to print with hi-intensity white foreground. A +// newline is appended to format by default. +func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) } + +// HiBlackString is a convenient helper function to return a string with hi-intensity black +// foreground. +func HiBlackString(format string, a ...interface{}) string { + return colorString(format, FgHiBlack, a...) +} + +// HiRedString is a convenient helper function to return a string with hi-intensity red +// foreground. +func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) } + +// HiGreenString is a convenient helper function to return a string with hi-intensity green +// foreground. +func HiGreenString(format string, a ...interface{}) string { + return colorString(format, FgHiGreen, a...) +} + +// HiYellowString is a convenient helper function to return a string with hi-intensity yellow +// foreground. +func HiYellowString(format string, a ...interface{}) string { + return colorString(format, FgHiYellow, a...) +} + +// HiBlueString is a convenient helper function to return a string with hi-intensity blue +// foreground. +func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) } + +// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta +// foreground. +func HiMagentaString(format string, a ...interface{}) string { + return colorString(format, FgHiMagenta, a...) +} + +// HiCyanString is a convenient helper function to return a string with hi-intensity cyan +// foreground. +func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) } + +// HiWhiteString is a convenient helper function to return a string with hi-intensity white +// foreground. +func HiWhiteString(format string, a ...interface{}) string { + return colorString(format, FgHiWhite, a...) +} diff --git a/vendor/github.com/fatih/color/doc.go b/vendor/github.com/fatih/color/doc.go new file mode 100644 index 0000000000..04541de786 --- /dev/null +++ b/vendor/github.com/fatih/color/doc.go @@ -0,0 +1,135 @@ +/* +Package color is an ANSI color package to output colorized or SGR defined +output to the standard output. The API can be used in several ways, pick one +that suits you. + +Use simple and default helper functions with predefined foreground colors: + + color.Cyan("Prints text in cyan.") + + // a newline will be appended automatically + color.Blue("Prints %s in blue.", "text") + + // More default foreground colors..
+ color.Red("We have red") + color.Yellow("Yellow color too!") + color.Magenta("And many others ..") + + // Hi-intensity colors + color.HiGreen("Bright green color.") + color.HiBlack("Bright black means gray..") + color.HiWhite("Shiny white color!") + +However there are times where custom color mixes are required. Below are some +examples to create custom color objects and use the print functions of each +separate color object. + + // Create a new color object + c := color.New(color.FgCyan).Add(color.Underline) + c.Println("Prints cyan text with an underline.") + + // Or just add them to New() + d := color.New(color.FgCyan, color.Bold) + d.Printf("This prints bold cyan %s\n", "too!.") + + + // Mix up foreground and background colors, create new mixes! + red := color.New(color.FgRed) + + boldRed := red.Add(color.Bold) + boldRed.Println("This will print text in bold red.") + + whiteBackground := red.Add(color.BgWhite) + whiteBackground.Println("Red text with White background.") + + // Use your own io.Writer output + color.New(color.FgBlue).Fprintln(myWriter, "blue color!") + + blue := color.New(color.FgBlue) + blue.Fprint(myWriter, "This will print text in blue.") + +You can create PrintXxx functions to simplify even more: + + // Create a custom print function for convenience + red := color.New(color.FgRed).PrintfFunc() + red("warning") + red("error: %s", err) + + // Mix up multiple attributes + notice := color.New(color.Bold, color.FgGreen).PrintlnFunc() + notice("don't forget this...") + +You can also use FprintXxx functions to pass your own io.Writer: + + blue := color.New(FgBlue).FprintfFunc() + blue(myWriter, "important notice: %s", stars) + + // Mix up with multiple attributes + success := color.New(color.Bold, color.FgGreen).FprintlnFunc() + success(myWriter, "don't forget this...") + + +Or create SprintXxx functions to mix strings with other non-colorized strings: + + yellow := New(FgYellow).SprintFunc() + red := New(FgRed).SprintFunc() + + fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Printf("this %s rocks!\n", info("package")) + +Windows support is enabled by default. All Print functions work as intended. +However, for the color.SprintXXX functions, users should use fmt.FprintXXX and +set the output to color.Output: + + fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS")) + + info := New(FgWhite, BgGreen).SprintFunc() + fmt.Fprintf(color.Output, "this %s rocks!\n", info("package")) + +Using it with existing code is possible. Just use the Set() method to set the +standard output to the given parameters. That way a rewrite of existing +code is not required. + + // Use handy standard colors. + color.Set(color.FgYellow) + + fmt.Println("Existing text will now be in Yellow") + fmt.Printf("This one %s\n", "too") + + color.Unset() // don't forget to unset + + // You can mix up parameters + color.Set(color.FgMagenta, color.Bold) + defer color.Unset() // use it in your function + + fmt.Println("All text will now be bold magenta.") + +There might be a case where you want to disable color output (for example to +pipe the standard output of your app to somewhere else). `Color` has support to +disable colors both globally and for single color definitions. For example, +suppose you have a CLI app and a `--no-color` bool flag.
You can easily disable +the color output with: + + var flagNoColor = flag.Bool("no-color", false, "Disable color output") + + if *flagNoColor { + color.NoColor = true // disables colorized output + } + +You can also disable the color by setting the NO_COLOR environment variable to any value. + +It also has support for single color definitions (local). You can +disable/enable color output on the fly: + + c := color.New(color.FgCyan) + c.Println("Prints cyan text") + + c.DisableColor() + c.Println("This is printed without any color") + + c.EnableColor() + c.Println("This prints again cyan...") +*/ +package color diff --git a/vendor/github.com/fatih/color/go.mod b/vendor/github.com/fatih/color/go.mod new file mode 100644 index 0000000000..c9b3cd59a2 --- /dev/null +++ b/vendor/github.com/fatih/color/go.mod @@ -0,0 +1,8 @@ +module github.com/fatih/color + +go 1.13 + +require ( + github.com/mattn/go-colorable v0.1.9 + github.com/mattn/go-isatty v0.0.14 +) diff --git a/vendor/github.com/fatih/color/go.sum b/vendor/github.com/fatih/color/go.sum new file mode 100644 index 0000000000..cbbcfb6446 --- /dev/null +++ b/vendor/github.com/fatih/color/go.sum @@ -0,0 +1,9 @@ +github.com/mattn/go-colorable v0.1.9 h1:sqDoxXbdeALODt0DAeJCVp38ps9ZogZEAXjus69YV3U= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= diff --git a/vendor/github.com/gdamore/encoding/.appveyor.yml b/vendor/github.com/gdamore/encoding/.appveyor.yml new file mode 100644 index 0000000000..19a4c5ddf9 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/.appveyor.yml @@ -0,0 +1,13 @@ +version: 1.0.{build} +clone_folder: c:\gopath\src\github.com\gdamore\encoding +environment: + GOPATH: c:\gopath +build_script: +- go version +- go env +- SET PATH=%LOCALAPPDATA%\atom\bin;%GOPATH%\bin;%PATH% +- go get -t ./... +- go build +- go install ./... +test_script: +- go test ./... diff --git a/vendor/github.com/gdamore/encoding/.travis.yml b/vendor/github.com/gdamore/encoding/.travis.yml new file mode 100644 index 0000000000..504241380e --- /dev/null +++ b/vendor/github.com/gdamore/encoding/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - 1.9.x + - 1.10.x + - 1.11.x + - tip diff --git a/vendor/github.com/gdamore/encoding/LICENSE b/vendor/github.com/gdamore/encoding/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
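For orientation, the `fatih/color` APIs vendored above (`New`, `Add`, `SprintFunc`, `NoColor`, `Output`) compose as in this minimal sketch; the `main` package, the `APP_NO_COLOR` variable, and the messages are illustrative assumptions, not part of the vendored code:

```go
package main

import (
	"fmt"
	"os"

	"github.com/fatih/color"
)

func main() {
	// Global kill switch, mirroring the --no-color pattern shown in the
	// README above; APP_NO_COLOR is a hypothetical variable for this sketch.
	if _, off := os.LookupEnv("APP_NO_COLOR"); off {
		color.NoColor = true
	}

	// A reusable color object: chain attributes with Add.
	warn := color.New(color.FgYellow).Add(color.Bold)
	warn.Println("about to do something risky")

	// SprintFunc mixes colorized fragments into plain strings; writing
	// through color.Output keeps Windows consoles working.
	red := color.New(color.FgRed).SprintFunc()
	fmt.Fprintf(color.Output, "this is an %s\n", red("error"))
}
```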
diff --git a/vendor/github.com/gdamore/encoding/README.md b/vendor/github.com/gdamore/encoding/README.md new file mode 100644 index 0000000000..3db2b4c58a --- /dev/null +++ b/vendor/github.com/gdamore/encoding/README.md @@ -0,0 +1,19 @@ +## encoding + +[![Linux Status](https://img.shields.io/travis/gdamore/encoding.svg?label=linux)](https://travis-ci.org/gdamore/encoding) +[![Windows Status](https://img.shields.io/appveyor/ci/gdamore/encoding.svg?label=windows)](https://ci.appveyor.com/project/gdamore/encoding) +[![Apache License](https://img.shields.io/badge/license-APACHE2-blue.svg)](https://github.com/gdamore/encoding/blob/master/LICENSE) +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/gdamore/encoding) +[![Go Report Card](http://goreportcard.com/badge/gdamore/encoding)](http://goreportcard.com/report/gdamore/encoding) + +Package encoding provides a number of encodings that are missing from the +standard Go [encoding](https://godoc.org/golang.org/x/text/encoding) package. + +We hope that we can contribute these to the standard Go library someday. It +turns out that some of these are useful for dealing with I/O streams coming +from non-UTF-friendly sources. + +The UTF8 Encoder is also useful for situations where valid UTF-8 might be +carried in streams that contain invalid UTF-8; in particular I use it for +helping me cope with terminals that embed escape sequences in otherwise +valid UTF-8. diff --git a/vendor/github.com/gdamore/encoding/ascii.go b/vendor/github.com/gdamore/encoding/ascii.go new file mode 100644 index 0000000000..b7321f4334 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/ascii.go @@ -0,0 +1,36 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// ASCII represents the 7-bit US-ASCII scheme. It decodes directly to +// UTF-8 without change, as all ASCII values are legal UTF-8. +// Unicode values less than 128 (i.e. 7 bits) map 1:1 with ASCII. +// It encodes runes outside of that to 0x1A, the ASCII substitution character. +var ASCII encoding.Encoding + +func init() { + amap := make(map[byte]rune) + for i := 128; i <= 255; i++ { + amap[byte(i)] = RuneError + } + + cm := &Charmap{Map: amap} + cm.Init() + ASCII = cm +} diff --git a/vendor/github.com/gdamore/encoding/charmap.go b/vendor/github.com/gdamore/encoding/charmap.go new file mode 100644 index 0000000000..db1c33ef7f --- /dev/null +++ b/vendor/github.com/gdamore/encoding/charmap.go @@ -0,0 +1,196 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "sync" + "unicode/utf8" + + "golang.org/x/text/encoding" + "golang.org/x/text/transform" +) + +const ( + // RuneError is an alias for the UTF-8 replacement rune, '\uFFFD'. + RuneError = '\uFFFD' + + // RuneSelf is the rune below which UTF-8 and the Unicode values are + // identical. It's also the limit for ASCII. + RuneSelf = 0x80 + + // ASCIISub is the ASCII substitution character. + ASCIISub = '\x1a' +) + +// Charmap is a structure for setting up encodings for 8-bit character sets, +// for transforming between UTF-8 and that other character set. It has some +// ideas borrowed from golang.org/x/text/encoding/charmap, but it uses a +// different implementation. This implementation uses maps, and supports +// user-defined maps. +// +// We do assume that a character map has a reasonable substitution character, +// and that valid encodings are stable (exactly a 1:1 map) and stateless +// (that is, there is no shift character or anything like that). Hence this +// approach will not work for many East Asian character sets. +// +// Measurement shows little or no measurable difference in the performance of +// the two approaches. The difference was down to a couple of nsec/op, and +// no consistent pattern as to which ran faster. With the conversion to +// UTF-8 the code takes about 25 nsec/op. The conversion in the reverse +// direction takes about 100 nsec/op. (The larger cost for conversion +// from UTF-8 is most likely due to the need to convert the UTF-8 byte stream +// to a rune before conversion.) +// +type Charmap struct { + transform.NopResetter + bytes map[rune]byte + runes [256][]byte + once sync.Once + + // The map between bytes and runes. To indicate that a specific + // byte value is invalid for a character set, use the rune + // utf8.RuneError. Values that are absent from this map will + // be assumed to have the identity mapping -- that is the default + // is to assume ISO8859-1, where all 8-bit characters have the same + // numeric value as their Unicode runes. (Not to be confused with + // the UTF-8 values, which *will* be different for non-ASCII runes.) + // + // If no values less than RuneSelf are changed (or have non-identity + // mappings), then the character set is assumed to be an ASCII + // superset, and certain assumptions and optimizations become + // available for ASCII bytes. + Map map[byte]rune + + // The ReplacementChar is the byte value to use for substitution. + // It should normally be ASCIISub for ASCII encodings. This may be + // unset (left to zero) for mappings that are strictly ASCII supersets. + // In that case ASCIISub will be assumed instead. + ReplacementChar byte +} + +type cmapDecoder struct { + transform.NopResetter + runes [256][]byte +} + +type cmapEncoder struct { + transform.NopResetter + bytes map[rune]byte + replace byte +} + +// Init initializes internal values of a character map. This should +// be done early, to minimize the cost of allocation of transforms +// later. It is not strictly necessary, however, as the allocation +// functions will arrange to call it if it has not already been done.
+func (c *Charmap) Init() { + c.once.Do(c.initialize) +} + +func (c *Charmap) initialize() { + c.bytes = make(map[rune]byte) + ascii := true + + for i := 0; i < 256; i++ { + r, ok := c.Map[byte(i)] + if !ok { + r = rune(i) + } + if r < 128 && r != rune(i) { + ascii = false + } + if r != RuneError { + c.bytes[r] = byte(i) + } + utf := make([]byte, utf8.RuneLen(r)) + utf8.EncodeRune(utf, r) + c.runes[i] = utf + } + if ascii && c.ReplacementChar == '\x00' { + c.ReplacementChar = ASCIISub + } +} + +// NewDecoder returns a Decoder that converts from the 8-bit +// character set to UTF-8. Unknown mappings, if any, are mapped +// to '\uFFFD'. +func (c *Charmap) NewDecoder() *encoding.Decoder { + c.Init() + return &encoding.Decoder{Transformer: &cmapDecoder{runes: c.runes}} +} + +// NewEncoder returns an Encoder that converts from UTF-8 to the +// 8-bit character set. Unknown mappings are mapped to 0x1A. +func (c *Charmap) NewEncoder() *encoding.Encoder { + c.Init() + return &encoding.Encoder{ + Transformer: &cmapEncoder{ + bytes: c.bytes, + replace: c.ReplacementChar, + }, + } +} + +func (d *cmapDecoder) Transform(dst, src []byte, atEOF bool) (int, int, error) { + var e error + var ndst, nsrc int + + for _, c := range src { + b := d.runes[c] + l := len(b) + + if ndst+l > len(dst) { + e = transform.ErrShortDst + break + } + for i := 0; i < l; i++ { + dst[ndst] = b[i] + ndst++ + } + nsrc++ + } + return ndst, nsrc, e +} + +func (d *cmapEncoder) Transform(dst, src []byte, atEOF bool) (int, int, error) { + var e error + var ndst, nsrc int + for nsrc < len(src) { + if ndst >= len(dst) { + e = transform.ErrShortDst + break + } + + r, sz := utf8.DecodeRune(src[nsrc:]) + if r == utf8.RuneError && sz == 1 { + // If it's inconclusive due to insufficient data + // in the source, report it + if !atEOF && !utf8.FullRune(src[nsrc:]) { + e = transform.ErrShortSrc + break + } + } + + if c, ok := d.bytes[r]; ok { + dst[ndst] = c + } else { + dst[ndst] = d.replace + } + nsrc += sz + ndst++ + } + + return ndst, nsrc, e +} diff --git a/vendor/github.com/gdamore/encoding/doc.go b/vendor/github.com/gdamore/encoding/doc.go new file mode 100644 index 0000000000..8a7b48d7ee --- /dev/null +++ b/vendor/github.com/gdamore/encoding/doc.go @@ -0,0 +1,17 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package encoding provides a few of the encoding structures that are +// missing from the Go x/text/encoding tree. +package encoding diff --git a/vendor/github.com/gdamore/encoding/ebcdic.go b/vendor/github.com/gdamore/encoding/ebcdic.go new file mode 100644 index 0000000000..8e13f1a97f --- /dev/null +++ b/vendor/github.com/gdamore/encoding/ebcdic.go @@ -0,0 +1,273 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// EBCDIC represents the 8-bit EBCDIC scheme, found in some mainframe +// environments. If you don't know what this is, consider yourself lucky. +var EBCDIC encoding.Encoding + +func init() { + cm := &Charmap{ + ReplacementChar: '\x3f', + Map: map[byte]rune{ + // 0x00-0x03 match + 0x04: RuneError, + 0x05: '\t', + 0x06: RuneError, + 0x07: '\x7f', + 0x08: RuneError, + 0x09: RuneError, + 0x0a: RuneError, + // 0x0b-0x13 match + 0x14: RuneError, + 0x15: '\x85', // Not in any ISO code + 0x16: '\x08', + 0x17: RuneError, + // 0x18-0x19 match + 0x1a: RuneError, + 0x1b: RuneError, + // 0x1c-0x1f match + 0x20: RuneError, + 0x21: RuneError, + 0x22: RuneError, + 0x23: RuneError, + 0x24: RuneError, + 0x25: '\n', + 0x26: '\x17', + 0x27: '\x1b', + 0x28: RuneError, + 0x29: RuneError, + 0x2a: RuneError, + 0x2b: RuneError, + 0x2c: RuneError, + 0x2d: '\x05', + 0x2e: '\x06', + 0x2f: '\x07', + 0x30: RuneError, + 0x31: RuneError, + 0x32: '\x16', + 0x33: RuneError, + 0x34: RuneError, + 0x35: RuneError, + 0x36: RuneError, + 0x37: '\x04', + 0x38: RuneError, + 0x39: RuneError, + 0x3a: RuneError, + 0x3b: RuneError, + 0x3c: '\x14', + 0x3d: '\x15', + 0x3e: RuneError, + 0x3f: '\x1a', // also replacement char + 0x40: ' ', + 0x41: '\xa0', + 0x42: RuneError, + 0x43: RuneError, + 0x44: RuneError, + 0x45: RuneError, + 0x46: RuneError, + 0x47: RuneError, + 0x48: RuneError, + 0x49: RuneError, + 0x4a: RuneError, + 0x4b: '.', + 0x4c: '<', + 0x4d: '(', + 0x4e: '+', + 0x4f: '|', + 0x50: '&', + 0x51: RuneError, + 0x52: RuneError, + 0x53: RuneError, + 0x54: RuneError, + 0x55: RuneError, + 0x56: RuneError, + 0x57: RuneError, + 0x58: RuneError, + 0x59: RuneError, + 0x5a: '!', + 0x5b: '$', + 0x5c: '*', + 0x5d: ')', + 0x5e: ';', + 0x5f: '¬', + 0x60: '-', + 0x61: '/', + 0x62: RuneError, + 0x63: RuneError, + 0x64: RuneError, + 0x65: RuneError, + 0x66: RuneError, + 0x67: RuneError, + 0x68: RuneError, + 0x69: RuneError, + 0x6a: '¦', + 0x6b: ',', + 0x6c: '%', + 0x6d: '_', + 0x6e: '>', + 0x6f: '?', + 0x70: RuneError, + 0x71: RuneError, + 0x72: RuneError, + 0x73: RuneError, + 0x74: RuneError, + 0x75: RuneError, + 0x76: RuneError, + 0x77: RuneError, + 0x78: RuneError, + 0x79: '`', + 0x7a: ':', + 0x7b: '#', + 0x7c: '@', + 0x7d: '\'', + 0x7e: '=', + 0x7f: '"', + 0x80: RuneError, + 0x81: 'a', + 0x82: 'b', + 0x83: 'c', + 0x84: 'd', + 0x85: 'e', + 0x86: 'f', + 0x87: 'g', + 0x88: 'h', + 0x89: 'i', + 0x8a: RuneError, + 0x8b: RuneError, + 0x8c: RuneError, + 0x8d: RuneError, + 0x8e: RuneError, + 0x8f: '±', + 0x90: RuneError, + 0x91: 'j', + 0x92: 'k', + 0x93: 'l', + 0x94: 'm', + 0x95: 'n', + 0x96: 'o', + 0x97: 'p', + 0x98: 'q', + 0x99: 'r', + 0x9a: RuneError, + 0x9b: RuneError, + 0x9c: RuneError, + 0x9d: RuneError, + 0x9e: RuneError, + 0x9f: RuneError, + 0xa0: RuneError, + 0xa1: '~', + 0xa2: 's', + 0xa3: 't', + 0xa4: 'u', + 0xa5: 'v', + 0xa6: 'w', + 0xa7: 'x', + 0xa8: 'y', + 0xa9: 'z', + 0xaa: RuneError, + 0xab: RuneError, + 0xac: RuneError, + 0xad: RuneError, + 0xae: RuneError, + 0xaf: RuneError, + 0xb0: '^', + 0xb1: RuneError, + 0xb2: RuneError, + 0xb3: RuneError, + 
0xb4: RuneError, + 0xb5: RuneError, + 0xb6: RuneError, + 0xb7: RuneError, + 0xb8: RuneError, + 0xb9: RuneError, + 0xba: '[', + 0xbb: ']', + 0xbc: RuneError, + 0xbd: RuneError, + 0xbe: RuneError, + 0xbf: RuneError, + 0xc0: '{', + 0xc1: 'A', + 0xc2: 'B', + 0xc3: 'C', + 0xc4: 'D', + 0xc5: 'E', + 0xc6: 'F', + 0xc7: 'G', + 0xc8: 'H', + 0xc9: 'I', + 0xca: '\xad', // NB: soft hyphen + 0xcb: RuneError, + 0xcc: RuneError, + 0xcd: RuneError, + 0xce: RuneError, + 0xcf: RuneError, + 0xd0: '}', + 0xd1: 'J', + 0xd2: 'K', + 0xd3: 'L', + 0xd4: 'M', + 0xd5: 'N', + 0xd6: 'O', + 0xd7: 'P', + 0xd8: 'Q', + 0xd9: 'R', + 0xda: RuneError, + 0xdb: RuneError, + 0xdc: RuneError, + 0xdd: RuneError, + 0xde: RuneError, + 0xdf: RuneError, + 0xe0: '\\', + 0xe1: '\u2007', // Non-breaking space + 0xe2: 'S', + 0xe3: 'T', + 0xe4: 'U', + 0xe5: 'V', + 0xe6: 'W', + 0xe7: 'X', + 0xe8: 'Y', + 0xe9: 'Z', + 0xea: RuneError, + 0xeb: RuneError, + 0xec: RuneError, + 0xed: RuneError, + 0xee: RuneError, + 0xef: RuneError, + 0xf0: '0', + 0xf1: '1', + 0xf2: '2', + 0xf3: '3', + 0xf4: '4', + 0xf5: '5', + 0xf6: '6', + 0xf7: '7', + 0xf8: '8', + 0xf9: '9', + 0xfa: RuneError, + 0xfb: RuneError, + 0xfc: RuneError, + 0xfd: RuneError, + 0xfe: RuneError, + 0xff: RuneError, + }} + cm.Init() + EBCDIC = cm +} diff --git a/vendor/github.com/gdamore/encoding/go.mod b/vendor/github.com/gdamore/encoding/go.mod new file mode 100644 index 0000000000..e91b30d5a6 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/go.mod @@ -0,0 +1,5 @@ +module github.com/gdamore/encoding + +go 1.9 + +require golang.org/x/text v0.3.0 diff --git a/vendor/github.com/gdamore/encoding/go.sum b/vendor/github.com/gdamore/encoding/go.sum new file mode 100644 index 0000000000..6bad37b2a7 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/go.sum @@ -0,0 +1,2 @@ +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/gdamore/encoding/latin1.go b/vendor/github.com/gdamore/encoding/latin1.go new file mode 100644 index 0000000000..226bf01d0f --- /dev/null +++ b/vendor/github.com/gdamore/encoding/latin1.go @@ -0,0 +1,33 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// ISO8859_1 represents the 8-bit ISO8859-1 scheme. It decodes directly to +// UTF-8 without change, as all ISO8859-1 values are legal UTF-8. +// Unicode values less than 256 (i.e. 8 bits) map 1:1 with 8859-1. +// It encodes runes outside of that to 0x1A, the ASCII substitution character. +var ISO8859_1 encoding.Encoding + +func init() { + cm := &Charmap{} + cm.Init() + + // 8859-1 is the 8-bit identity map for Unicode. 
+ ISO8859_1 = cm +} diff --git a/vendor/github.com/gdamore/encoding/latin5.go b/vendor/github.com/gdamore/encoding/latin5.go new file mode 100644 index 0000000000..c75ecf2753 --- /dev/null +++ b/vendor/github.com/gdamore/encoding/latin5.go @@ -0,0 +1,35 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +// ISO8859_9 represents the 8-bit ISO8859-9 scheme. +var ISO8859_9 encoding.Encoding + +func init() { + cm := &Charmap{Map: map[byte]rune{ + 0xD0: 'Ğ', + 0xDD: 'İ', + 0xDE: 'Ş', + 0xF0: 'ğ', + 0xFD: 'ı', + 0xFE: 'ş', + }} + cm.Init() + ISO8859_9 = cm +} diff --git a/vendor/github.com/gdamore/encoding/utf8.go b/vendor/github.com/gdamore/encoding/utf8.go new file mode 100644 index 0000000000..2d59f4b39d --- /dev/null +++ b/vendor/github.com/gdamore/encoding/utf8.go @@ -0,0 +1,35 @@ +// Copyright 2015 Garrett D'Amore +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package encoding + +import ( + "golang.org/x/text/encoding" +) + +type validUtf8 struct{} + +// UTF8 is an encoding for UTF-8. All it does is verify that the UTF-8 +// input is valid. The main reason for its existence is that it will detect +// and report ErrShortSrc or ErrShortDst, whereas the Nop encoding just +// passes every byte, blithely. +var UTF8 encoding.Encoding = validUtf8{} + +func (validUtf8) NewDecoder() *encoding.Decoder { + return &encoding.Decoder{Transformer: encoding.UTF8Validator} +} + +func (validUtf8) NewEncoder() *encoding.Encoder { + return &encoding.Encoder{Transformer: encoding.UTF8Validator} +} diff --git a/vendor/github.com/gdamore/tcell/v2/.appveyor.yml b/vendor/github.com/gdamore/tcell/v2/.appveyor.yml new file mode 100644 index 0000000000..435dfe3a83 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/.appveyor.yml @@ -0,0 +1,13 @@ +version: 1.0.{build} +clone_folder: c:\gopath\src\github.com\gdamore\tcell +environment: + GOPATH: c:\gopath +build_script: +- go version +- go env +- SET PATH=%LOCALAPPDATA%\atom\bin;%GOPATH%\bin;%PATH% +- go get -t ./... +- go build +- go install ./... +test_script: +- go test ./...
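The `gdamore/encoding` charmaps above satisfy the standard `golang.org/x/text/encoding` interfaces, so an encode/decode round-trip looks like the sketch below; the import path comes from the vendored go.mod, while the package alias and sample text are illustrative assumptions:

```go
package main

import (
	"fmt"

	enc "github.com/gdamore/encoding"
)

func main() {
	// UTF-8 to ISO8859-9 (Latin-5): 'ş' maps to 0xFE per the charmap above;
	// unmappable runes would become the ASCII substitution character 0x1A.
	latin5, err := enc.ISO8859_9.NewEncoder().Bytes([]byte("Iş"))
	if err != nil {
		fmt.Println("encode:", err)
		return
	}
	fmt.Printf("% x\n", latin5) // 49 fe

	// And back: the decoder maps 0xFE to 'ş' again.
	utf8Bytes, _ := enc.ISO8859_9.NewDecoder().Bytes(latin5)
	fmt.Println(string(utf8Bytes)) // Iş
}
```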
diff --git a/vendor/github.com/gdamore/tcell/v2/.gitignore b/vendor/github.com/gdamore/tcell/v2/.gitignore new file mode 100644 index 0000000000..c57100a595 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/.gitignore @@ -0,0 +1 @@ +coverage.txt diff --git a/vendor/github.com/gdamore/tcell/v2/.travis.yml b/vendor/github.com/gdamore/tcell/v2/.travis.yml new file mode 100644 index 0000000000..967b5b3357 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.15.x + - master + +arch: + - amd64 + - ppc64le + +before_install: + - go get -t -v ./... + +script: + - go test -race -coverprofile=coverage.txt -covermode=atomic + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/gdamore/tcell/v2/AUTHORS b/vendor/github.com/gdamore/tcell/v2/AUTHORS new file mode 100644 index 0000000000..53f87ee639 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/AUTHORS @@ -0,0 +1,4 @@ +Garrett D'Amore +Zachary Yedidia +Junegunn Choi +Staysail Systems, Inc. diff --git a/vendor/github.com/gdamore/tcell/v2/CHANGESv2.md b/vendor/github.com/gdamore/tcell/v2/CHANGESv2.md new file mode 100644 index 0000000000..ad97c11b5b --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/CHANGESv2.md @@ -0,0 +1,82 @@ +## Breaking Changes in _Tcell_ v2 + +A number of changes were made to _Tcell_ for version two, and some of these are breaking. + +### Import Path + +The import path for tcell has changed to `github.com/gdamore/tcell/v2` to reflect a new major version. + +### Style Is Not Numeric + +The type `Style` has changed to a structure, to allow us to add additional data such as flags for color setting, +more attribute bits, and so forth. +Applications that relied on this being a number will need to be updated to use the accessor methods. + +### Mouse Event Changes + +The middle mouse button was reported as button 2 on Linux, but as button 3 on Windows, +and the right mouse button was reported the reverse way. +_Tcell_ now always reports the right mouse button as button 2, and the middle button as button 3. +To help make this clearer, new symbols `ButtonPrimary`, `ButtonSecondary`, and +`ButtonMiddle` are provided. +(Note that which button is right vs. left may be impacted by user preferences. +Usually the left button will be considered the Primary, and the right will be the Secondary.) +Applications may need to adjust their handling of mouse buttons 2 and 3 accordingly. + +### Terminals Removed + +A number of terminals have been removed. +These are mostly ancient definitions unlikely to be used by anyone, such as `adm3a`. + +### High Number Function Keys + +Historically terminfo reported function keys with modifiers set as a different +function key altogether. For example, Shift-F1 was reported as F13 on XTerm. +_Tcell_ now prefers to report these using the base key (such as F1) with modifiers added. +This works on XTerm and VTE based emulators, but some emulators may not support this. +The new behavior more closely aligns with behavior on Windows platforms. + +## New Features in _Tcell_ v2 + +These features are not breaking, but are introduced in version 2. + +### Improved Modifier Support + +For terminals that appear to behave like the venerable XTerm, _tcell_ +automatically adds modifier reporting for ALT, CTRL, SHIFT, and META keys +when the terminal reports them. 
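As a quick sketch of the v2 idiom for the `Style` change described above (not part of the original changelog), composition now goes through the accessor methods:

```golang
package main

import "github.com/gdamore/tcell/v2"

func main() {
	// v1 code that treated Style as a number must switch to accessors.
	st := tcell.StyleDefault.
		Foreground(tcell.ColorWhite).
		Background(tcell.ColorPurple).
		Bold(true)

	// Decompose recovers the foreground, background, and attribute mask.
	fg, bg, attrs := st.Decompose()
	_, _, _ = fg, bg, attrs
}
```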
+ +### Better Support for Palettes (Themes) + +When using a color by its name or palette entry, _Tcell_ now tries to +use that palette entry as is; this should avoid some inconsistency and respect +terminal themes correctly. + +When true fidelity to RGB values is needed, the new `TrueColor()` API can be used +to create a direct color, which bypasses the palette altogether. + +### Automatic TrueColor Detection + +For some terminals, if the `Tc` or `RGB` properties are present in terminfo, +_Tcell_ will automatically assume the terminal supports 24-bit color. + +### ColorReset + +A new color value, `ColorReset`, can be used on the foreground or background +to reset the color to the default used by the terminal. + +### tmux Support + +_Tcell_ now has improved support for tmux, when the `$TERM` variable is set to "tmux". + +### Strikethrough Support + +_Tcell_ has support for strikethrough when the terminal supports it, using the new `StrikeThrough()` API. + +### Bracketed Paste Support + +_Tcell_ provides the long-requested capability to discriminate paste events by using the +bracketed-paste capability present in some terminals. This is automatically available on +terminals that support XTerm style mouse handling, but applications must opt-in to this +by using the new `EnablePaste()` function. A new `EventPaste` type of event will be +delivered when starting and finishing a paste operation. \ No newline at end of file diff --git a/vendor/github.com/gdamore/tcell/v2/LICENSE b/vendor/github.com/gdamore/tcell/v2/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/gdamore/tcell/v2/README.md b/vendor/github.com/gdamore/tcell/v2/README.md new file mode 100644 index 0000000000..b7727e081a --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/README.md @@ -0,0 +1,272 @@ + +# ![Tcell](logos/tcell.png) + +_Tcell_ is a _Go_ package that provides a cell based view for text terminals, like _XTerm_. +It was inspired by _termbox_, but includes many additional improvements. 
+ +[![Linux Status](https://img.shields.io/travis/gdamore/tcell.svg?label=linux)](https://travis-ci.org/gdamore/tcell) +[![Windows Status](https://img.shields.io/appveyor/ci/gdamore/tcell.svg?label=windows)](https://ci.appveyor.com/project/gdamore/tcell) +[![Apache License](https://img.shields.io/badge/license-APACHE2-blue.svg)](https://github.com/gdamore/tcell/blob/master/LICENSE) +[![Documentation](https://img.shields.io/badge/godoc-reference-blue.svg)](https://pkg.go.dev/github.com/gdamore/tcell/v2) +[![Report Card](https://goreportcard.com/badge/gdamore/tcell)](http://goreportcard.com/report/gdamore/tcell/v2) +[![Discord](https://img.shields.io/discord/639503822733180969?label=discord)](https://discord.gg/urTTxDN) +[![Coverage](https://codecov.io/gh/gdamore/tcell/branch/master/graph/badge.svg)](https://codecov.io/gh/gdamore/tcell) + + +NOTE: This is version 2 of _Tcell_. There are breaking changes relative to version 1. +Version 1.x remains available using the import `github.com/gdamore/tcell`. + +## Tutorial + +A brief, and still somewhat rough, [tutorial](TUTORIAL.md) is available. + +## Examples + +* [proxima5](https://github.com/gdamore/proxima5) - space shooter ([video](https://youtu.be/jNxKTCmY_bQ)) +* [govisor](https://github.com/gdamore/govisor) - service management UI ([screenshot](http://2.bp.blogspot.com/--OsvnfzSNow/Vf7aqMw3zXI/AAAAAAAAARo/uOMtOvw4Sbg/s1600/Screen%2BShot%2B2015-09-20%2Bat%2B9.08.41%2BAM.png)) +* mouse demo - included mouse test ([screenshot](http://2.bp.blogspot.com/-fWvW5opT0es/VhIdItdKqJI/AAAAAAAAATE/7Ojc0L1SpB0/s1600/Screen%2BShot%2B2015-10-04%2Bat%2B11.47.13%2BPM.png)) +* [gomatrix](https://github.com/gdamore/gomatrix) - converted from Termbox +* [micro](https://github.com/zyedidia/micro/) - lightweight text editor with syntax-highlighting and themes +* [godu](https://github.com/viktomas/godu) - utility to discover large files/folders +* [tview](https://github.com/rivo/tview/) - rich interactive widgets +* [cview](https://code.rocketnine.space/tslocum/cview) - user interface toolkit (fork of _tview_) +* [awesome gocui](https://github.com/awesome-gocui/gocui) - Go Console User Interface +* [gomandelbrot](https://github.com/rgm3/gomandelbrot) - Mandelbrot!
+* [WTF](https://github.com/senorprogrammer/wtf) - personal information dashboard +* [browsh](https://github.com/browsh-org/browsh) - modern web browser ([video](https://www.youtube.com/watch?v=HZq86XfBoRo)) +* [go-life](https://github.com/sachaos/go-life) - Conway's Game of Life +* [gowid](https://github.com/gcla/gowid) - compositional widgets for terminal UIs, inspired by _urwid_ +* [termshark](https://termshark.io) - interface for _tshark_, inspired by Wireshark, built on _gowid_ +* [go-tetris](https://github.com/MichaelS11/go-tetris) - Go Tetris with AI option +* [fzf](https://github.com/junegunn/fzf) - command-line fuzzy finder +* [ascii-fluid](https://github.com/esimov/ascii-fluid) - fluid simulation controlled by webcam +* [cbind](https://code.rocketnine.space/tslocum/cbind) - key event encoding, decoding and handling +* [tpong](https://github.com/spinzed/tpong) - old-school Pong +* [aerc](https://git.sr.ht/~sircmpwn/aerc) - email client +* [tblogs](https://github.com/ezeoleaf/tblogs) - development blogs reader +* [spinc](https://github.com/lallassu/spinc) - _irssi_ inspired chat application for Cisco Spark/WebEx +* [gorss](https://github.com/lallassu/gorss) - RSS/Atom feed reader +* [memoryalike](https://github.com/Bios-Marcel/memoryalike) - memorization game +* [lf](https://github.com/gokcehan/lf) - file manager +* [gokeybr](https://github.com/bunyk/gokeybr) - deliberately practice your typing +* [gonano](https://github.com/jbaramidze/gonano) - editor, mimics _nano_ +* [uchess](https://github.com/tmountain/uchess) - UCI chess client +* [min](https://github.com/a-h/min) - Gemini browser +* [ov](https://github.com/noborus/ov) - file pager +* [tmux-wormhole](https://github.com/gcla/tmux-wormhole) - _tmux_ plugin to transfer files +* [gruid-tcell](https://github.com/anaseto/gruid-tcell) - driver for the grid based UI and game framework +* [aretext](https://github.com/aretext/aretext) - minimalist text editor with _vim_ key bindings + +## Pure Go Terminfo Database + +_Tcell_ includes a full parser and expander for terminfo capability strings, +so that it can avoid hard coding escape strings for formatting. It also favors +portability, and includes support for all POSIX systems. + +The database is also flexible & extensible, and can be modified by either running +a program to build the entire database, or an entry for just a single terminal. + +## More Portable + +_Tcell_ is portable to a wide variety of systems, and is pure Go, without +any need for CGO. +_Tcell_ is believed to work with mainstream systems officially supported by golang. + +## No Async IO + +_Tcell_ is able to operate without requiring `SIGIO` signals (unlike _termbox_), +or asynchronous I/O, and can instead use standard Go file objects and Go routines. +This means it should be safe, especially for +use with programs that use exec, or otherwise need to manipulate the tty streams. +This model is also much closer to idiomatic Go, leading to fewer surprises. + +## Rich Unicode & non-Unicode support + +_Tcell_ includes enhanced support for Unicode, including wide characters and +combining characters, provided your terminal can support them. +Note that +Windows terminals generally don't support the full Unicode repertoire. + +It will also convert to and from Unicode locales, so that the program +can work with UTF-8 internally, and get reasonable output in other locales. +_Tcell_ tries hard to convert to native characters on both input and output. 
+ +On output _Tcell_ even makes use of the alternate character set to facilitate +drawing certain characters. + +## More Function Keys + +_Tcell_ also has richer support for a larger number of special keys that some +terminals can send. + +## Better Color Handling + +_Tcell_ will respect your terminal's color space as specified within your terminfo entries. +For example, attempts to emit color sequences on VT100 terminals +won't result in unintended consequences. + +In legacy Windows mode, _Tcell_ supports 16 colors, bold, dim, and reverse, +instead of just termbox's 8 colors with reverse. (Note that there is some +conflation with bold/dim and colors.) +Modern Windows 10 can benefit from much richer colors however. + +_Tcell_ maps 16 colors down to 8, for terminals that need it. +(The upper 8 colors are just brighter versions of the lower 8.) + +## Better Mouse Support + +_Tcell_ supports enhanced mouse tracking mode, so your application can receive +regular mouse motion events, and wheel events, if your terminal supports it. + +(Note: The Windows 10 Terminal application suffers from a flaw in this regard, +and does not support mouse interaction. The stock Windows 10 console host +fired up with cmd.exe or PowerShell works fine however.) + +## _Termbox_ Compatibility + +A compatibility layer for _termbox_ is provided in the `compat` directory. +To use it, try importing `github.com/gdamore/tcell/termbox` instead. +Most _termbox-go_ programs will probably work without further modification. + +## Working With Unicode + +Internally _Tcell_ uses UTF-8, just like Go. +However, _Tcell_ understands how to +convert to and from other character sets, using the capabilities of +the `golang.org/x/text/encoding` packages. +Your application must supply +them, as the full set of the most common ones bloats the program by about 2 MB. +If you're lazy, and want them all anyway, see the `encoding` sub-directory. + +## Wide & Combining Characters + +The `SetContent()` API takes a primary rune, and an optional list of combining runes. +If any of the runes is a wide (East Asian) rune occupying two cells, +then the library will skip output from the following cell. Care must be +taken in the application to avoid explicitly attempting to set content in the +next cell, otherwise the results are undefined. (Normally the wide character +is displayed, and the other character is not; do not depend on that behavior.) + +Older terminal applications (especially on systems like Windows 8) lack support +for advanced Unicode, and thus may not fare well. + +## Colors + +_Tcell_ assumes the ANSI/XTerm color model, including the 256 color map that +XTerm uses when it supports 256 colors. The terminfo guidance will be +honored, with respect to the number of colors supported. Also, only +terminals which expose ANSI style `setaf` and `setab` will support color; +if you have a color terminal that only has `setf` and `setb`, please submit +a ticket. + +## 24-bit Color + +_Tcell_ _supports 24-bit color!_ (That is, if your terminal can support it.) + +NOTE: Technically the approach of using 24-bit RGB values for color is more +accurately described as "direct color", but most people use the term "true color". +We follow the (inaccurate) common convention. + +There are a few ways you can enable (or disable) true color. + +* For many terminals, we can detect it automatically if your terminal +includes the `RGB` or `Tc` capabilities (or rather it did when the database +was updated.)
+ +* You can force this one by setting the `COLORTERM` environment variable to +`24-bit`, `truecolor` or `24bit`. This is the same method used +by most other terminal applications that support 24-bit color. + +* If you set your `TERM` environment variable to a value with the suffix `-truecolor` +then 24-bit color compatible with XTerm and ECMA-48 will be assumed. +(This feature is deprecated. +It is recommended to use one of the other methods listed above.) + +* You can disable 24-bit color by setting `TCELL_TRUECOLOR=disable` in your +environment. + +When using TrueColor, programs will display the colors that the programmer +intended, overriding any "`themes`" you may have set in your terminal +emulator. (For some cases, accurate color fidelity is more important +than respecting themes. For other cases, such as typical text apps that +only use a few colors, it's more desirable to respect the themes that +the user has established.) + +## Performance + +Reasonable attempts have been made to minimize sending data to terminals, +avoiding repeated sequences or drawing the same cell on refresh updates. + +## Terminfo + +(Not relevant for Windows users.) + +The Terminfo implementation operates with a built-in database. +This should satisfy most users. However, it can also (on systems +with ncurses installed), dynamically parse the output from `infocmp` +for terminals it does not already know about. + +See the `terminfo/` directory for more information about generating +new entries for the built-in database. + +_Tcell_ requires that the terminal support the `cup` mode of cursor addressing. +Ancient terminals without the ability to position the cursor directly +are not supported. +This is unlikely to be a problem; such terminals have not been mass-produced +since the early 1970s. + +## Mouse Support + +Mouse support is detected via the `kmous` terminfo variable, however, +enablement/disablement and decoding mouse events is done using hard coded +sequences based on the XTerm X11 model. All popular +terminals with mouse tracking support this model. (Full terminfo support +is not possible as terminfo sequences are not defined.) + +On Windows, the mouse works normally. + +Mouse wheel buttons on various terminals are known to work, but the support +in terminal emulators, as well as support for various buttons and +live mouse tracking, varies widely. +Modern _xterm_, macOS _Terminal_, and _iTerm_ all work well. + +## Bracketed Paste + +Terminals that appear to support the XTerm mouse model also can support +bracketed paste, for applications that opt-in. See `EnablePaste()` for details. + +## Testability + +There is a `SimulationScreen` that can be used to simulate a real screen +for automated testing. The supplied tests do this. The simulation contains +event delivery, screen resizing support, and capabilities to inject events +and examine "`physical`" screen contents. + +## Platforms + +### POSIX (Linux, FreeBSD, macOS, Solaris, etc.) + +Everything works using pure Go on mainstream platforms. Some more esoteric +platforms (e.g., AIX) may need to be added. Pull requests are welcome! + +### Windows + +Windows console mode applications are supported. + +Modern console applications like ConEmu and the Windows 10 terminal +support all the good features (resize, mouse tracking, etc.) + +### Plan9, WASM, and others + +These platforms won't work, but compilation stubs are supplied +for folks that want to include parts of this in software for those +platforms.
The Simulation screen works, but as _Tcell_ doesn't know how to +allocate a real screen object on those platforms, `NewScreen()` will fail. + +If anyone has wisdom about how to improve support for these, +please let me know. PRs are especially welcome. + +### Commercial Support + +_Tcell_ is absolutely free, but if you want to obtain commercial, professional support, there are options. + +* [TideLift](https://tidelift.com/) subscriptions include support for _Tcell_, as well as many other open source packages. +* [Staysail Systems Inc.](mailto:info@staysail.tech) offers direct support, and custom development around _Tcell_ on an hourly basis. diff --git a/vendor/github.com/gdamore/tcell/v2/TUTORIAL.md b/vendor/github.com/gdamore/tcell/v2/TUTORIAL.md new file mode 100644 index 0000000000..8d14f33115 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/TUTORIAL.md @@ -0,0 +1,293 @@ +# _Tcell_ Tutorial + +_Tcell_ provides a low-level, portable API for building terminal-based programs. +A [terminal emulator](https://en.wikipedia.org/wiki/Terminal_emulator) +(or a real terminal such as a DEC VT-220) is used to interact with such a program. + +_Tcell_'s interface is fairly low-level. +While it provides a reasonably portable way of dealing with all the usual terminal +features, it may be easier to utilize a higher level framework. +A number of such frameworks are listed on the _Tcell_ main [README](README.md). + +This tutorial provides the details of _Tcell_, and is appropriate for developers +wishing to create their own application frameworks or needing more direct access +to the terminal capabilities. + +## Resize events + +Applications receive an event of type `EventResize` when they are first initialized and each time the terminal is resized. +The new size is available as `Size`. + +```golang +switch ev := ev.(type) { +case *tcell.EventResize: + w, h := ev.Size() + logMessage(fmt.Sprintf("Resized to %dx%d", w, h)) +} +``` + +## Key events + +When a key is pressed, applications receive an event of type `EventKey`. +This event describes the modifier keys pressed (if any) and the pressed key or rune. + +When a rune key is pressed, an event with its `Key` set to `KeyRune` is dispatched. + +When a non-rune key is pressed, it is available as the `Key` of the event. + +```golang +switch ev := ev.(type) { +case *tcell.EventKey: + mod, key, ch := ev.Mod(), ev.Key(), ev.Rune() + logMessage(fmt.Sprintf("EventKey Modifiers: %d Key: %d Rune: %d", mod, key, ch)) +} +``` + +### Key event restrictions + +Terminal-based programs have less visibility into keyboard activity than graphical applications. + +When a key is pressed and held, additional key press events are sent by the terminal emulator. +The rate of these repeated events depends on the emulator's configuration. +Key release events are not available. + +It is not possible to distinguish runes typed while holding shift and runes typed using caps lock. +Capital letters are reported without the Shift modifier. + +## Mouse events + +Applications receive an event of type `EventMouse` when the mouse moves, or a mouse button is pressed or released. +Mouse events are only delivered if +`EnableMouse` has been called. + +The mouse buttons being pressed (if any) are available as `Buttons`, and the position of the mouse is available as `Position`. 
+ +```golang +switch ev := ev.(type) { +case *tcell.EventMouse: + mod := ev.Modifiers() + btns := ev.Buttons() + x, y := ev.Position() + logMessage(fmt.Sprintf("EventMouse Modifiers: %d Buttons: %d Position: %d,%d", mod, btns, x, y)) +} +``` + +### Mouse buttons + +Identifier | Alias | Description +-----------|-----------------|----------- +Button1 | ButtonPrimary | Left button +Button2 | ButtonSecondary | Right button +Button3 | ButtonMiddle | Middle button +Button4 | | Side button (thumb/next) +Button5 | | Side button (thumb/prev) + +## Usage + +To create a tcell application, first initialize a screen to hold it. + +```golang +s, err := tcell.NewScreen() +if err != nil { + log.Fatalf("%+v", err) +} +if err := s.Init(); err != nil { + log.Fatalf("%+v", err) +} + +// Set default text style +defStyle := tcell.StyleDefault.Background(tcell.ColorReset).Foreground(tcell.ColorReset) +s.SetStyle(defStyle) + +// Clear screen +s.Clear() +``` + +Text may be drawn on the screen using `SetContent`. + +```golang +s.SetContent(0, 0, 'H', nil, defStyle) +s.SetContent(1, 0, 'i', nil, defStyle) +s.SetContent(2, 0, '!', nil, defStyle) +``` + +To draw text more easily, define a render function. + +```golang +func drawText(s tcell.Screen, x1, y1, x2, y2 int, style tcell.Style, text string) { + row := y1 + col := x1 + for _, r := range []rune(text) { + s.SetContent(col, row, r, nil, style) + col++ + if col >= x2 { + row++ + col = x1 + } + if row > y2 { + break + } + } +} +``` + +Lastly, define an event loop to handle user input and update application state. + +```golang +quit := func() { + s.Fini() + os.Exit(0) +} +for { + // Update screen + s.Show() + + // Poll event + ev := s.PollEvent() + + // Process event + switch ev := ev.(type) { + case *tcell.EventResize: + s.Sync() + case *tcell.EventKey: + if ev.Key() == tcell.KeyEscape || ev.Key() == tcell.KeyCtrlC { + quit() + } + } +} +``` + +## Demo application + +The following demonstrates how to initialize a screen, draw text/graphics and handle user input. 
+ +```golang +package main + +import ( + "fmt" + "log" + "os" + + "github.com/gdamore/tcell/v2" +) + +func drawText(s tcell.Screen, x1, y1, x2, y2 int, style tcell.Style, text string) { + row := y1 + col := x1 + for _, r := range []rune(text) { + s.SetContent(col, row, r, nil, style) + col++ + if col >= x2 { + row++ + col = x1 + } + if row > y2 { + break + } + } +} + +func drawBox(s tcell.Screen, x1, y1, x2, y2 int, style tcell.Style, text string) { + if y2 < y1 { + y1, y2 = y2, y1 + } + if x2 < x1 { + x1, x2 = x2, x1 + } + + // Fill background + for row := y1; row <= y2; row++ { + for col := x1; col <= x2; col++ { + s.SetContent(col, row, ' ', nil, style) + } + } + + // Draw borders + for col := x1; col <= x2; col++ { + s.SetContent(col, y1, tcell.RuneHLine, nil, style) + s.SetContent(col, y2, tcell.RuneHLine, nil, style) + } + for row := y1 + 1; row < y2; row++ { + s.SetContent(x1, row, tcell.RuneVLine, nil, style) + s.SetContent(x2, row, tcell.RuneVLine, nil, style) + } + + // Only draw corners if necessary + if y1 != y2 && x1 != x2 { + s.SetContent(x1, y1, tcell.RuneULCorner, nil, style) + s.SetContent(x2, y1, tcell.RuneURCorner, nil, style) + s.SetContent(x1, y2, tcell.RuneLLCorner, nil, style) + s.SetContent(x2, y2, tcell.RuneLRCorner, nil, style) + } + + drawText(s, x1+1, y1+1, x2-1, y2-1, style, text) +} + +func main() { + defStyle := tcell.StyleDefault.Background(tcell.ColorReset).Foreground(tcell.ColorReset) + boxStyle := tcell.StyleDefault.Foreground(tcell.ColorWhite).Background(tcell.ColorPurple) + + // Initialize screen + s, err := tcell.NewScreen() + if err != nil { + log.Fatalf("%+v", err) + } + if err := s.Init(); err != nil { + log.Fatalf("%+v", err) + } + s.SetStyle(defStyle) + s.EnableMouse() + s.EnablePaste() + s.Clear() + + // Draw initial boxes + drawBox(s, 1, 1, 42, 7, boxStyle, "Click and drag to draw a box") + drawBox(s, 5, 9, 32, 14, boxStyle, "Press C to reset") + + // Event loop + ox, oy := -1, -1 + quit := func() { + s.Fini() + os.Exit(0) + } + for { + // Update screen + s.Show() + + // Poll event + ev := s.PollEvent() + + // Process event + switch ev := ev.(type) { + case *tcell.EventResize: + s.Sync() + case *tcell.EventKey: + if ev.Key() == tcell.KeyEscape || ev.Key() == tcell.KeyCtrlC { + quit() + } else if ev.Key() == tcell.KeyCtrlL { + s.Sync() + } else if ev.Rune() == 'C' || ev.Rune() == 'c' { + s.Clear() + } + case *tcell.EventMouse: + x, y := ev.Position() + button := ev.Buttons() + // Only process button events, not wheel events + button &= tcell.ButtonMask(0xff) + + if button != tcell.ButtonNone && ox < 0 { + ox, oy = x, y + } + switch ev.Buttons() { + case tcell.ButtonNone: + if ox >= 0 { + label := fmt.Sprintf("%d,%d to %d,%d", ox, oy, x, y) + drawBox(s, ox, oy, x, y, boxStyle, label) + ox, oy = -1, -1 + } + } + } + } +} +``` diff --git a/vendor/github.com/gdamore/tcell/v2/attr.go b/vendor/github.com/gdamore/tcell/v2/attr.go new file mode 100644 index 0000000000..8b1eab7758 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/attr.go @@ -0,0 +1,33 @@ +// Copyright 2020 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// AttrMask represents a mask of text attributes, apart from color. +// Note that support for attributes may vary widely across terminals. +type AttrMask int + +// Attributes are not colors, but affect the display of text. They can +// be combined. +const ( + AttrBold AttrMask = 1 << iota + AttrBlink + AttrReverse + AttrUnderline + AttrDim + AttrItalic + AttrStrikeThrough + AttrInvalid // Mark the style or attributes invalid + AttrNone AttrMask = 0 // Just normal text. +) diff --git a/vendor/github.com/gdamore/tcell/v2/cell.go b/vendor/github.com/gdamore/tcell/v2/cell.go new file mode 100644 index 0000000000..c7f8f1ade1 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/cell.go @@ -0,0 +1,177 @@ +// Copyright 2019 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + runewidth "github.com/mattn/go-runewidth" +) + +type cell struct { + currMain rune + currComb []rune + currStyle Style + lastMain rune + lastStyle Style + lastComb []rune + width int +} + +// CellBuffer represents a two dimensional array of character cells. +// This is primarily intended for use by Screen implementors; it +// contains much of the common code they need. To create one, just +// declare a variable of its type; no explicit initialization is necessary. +// +// CellBuffer is not thread safe. +type CellBuffer struct { + w int + h int + cells []cell +} + +// SetContent sets the contents (primary rune, combining runes, +// and style) for a cell at a given location. +func (cb *CellBuffer) SetContent(x int, y int, + mainc rune, combc []rune, style Style) { + + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + + c.currComb = append([]rune{}, combc...) + + if c.currMain != mainc { + c.width = runewidth.RuneWidth(mainc) + } + c.currMain = mainc + c.currStyle = style + } +} + +// GetContent returns the contents of a character cell, including the +// primary rune, any combining character runes (which will usually be +// nil), the style, and the display width in cells. (The width can be +// either 1, normally, or 2 for East Asian full-width characters.) +func (cb *CellBuffer) GetContent(x, y int) (rune, []rune, Style, int) { + var mainc rune + var combc []rune + var style Style + var width int + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + mainc, combc, style = c.currMain, c.currComb, c.currStyle + if width = c.width; width == 0 || mainc < ' ' { + width = 1 + mainc = ' ' + } + } + return mainc, combc, style, width +} + +// Size returns the (width, height) in cells of the buffer. +func (cb *CellBuffer) Size() (int, int) { + return cb.w, cb.h +} + +// Invalidate marks all characters within the buffer as dirty. 
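+// It does so by zeroing each cell's lastMain rune; Dirty treats a zero
+// lastMain as never having been displayed, so the next refresh repaints
+// every cell.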
+func (cb *CellBuffer) Invalidate() { + for i := range cb.cells { + cb.cells[i].lastMain = rune(0) + } +} + +// Dirty checks if a character at the given location needs +// to be refreshed on the physical display. This returns true +// if the cell content has changed since the last time it was +// marked clean. +func (cb *CellBuffer) Dirty(x, y int) bool { + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + if c.lastMain == rune(0) { + return true + } + if c.lastMain != c.currMain { + return true + } + if c.lastStyle != c.currStyle { + return true + } + if len(c.lastComb) != len(c.currComb) { + return true + } + for i := range c.lastComb { + if c.lastComb[i] != c.currComb[i] { + return true + } + } + } + return false +} + +// SetDirty is normally used to indicate that a cell has +// been displayed (in which case dirty is false), or to manually +// force a cell to be marked dirty. +func (cb *CellBuffer) SetDirty(x, y int, dirty bool) { + if x >= 0 && y >= 0 && x < cb.w && y < cb.h { + c := &cb.cells[(y*cb.w)+x] + if dirty { + c.lastMain = rune(0) + } else { + if c.currMain == rune(0) { + c.currMain = ' ' + } + c.lastMain = c.currMain + c.lastComb = c.currComb + c.lastStyle = c.currStyle + } + } +} + +// Resize is used to resize the cells array, with different dimensions, +// while preserving the original contents. The cells will be invalidated +// so that they can be redrawn. +func (cb *CellBuffer) Resize(w, h int) { + + if cb.h == h && cb.w == w { + return + } + + newc := make([]cell, w*h) + for y := 0; y < h && y < cb.h; y++ { + for x := 0; x < w && x < cb.w; x++ { + oc := &cb.cells[(y*cb.w)+x] + nc := &newc[(y*w)+x] + nc.currMain = oc.currMain + nc.currComb = oc.currComb + nc.currStyle = oc.currStyle + nc.width = oc.width + nc.lastMain = rune(0) + } + } + cb.cells = newc + cb.h = h + cb.w = w +} + +// Fill fills the entire cell buffer array with the specified character +// and style. Normally choose ' ' to clear the screen. This API doesn't +// support combining characters, or characters with a width larger than one. +func (cb *CellBuffer) Fill(r rune, style Style) { + for i := range cb.cells { + c := &cb.cells[i] + c.currMain = r + c.currComb = nil + c.currStyle = style + c.width = 1 + } +} diff --git a/vendor/github.com/gdamore/tcell/v2/charset_stub.go b/vendor/github.com/gdamore/tcell/v2/charset_stub.go new file mode 100644 index 0000000000..c1c1594c74 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/charset_stub.go @@ -0,0 +1,21 @@ +// +build plan9 nacl + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package tcell + +func getCharset() string { + return "" +} diff --git a/vendor/github.com/gdamore/tcell/v2/charset_unix.go b/vendor/github.com/gdamore/tcell/v2/charset_unix.go new file mode 100644 index 0000000000..d9f9d8e1fe --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/charset_unix.go @@ -0,0 +1,49 @@ +// +build !windows,!nacl,!plan9 + +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "os" + "strings" +) + +func getCharset() string { + // Determine the character set. This can help us later. + // Per POSIX, we search for LC_ALL first, then LC_CTYPE, and + // finally LANG. First one set wins. + locale := "" + if locale = os.Getenv("LC_ALL"); locale == "" { + if locale = os.Getenv("LC_CTYPE"); locale == "" { + locale = os.Getenv("LANG") + } + } + if locale == "POSIX" || locale == "C" { + return "US-ASCII" + } + if i := strings.IndexRune(locale, '@'); i >= 0 { + locale = locale[:i] + } + if i := strings.IndexRune(locale, '.'); i >= 0 { + locale = locale[i+1:] + } else { + // Default assumption, and on Linux we can see LC_ALL + // without a character set, which we assume implies UTF-8. + return "UTF-8" + } + // XXX: add support for aliases + return locale +} diff --git a/vendor/github.com/gdamore/tcell/v2/charset_windows.go b/vendor/github.com/gdamore/tcell/v2/charset_windows.go new file mode 100644 index 0000000000..2400aa8a3f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/charset_windows.go @@ -0,0 +1,21 @@ +// +build windows + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +func getCharset() string { + return "UTF-16" +} diff --git a/vendor/github.com/gdamore/tcell/v2/color.go b/vendor/github.com/gdamore/tcell/v2/color.go new file mode 100644 index 0000000000..8e50fa3022 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/color.go @@ -0,0 +1,1081 @@ +// Copyright 2020 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +import ( + ic "image/color" + "strconv" +) + +// Color represents a color. The low numeric values are the same as used +// by ECMA-48, and beyond that XTerm. A 24-bit RGB value may be used by +// adding in the ColorIsRGB flag. For Color names we use the W3C approved +// color names. +// +// We use a 64-bit integer to allow future expansion if we want to add an +// 8-bit alpha, while still leaving us some room for extra options. +// +// Note that on various terminals colors may be approximated however, or +// not supported at all. If no suitable representation for a color is known, +// the library will simply not set any color, deferring to whatever default +// attributes the terminal uses. +type Color uint64 + +const ( + // ColorDefault is used to leave the Color unchanged from whatever + // system or terminal default may exist. It's also the zero value. + ColorDefault Color = 0 + + // ColorValid is used to indicate the color value is actually + // valid (initialized). This is useful to permit the zero value + // to be treated as the default. + ColorValid Color = 1 << 32 + + // ColorIsRGB is used to indicate that the numeric value is not + // a known color constant, but rather an RGB value. The lower + // order 3 bytes are RGB. + ColorIsRGB Color = 1 << 33 + + // ColorSpecial is a flag used to indicate that the values have + // special meaning, and live outside of the color space(s). + ColorSpecial Color = 1 << 34 +) + +// Note that the order of these options is important -- it follows the +// definitions used by ECMA and XTerm. Hence any further named colors +// must begin at a value not less than 256. +const ( + ColorBlack = ColorValid + iota + ColorMaroon + ColorGreen + ColorOlive + ColorNavy + ColorPurple + ColorTeal + ColorSilver + ColorGray + ColorRed + ColorLime + ColorYellow + ColorBlue + ColorFuchsia + ColorAqua + ColorWhite + Color16 + Color17 + Color18 + Color19 + Color20 + Color21 + Color22 + Color23 + Color24 + Color25 + Color26 + Color27 + Color28 + Color29 + Color30 + Color31 + Color32 + Color33 + Color34 + Color35 + Color36 + Color37 + Color38 + Color39 + Color40 + Color41 + Color42 + Color43 + Color44 + Color45 + Color46 + Color47 + Color48 + Color49 + Color50 + Color51 + Color52 + Color53 + Color54 + Color55 + Color56 + Color57 + Color58 + Color59 + Color60 + Color61 + Color62 + Color63 + Color64 + Color65 + Color66 + Color67 + Color68 + Color69 + Color70 + Color71 + Color72 + Color73 + Color74 + Color75 + Color76 + Color77 + Color78 + Color79 + Color80 + Color81 + Color82 + Color83 + Color84 + Color85 + Color86 + Color87 + Color88 + Color89 + Color90 + Color91 + Color92 + Color93 + Color94 + Color95 + Color96 + Color97 + Color98 + Color99 + Color100 + Color101 + Color102 + Color103 + Color104 + Color105 + Color106 + Color107 + Color108 + Color109 + Color110 + Color111 + Color112 + Color113 + Color114 + Color115 + Color116 + Color117 + Color118 + Color119 + Color120 + Color121 + Color122 + Color123 + Color124 + Color125 + Color126 + Color127 + Color128 + Color129 + Color130 + Color131 + Color132 + Color133 + Color134 + Color135 + Color136 + Color137 + Color138 + Color139 + Color140 + Color141 + Color142 + Color143 + Color144 + Color145 + Color146 + Color147 + Color148 + Color149 + Color150 + Color151 + Color152 + Color153 + Color154 + Color155 + Color156 + Color157 + Color158 + Color159 + Color160 + Color161 + Color162 + Color163 + Color164 + Color165 + Color166 + Color167 + Color168 + Color169 + Color170 + Color171 + Color172 + Color173 + Color174 + 
Color175 + Color176 + Color177 + Color178 + Color179 + Color180 + Color181 + Color182 + Color183 + Color184 + Color185 + Color186 + Color187 + Color188 + Color189 + Color190 + Color191 + Color192 + Color193 + Color194 + Color195 + Color196 + Color197 + Color198 + Color199 + Color200 + Color201 + Color202 + Color203 + Color204 + Color205 + Color206 + Color207 + Color208 + Color209 + Color210 + Color211 + Color212 + Color213 + Color214 + Color215 + Color216 + Color217 + Color218 + Color219 + Color220 + Color221 + Color222 + Color223 + Color224 + Color225 + Color226 + Color227 + Color228 + Color229 + Color230 + Color231 + Color232 + Color233 + Color234 + Color235 + Color236 + Color237 + Color238 + Color239 + Color240 + Color241 + Color242 + Color243 + Color244 + Color245 + Color246 + Color247 + Color248 + Color249 + Color250 + Color251 + Color252 + Color253 + Color254 + Color255 + ColorAliceBlue + ColorAntiqueWhite + ColorAquaMarine + ColorAzure + ColorBeige + ColorBisque + ColorBlanchedAlmond + ColorBlueViolet + ColorBrown + ColorBurlyWood + ColorCadetBlue + ColorChartreuse + ColorChocolate + ColorCoral + ColorCornflowerBlue + ColorCornsilk + ColorCrimson + ColorDarkBlue + ColorDarkCyan + ColorDarkGoldenrod + ColorDarkGray + ColorDarkGreen + ColorDarkKhaki + ColorDarkMagenta + ColorDarkOliveGreen + ColorDarkOrange + ColorDarkOrchid + ColorDarkRed + ColorDarkSalmon + ColorDarkSeaGreen + ColorDarkSlateBlue + ColorDarkSlateGray + ColorDarkTurquoise + ColorDarkViolet + ColorDeepPink + ColorDeepSkyBlue + ColorDimGray + ColorDodgerBlue + ColorFireBrick + ColorFloralWhite + ColorForestGreen + ColorGainsboro + ColorGhostWhite + ColorGold + ColorGoldenrod + ColorGreenYellow + ColorHoneydew + ColorHotPink + ColorIndianRed + ColorIndigo + ColorIvory + ColorKhaki + ColorLavender + ColorLavenderBlush + ColorLawnGreen + ColorLemonChiffon + ColorLightBlue + ColorLightCoral + ColorLightCyan + ColorLightGoldenrodYellow + ColorLightGray + ColorLightGreen + ColorLightPink + ColorLightSalmon + ColorLightSeaGreen + ColorLightSkyBlue + ColorLightSlateGray + ColorLightSteelBlue + ColorLightYellow + ColorLimeGreen + ColorLinen + ColorMediumAquamarine + ColorMediumBlue + ColorMediumOrchid + ColorMediumPurple + ColorMediumSeaGreen + ColorMediumSlateBlue + ColorMediumSpringGreen + ColorMediumTurquoise + ColorMediumVioletRed + ColorMidnightBlue + ColorMintCream + ColorMistyRose + ColorMoccasin + ColorNavajoWhite + ColorOldLace + ColorOliveDrab + ColorOrange + ColorOrangeRed + ColorOrchid + ColorPaleGoldenrod + ColorPaleGreen + ColorPaleTurquoise + ColorPaleVioletRed + ColorPapayaWhip + ColorPeachPuff + ColorPeru + ColorPink + ColorPlum + ColorPowderBlue + ColorRebeccaPurple + ColorRosyBrown + ColorRoyalBlue + ColorSaddleBrown + ColorSalmon + ColorSandyBrown + ColorSeaGreen + ColorSeashell + ColorSienna + ColorSkyblue + ColorSlateBlue + ColorSlateGray + ColorSnow + ColorSpringGreen + ColorSteelBlue + ColorTan + ColorThistle + ColorTomato + ColorTurquoise + ColorViolet + ColorWheat + ColorWhiteSmoke + ColorYellowGreen +) + +// These are aliases for the color gray, because some of us spell +// it as grey. +const ( + ColorGrey = ColorGray + ColorDimGrey = ColorDimGray + ColorDarkGrey = ColorDarkGray + ColorDarkSlateGrey = ColorDarkSlateGray + ColorLightGrey = ColorLightGray + ColorLightSlateGrey = ColorLightSlateGray + ColorSlateGrey = ColorSlateGray +) + +// ColorValues maps color constants to their RGB values. 
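+// Color16 through Color231 follow the XTerm 6x6x6 color cube (component
+// levels 0x00, 0x5F, 0x87, 0xAF, 0xD7, 0xFF), and Color232 through
+// Color255 form the 24-step grayscale ramp.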
+var ColorValues = map[Color]int32{ + ColorBlack: 0x000000, + ColorMaroon: 0x800000, + ColorGreen: 0x008000, + ColorOlive: 0x808000, + ColorNavy: 0x000080, + ColorPurple: 0x800080, + ColorTeal: 0x008080, + ColorSilver: 0xC0C0C0, + ColorGray: 0x808080, + ColorRed: 0xFF0000, + ColorLime: 0x00FF00, + ColorYellow: 0xFFFF00, + ColorBlue: 0x0000FF, + ColorFuchsia: 0xFF00FF, + ColorAqua: 0x00FFFF, + ColorWhite: 0xFFFFFF, + Color16: 0x000000, // black + Color17: 0x00005F, + Color18: 0x000087, + Color19: 0x0000AF, + Color20: 0x0000D7, + Color21: 0x0000FF, // blue + Color22: 0x005F00, + Color23: 0x005F5F, + Color24: 0x005F87, + Color25: 0x005FAF, + Color26: 0x005FD7, + Color27: 0x005FFF, + Color28: 0x008700, + Color29: 0x00875F, + Color30: 0x008787, + Color31: 0x0087Af, + Color32: 0x0087D7, + Color33: 0x0087FF, + Color34: 0x00AF00, + Color35: 0x00AF5F, + Color36: 0x00AF87, + Color37: 0x00AFAF, + Color38: 0x00AFD7, + Color39: 0x00AFFF, + Color40: 0x00D700, + Color41: 0x00D75F, + Color42: 0x00D787, + Color43: 0x00D7AF, + Color44: 0x00D7D7, + Color45: 0x00D7FF, + Color46: 0x00FF00, // lime + Color47: 0x00FF5F, + Color48: 0x00FF87, + Color49: 0x00FFAF, + Color50: 0x00FFd7, + Color51: 0x00FFFF, // aqua + Color52: 0x5F0000, + Color53: 0x5F005F, + Color54: 0x5F0087, + Color55: 0x5F00AF, + Color56: 0x5F00D7, + Color57: 0x5F00FF, + Color58: 0x5F5F00, + Color59: 0x5F5F5F, + Color60: 0x5F5F87, + Color61: 0x5F5FAF, + Color62: 0x5F5FD7, + Color63: 0x5F5FFF, + Color64: 0x5F8700, + Color65: 0x5F875F, + Color66: 0x5F8787, + Color67: 0x5F87AF, + Color68: 0x5F87D7, + Color69: 0x5F87FF, + Color70: 0x5FAF00, + Color71: 0x5FAF5F, + Color72: 0x5FAF87, + Color73: 0x5FAFAF, + Color74: 0x5FAFD7, + Color75: 0x5FAFFF, + Color76: 0x5FD700, + Color77: 0x5FD75F, + Color78: 0x5FD787, + Color79: 0x5FD7AF, + Color80: 0x5FD7D7, + Color81: 0x5FD7FF, + Color82: 0x5FFF00, + Color83: 0x5FFF5F, + Color84: 0x5FFF87, + Color85: 0x5FFFAF, + Color86: 0x5FFFD7, + Color87: 0x5FFFFF, + Color88: 0x870000, + Color89: 0x87005F, + Color90: 0x870087, + Color91: 0x8700AF, + Color92: 0x8700D7, + Color93: 0x8700FF, + Color94: 0x875F00, + Color95: 0x875F5F, + Color96: 0x875F87, + Color97: 0x875FAF, + Color98: 0x875FD7, + Color99: 0x875FFF, + Color100: 0x878700, + Color101: 0x87875F, + Color102: 0x878787, + Color103: 0x8787AF, + Color104: 0x8787D7, + Color105: 0x8787FF, + Color106: 0x87AF00, + Color107: 0x87AF5F, + Color108: 0x87AF87, + Color109: 0x87AFAF, + Color110: 0x87AFD7, + Color111: 0x87AFFF, + Color112: 0x87D700, + Color113: 0x87D75F, + Color114: 0x87D787, + Color115: 0x87D7AF, + Color116: 0x87D7D7, + Color117: 0x87D7FF, + Color118: 0x87FF00, + Color119: 0x87FF5F, + Color120: 0x87FF87, + Color121: 0x87FFAF, + Color122: 0x87FFD7, + Color123: 0x87FFFF, + Color124: 0xAF0000, + Color125: 0xAF005F, + Color126: 0xAF0087, + Color127: 0xAF00AF, + Color128: 0xAF00D7, + Color129: 0xAF00FF, + Color130: 0xAF5F00, + Color131: 0xAF5F5F, + Color132: 0xAF5F87, + Color133: 0xAF5FAF, + Color134: 0xAF5FD7, + Color135: 0xAF5FFF, + Color136: 0xAF8700, + Color137: 0xAF875F, + Color138: 0xAF8787, + Color139: 0xAF87AF, + Color140: 0xAF87D7, + Color141: 0xAF87FF, + Color142: 0xAFAF00, + Color143: 0xAFAF5F, + Color144: 0xAFAF87, + Color145: 0xAFAFAF, + Color146: 0xAFAFD7, + Color147: 0xAFAFFF, + Color148: 0xAFD700, + Color149: 0xAFD75F, + Color150: 0xAFD787, + Color151: 0xAFD7AF, + Color152: 0xAFD7D7, + Color153: 0xAFD7FF, + Color154: 0xAFFF00, + Color155: 0xAFFF5F, + Color156: 0xAFFF87, + Color157: 0xAFFFAF, + Color158: 0xAFFFD7, + Color159: 0xAFFFFF, + Color160: 
0xD70000, + Color161: 0xD7005F, + Color162: 0xD70087, + Color163: 0xD700AF, + Color164: 0xD700D7, + Color165: 0xD700FF, + Color166: 0xD75F00, + Color167: 0xD75F5F, + Color168: 0xD75F87, + Color169: 0xD75FAF, + Color170: 0xD75FD7, + Color171: 0xD75FFF, + Color172: 0xD78700, + Color173: 0xD7875F, + Color174: 0xD78787, + Color175: 0xD787AF, + Color176: 0xD787D7, + Color177: 0xD787FF, + Color178: 0xD7AF00, + Color179: 0xD7AF5F, + Color180: 0xD7AF87, + Color181: 0xD7AFAF, + Color182: 0xD7AFD7, + Color183: 0xD7AFFF, + Color184: 0xD7D700, + Color185: 0xD7D75F, + Color186: 0xD7D787, + Color187: 0xD7D7AF, + Color188: 0xD7D7D7, + Color189: 0xD7D7FF, + Color190: 0xD7FF00, + Color191: 0xD7FF5F, + Color192: 0xD7FF87, + Color193: 0xD7FFAF, + Color194: 0xD7FFD7, + Color195: 0xD7FFFF, + Color196: 0xFF0000, // red + Color197: 0xFF005F, + Color198: 0xFF0087, + Color199: 0xFF00AF, + Color200: 0xFF00D7, + Color201: 0xFF00FF, // fuchsia + Color202: 0xFF5F00, + Color203: 0xFF5F5F, + Color204: 0xFF5F87, + Color205: 0xFF5FAF, + Color206: 0xFF5FD7, + Color207: 0xFF5FFF, + Color208: 0xFF8700, + Color209: 0xFF875F, + Color210: 0xFF8787, + Color211: 0xFF87AF, + Color212: 0xFF87D7, + Color213: 0xFF87FF, + Color214: 0xFFAF00, + Color215: 0xFFAF5F, + Color216: 0xFFAF87, + Color217: 0xFFAFAF, + Color218: 0xFFAFD7, + Color219: 0xFFAFFF, + Color220: 0xFFD700, + Color221: 0xFFD75F, + Color222: 0xFFD787, + Color223: 0xFFD7AF, + Color224: 0xFFD7D7, + Color225: 0xFFD7FF, + Color226: 0xFFFF00, // yellow + Color227: 0xFFFF5F, + Color228: 0xFFFF87, + Color229: 0xFFFFAF, + Color230: 0xFFFFD7, + Color231: 0xFFFFFF, // white + Color232: 0x080808, + Color233: 0x121212, + Color234: 0x1C1C1C, + Color235: 0x262626, + Color236: 0x303030, + Color237: 0x3A3A3A, + Color238: 0x444444, + Color239: 0x4E4E4E, + Color240: 0x585858, + Color241: 0x626262, + Color242: 0x6C6C6C, + Color243: 0x767676, + Color244: 0x808080, // grey + Color245: 0x8A8A8A, + Color246: 0x949494, + Color247: 0x9E9E9E, + Color248: 0xA8A8A8, + Color249: 0xB2B2B2, + Color250: 0xBCBCBC, + Color251: 0xC6C6C6, + Color252: 0xD0D0D0, + Color253: 0xDADADA, + Color254: 0xE4E4E4, + Color255: 0xEEEEEE, + ColorAliceBlue: 0xF0F8FF, + ColorAntiqueWhite: 0xFAEBD7, + ColorAquaMarine: 0x7FFFD4, + ColorAzure: 0xF0FFFF, + ColorBeige: 0xF5F5DC, + ColorBisque: 0xFFE4C4, + ColorBlanchedAlmond: 0xFFEBCD, + ColorBlueViolet: 0x8A2BE2, + ColorBrown: 0xA52A2A, + ColorBurlyWood: 0xDEB887, + ColorCadetBlue: 0x5F9EA0, + ColorChartreuse: 0x7FFF00, + ColorChocolate: 0xD2691E, + ColorCoral: 0xFF7F50, + ColorCornflowerBlue: 0x6495ED, + ColorCornsilk: 0xFFF8DC, + ColorCrimson: 0xDC143C, + ColorDarkBlue: 0x00008B, + ColorDarkCyan: 0x008B8B, + ColorDarkGoldenrod: 0xB8860B, + ColorDarkGray: 0xA9A9A9, + ColorDarkGreen: 0x006400, + ColorDarkKhaki: 0xBDB76B, + ColorDarkMagenta: 0x8B008B, + ColorDarkOliveGreen: 0x556B2F, + ColorDarkOrange: 0xFF8C00, + ColorDarkOrchid: 0x9932CC, + ColorDarkRed: 0x8B0000, + ColorDarkSalmon: 0xE9967A, + ColorDarkSeaGreen: 0x8FBC8F, + ColorDarkSlateBlue: 0x483D8B, + ColorDarkSlateGray: 0x2F4F4F, + ColorDarkTurquoise: 0x00CED1, + ColorDarkViolet: 0x9400D3, + ColorDeepPink: 0xFF1493, + ColorDeepSkyBlue: 0x00BFFF, + ColorDimGray: 0x696969, + ColorDodgerBlue: 0x1E90FF, + ColorFireBrick: 0xB22222, + ColorFloralWhite: 0xFFFAF0, + ColorForestGreen: 0x228B22, + ColorGainsboro: 0xDCDCDC, + ColorGhostWhite: 0xF8F8FF, + ColorGold: 0xFFD700, + ColorGoldenrod: 0xDAA520, + ColorGreenYellow: 0xADFF2F, + ColorHoneydew: 0xF0FFF0, + ColorHotPink: 0xFF69B4, + ColorIndianRed: 0xCD5C5C, + ColorIndigo: 
0x4B0082, + ColorIvory: 0xFFFFF0, + ColorKhaki: 0xF0E68C, + ColorLavender: 0xE6E6FA, + ColorLavenderBlush: 0xFFF0F5, + ColorLawnGreen: 0x7CFC00, + ColorLemonChiffon: 0xFFFACD, + ColorLightBlue: 0xADD8E6, + ColorLightCoral: 0xF08080, + ColorLightCyan: 0xE0FFFF, + ColorLightGoldenrodYellow: 0xFAFAD2, + ColorLightGray: 0xD3D3D3, + ColorLightGreen: 0x90EE90, + ColorLightPink: 0xFFB6C1, + ColorLightSalmon: 0xFFA07A, + ColorLightSeaGreen: 0x20B2AA, + ColorLightSkyBlue: 0x87CEFA, + ColorLightSlateGray: 0x778899, + ColorLightSteelBlue: 0xB0C4DE, + ColorLightYellow: 0xFFFFE0, + ColorLimeGreen: 0x32CD32, + ColorLinen: 0xFAF0E6, + ColorMediumAquamarine: 0x66CDAA, + ColorMediumBlue: 0x0000CD, + ColorMediumOrchid: 0xBA55D3, + ColorMediumPurple: 0x9370DB, + ColorMediumSeaGreen: 0x3CB371, + ColorMediumSlateBlue: 0x7B68EE, + ColorMediumSpringGreen: 0x00FA9A, + ColorMediumTurquoise: 0x48D1CC, + ColorMediumVioletRed: 0xC71585, + ColorMidnightBlue: 0x191970, + ColorMintCream: 0xF5FFFA, + ColorMistyRose: 0xFFE4E1, + ColorMoccasin: 0xFFE4B5, + ColorNavajoWhite: 0xFFDEAD, + ColorOldLace: 0xFDF5E6, + ColorOliveDrab: 0x6B8E23, + ColorOrange: 0xFFA500, + ColorOrangeRed: 0xFF4500, + ColorOrchid: 0xDA70D6, + ColorPaleGoldenrod: 0xEEE8AA, + ColorPaleGreen: 0x98FB98, + ColorPaleTurquoise: 0xAFEEEE, + ColorPaleVioletRed: 0xDB7093, + ColorPapayaWhip: 0xFFEFD5, + ColorPeachPuff: 0xFFDAB9, + ColorPeru: 0xCD853F, + ColorPink: 0xFFC0CB, + ColorPlum: 0xDDA0DD, + ColorPowderBlue: 0xB0E0E6, + ColorRebeccaPurple: 0x663399, + ColorRosyBrown: 0xBC8F8F, + ColorRoyalBlue: 0x4169E1, + ColorSaddleBrown: 0x8B4513, + ColorSalmon: 0xFA8072, + ColorSandyBrown: 0xF4A460, + ColorSeaGreen: 0x2E8B57, + ColorSeashell: 0xFFF5EE, + ColorSienna: 0xA0522D, + ColorSkyblue: 0x87CEEB, + ColorSlateBlue: 0x6A5ACD, + ColorSlateGray: 0x708090, + ColorSnow: 0xFFFAFA, + ColorSpringGreen: 0x00FF7F, + ColorSteelBlue: 0x4682B4, + ColorTan: 0xD2B48C, + ColorThistle: 0xD8BFD8, + ColorTomato: 0xFF6347, + ColorTurquoise: 0x40E0D0, + ColorViolet: 0xEE82EE, + ColorWheat: 0xF5DEB3, + ColorWhiteSmoke: 0xF5F5F5, + ColorYellowGreen: 0x9ACD32, +} + +// Special colors. +const ( + // ColorReset is used to indicate that the color should use the + // vanilla terminal colors. (Basically go back to the defaults.) + ColorReset = ColorSpecial | iota +) + +// ColorNames holds the written names of colors. Useful to present a list of +// recognized named colors. 
+var ColorNames = map[string]Color{ + "black": ColorBlack, + "maroon": ColorMaroon, + "green": ColorGreen, + "olive": ColorOlive, + "navy": ColorNavy, + "purple": ColorPurple, + "teal": ColorTeal, + "silver": ColorSilver, + "gray": ColorGray, + "red": ColorRed, + "lime": ColorLime, + "yellow": ColorYellow, + "blue": ColorBlue, + "fuchsia": ColorFuchsia, + "aqua": ColorAqua, + "white": ColorWhite, + "aliceblue": ColorAliceBlue, + "antiquewhite": ColorAntiqueWhite, + "aquamarine": ColorAquaMarine, + "azure": ColorAzure, + "beige": ColorBeige, + "bisque": ColorBisque, + "blanchedalmond": ColorBlanchedAlmond, + "blueviolet": ColorBlueViolet, + "brown": ColorBrown, + "burlywood": ColorBurlyWood, + "cadetblue": ColorCadetBlue, + "chartreuse": ColorChartreuse, + "chocolate": ColorChocolate, + "coral": ColorCoral, + "cornflowerblue": ColorCornflowerBlue, + "cornsilk": ColorCornsilk, + "crimson": ColorCrimson, + "darkblue": ColorDarkBlue, + "darkcyan": ColorDarkCyan, + "darkgoldenrod": ColorDarkGoldenrod, + "darkgray": ColorDarkGray, + "darkgreen": ColorDarkGreen, + "darkkhaki": ColorDarkKhaki, + "darkmagenta": ColorDarkMagenta, + "darkolivegreen": ColorDarkOliveGreen, + "darkorange": ColorDarkOrange, + "darkorchid": ColorDarkOrchid, + "darkred": ColorDarkRed, + "darksalmon": ColorDarkSalmon, + "darkseagreen": ColorDarkSeaGreen, + "darkslateblue": ColorDarkSlateBlue, + "darkslategray": ColorDarkSlateGray, + "darkturquoise": ColorDarkTurquoise, + "darkviolet": ColorDarkViolet, + "deeppink": ColorDeepPink, + "deepskyblue": ColorDeepSkyBlue, + "dimgray": ColorDimGray, + "dodgerblue": ColorDodgerBlue, + "firebrick": ColorFireBrick, + "floralwhite": ColorFloralWhite, + "forestgreen": ColorForestGreen, + "gainsboro": ColorGainsboro, + "ghostwhite": ColorGhostWhite, + "gold": ColorGold, + "goldenrod": ColorGoldenrod, + "greenyellow": ColorGreenYellow, + "honeydew": ColorHoneydew, + "hotpink": ColorHotPink, + "indianred": ColorIndianRed, + "indigo": ColorIndigo, + "ivory": ColorIvory, + "khaki": ColorKhaki, + "lavender": ColorLavender, + "lavenderblush": ColorLavenderBlush, + "lawngreen": ColorLawnGreen, + "lemonchiffon": ColorLemonChiffon, + "lightblue": ColorLightBlue, + "lightcoral": ColorLightCoral, + "lightcyan": ColorLightCyan, + "lightgoldenrodyellow": ColorLightGoldenrodYellow, + "lightgray": ColorLightGray, + "lightgreen": ColorLightGreen, + "lightpink": ColorLightPink, + "lightsalmon": ColorLightSalmon, + "lightseagreen": ColorLightSeaGreen, + "lightskyblue": ColorLightSkyBlue, + "lightslategray": ColorLightSlateGray, + "lightsteelblue": ColorLightSteelBlue, + "lightyellow": ColorLightYellow, + "limegreen": ColorLimeGreen, + "linen": ColorLinen, + "mediumaquamarine": ColorMediumAquamarine, + "mediumblue": ColorMediumBlue, + "mediumorchid": ColorMediumOrchid, + "mediumpurple": ColorMediumPurple, + "mediumseagreen": ColorMediumSeaGreen, + "mediumslateblue": ColorMediumSlateBlue, + "mediumspringgreen": ColorMediumSpringGreen, + "mediumturquoise": ColorMediumTurquoise, + "mediumvioletred": ColorMediumVioletRed, + "midnightblue": ColorMidnightBlue, + "mintcream": ColorMintCream, + "mistyrose": ColorMistyRose, + "moccasin": ColorMoccasin, + "navajowhite": ColorNavajoWhite, + "oldlace": ColorOldLace, + "olivedrab": ColorOliveDrab, + "orange": ColorOrange, + "orangered": ColorOrangeRed, + "orchid": ColorOrchid, + "palegoldenrod": ColorPaleGoldenrod, + "palegreen": ColorPaleGreen, + "paleturquoise": ColorPaleTurquoise, + "palevioletred": ColorPaleVioletRed, + "papayawhip": ColorPapayaWhip, + "peachpuff": 
ColorPeachPuff, + "peru": ColorPeru, + "pink": ColorPink, + "plum": ColorPlum, + "powderblue": ColorPowderBlue, + "rebeccapurple": ColorRebeccaPurple, + "rosybrown": ColorRosyBrown, + "royalblue": ColorRoyalBlue, + "saddlebrown": ColorSaddleBrown, + "salmon": ColorSalmon, + "sandybrown": ColorSandyBrown, + "seagreen": ColorSeaGreen, + "seashell": ColorSeashell, + "sienna": ColorSienna, + "skyblue": ColorSkyblue, + "slateblue": ColorSlateBlue, + "slategray": ColorSlateGray, + "snow": ColorSnow, + "springgreen": ColorSpringGreen, + "steelblue": ColorSteelBlue, + "tan": ColorTan, + "thistle": ColorThistle, + "tomato": ColorTomato, + "turquoise": ColorTurquoise, + "violet": ColorViolet, + "wheat": ColorWheat, + "whitesmoke": ColorWhiteSmoke, + "yellowgreen": ColorYellowGreen, + "grey": ColorGray, + "dimgrey": ColorDimGray, + "darkgrey": ColorDarkGray, + "darkslategrey": ColorDarkSlateGray, + "lightgrey": ColorLightGray, + "lightslategrey": ColorLightSlateGray, + "slategrey": ColorSlateGray, +} + +// Valid indicates the color is a valid value (has been set). +func (c Color) Valid() bool { + return c&ColorValid != 0 +} + +// IsRGB is true if the color is an RGB specific value. +func (c Color) IsRGB() bool { + return c&(ColorValid|ColorIsRGB) == (ColorValid | ColorIsRGB) +} + +// Hex returns the color's hexadecimal RGB 24-bit value with each component +// consisting of a single byte, ala R << 16 | G << 8 | B. If the color +// is unknown or unset, -1 is returned. +func (c Color) Hex() int32 { + if !c.Valid() { + return -1 + } + if c&ColorIsRGB != 0 { + return int32(c) & 0xffffff + } + if v, ok := ColorValues[c]; ok { + return v + } + return -1 +} + +// RGB returns the red, green, and blue components of the color, with +// each component represented as a value 0-255. In the event that the +// color cannot be broken up (not set usually), -1 is returned for each value. +func (c Color) RGB() (int32, int32, int32) { + v := c.Hex() + if v < 0 { + return -1, -1, -1 + } + return (v >> 16) & 0xff, (v >> 8) & 0xff, v & 0xff +} + +// TrueColor returns the true color (RGB) version of the provided color. +// This is useful for ensuring color accuracy when using named colors. +// This will override terminal theme colors. +func (c Color) TrueColor() Color { + if !c.Valid() { + return ColorDefault + } + if c&ColorIsRGB != 0 { + return c + } + return Color(c.Hex()) | ColorIsRGB | ColorValid +} + +// NewRGBColor returns a new color with the given red, green, and blue values. +// Each value must be represented in the range 0-255. +func NewRGBColor(r, g, b int32) Color { + return NewHexColor(((r & 0xff) << 16) | ((g & 0xff) << 8) | (b & 0xff)) +} + +// NewHexColor returns a color using the given 24-bit RGB value. +func NewHexColor(v int32) Color { + return ColorIsRGB | Color(v) | ColorValid +} + +// GetColor creates a Color from a color name (W3C name). A hex value may +// be supplied as a string in the format "#ffffff". +func GetColor(name string) Color { + if c, ok := ColorNames[name]; ok { + return c + } + if len(name) == 7 && name[0] == '#' { + if v, e := strconv.ParseInt(name[1:], 16, 32); e == nil { + return NewHexColor(int32(v)) + } + } + return ColorDefault +} + +// PaletteColor creates a color based on the palette index. +func PaletteColor(index int) Color { + return Color(index) | ColorValid +} + +// FromImageColor converts an image/color.Color into tcell.Color. +// The alpha value is dropped, so it should be tracked separately if it is +// needed. 
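For orientation, a minimal sketch of how the Color API defined in this file is meant to be used from application code; it assumes only the exported names shown above (FromImageColor itself follows next):

    package main

    import (
        "fmt"

        "github.com/gdamore/tcell/v2"
    )

    func main() {
        // Named lookup; "#rrggbb" strings are also accepted by GetColor.
        c := tcell.GetColor("rebeccapurple")
        fmt.Printf("%06x\n", c.Hex()) // 663399

        // Direct RGB construction round-trips through RGB().
        c = tcell.NewRGBColor(0x12, 0x34, 0x56)
        r, g, b := c.RGB()
        fmt.Println(r, g, b) // 18 52 86
    }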
+func FromImageColor(imageColor ic.Color) Color { + r, g, b, _ := imageColor.RGBA() + // NOTE image/color.Color RGB values range is [0, 0xFFFF] as uint32 + return NewRGBColor(int32(r>>8), int32(g>>8), int32(b>>8)) +} diff --git a/vendor/github.com/gdamore/tcell/v2/colorfit.go b/vendor/github.com/gdamore/tcell/v2/colorfit.go new file mode 100644 index 0000000000..b7740b8ae2 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/colorfit.go @@ -0,0 +1,52 @@ +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "github.com/lucasb-eyer/go-colorful" + "math" +) + +// FindColor attempts to find a given color, or the best match possible for it, +// from the palette given. This is an expensive operation, so results should +// be cached by the caller. +func FindColor(c Color, palette []Color) Color { + match := ColorDefault + dist := float64(0) + r, g, b := c.RGB() + c1 := colorful.Color{ + R: float64(r) / 255.0, + G: float64(g) / 255.0, + B: float64(b) / 255.0, + } + for _, d := range palette { + r, g, b = d.RGB() + c2 := colorful.Color{ + R: float64(r) / 255.0, + G: float64(g) / 255.0, + B: float64(b) / 255.0, + } + // CIE94 is more accurate, but really really expensive. + nd := c1.DistanceCIE76(c2) + if math.IsNaN(nd) { + nd = math.Inf(1) + } + if match == ColorDefault || nd < dist { + match = d + dist = nd + } + } + return match +} diff --git a/vendor/github.com/gdamore/tcell/v2/console_stub.go b/vendor/github.com/gdamore/tcell/v2/console_stub.go new file mode 100644 index 0000000000..fda2f0926c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/console_stub.go @@ -0,0 +1,23 @@ +// +build !windows + +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// NewConsoleScreen returns a console based screen. This platform +// doesn't have support for any, so it returns nil and a suitable error. +func NewConsoleScreen() (Screen, error) { + return nil, ErrNoScreen +} diff --git a/vendor/github.com/gdamore/tcell/v2/console_win.go b/vendor/github.com/gdamore/tcell/v2/console_win.go new file mode 100644 index 0000000000..a901a1255d --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/console_win.go @@ -0,0 +1,1269 @@ +// +build windows + +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "errors" + "fmt" + "os" + "strings" + "sync" + "syscall" + "unicode/utf16" + "unsafe" +) + +type cScreen struct { + in syscall.Handle + out syscall.Handle + cancelflag syscall.Handle + scandone chan struct{} + evch chan Event + quit chan struct{} + curx int + cury int + style Style + clear bool + fini bool + vten bool + truecolor bool + running bool + + w int + h int + + oscreen consoleInfo + ocursor cursorInfo + oimode uint32 + oomode uint32 + cells CellBuffer + + finiOnce sync.Once + + mouseEnabled bool + wg sync.WaitGroup + stopQ chan struct{} + + sync.Mutex +} + +var winLock sync.Mutex + +var winPalette = []Color{ + ColorBlack, + ColorMaroon, + ColorGreen, + ColorNavy, + ColorOlive, + ColorPurple, + ColorTeal, + ColorSilver, + ColorGray, + ColorRed, + ColorLime, + ColorBlue, + ColorYellow, + ColorFuchsia, + ColorAqua, + ColorWhite, +} + +var winColors = map[Color]Color{ + ColorBlack: ColorBlack, + ColorMaroon: ColorMaroon, + ColorGreen: ColorGreen, + ColorNavy: ColorNavy, + ColorOlive: ColorOlive, + ColorPurple: ColorPurple, + ColorTeal: ColorTeal, + ColorSilver: ColorSilver, + ColorGray: ColorGray, + ColorRed: ColorRed, + ColorLime: ColorLime, + ColorBlue: ColorBlue, + ColorYellow: ColorYellow, + ColorFuchsia: ColorFuchsia, + ColorAqua: ColorAqua, + ColorWhite: ColorWhite, +} + +var ( + k32 = syscall.NewLazyDLL("kernel32.dll") + u32 = syscall.NewLazyDLL("user32.dll") +) + +// We have to bring in the kernel32 and user32 DLLs directly, so we can get +// access to some system calls that the core Go API lacks. +// +// Note that Windows appends some functions with W to indicate that wide +// characters (Unicode) are in use. The documentation refers to them +// without this suffix, as the resolution is made via preprocessor. 
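As a concrete illustration of the lazy-DLL binding just described, here is a standalone sketch (Windows-only) that binds and calls MessageBeep the same way; it mirrors the vendored pattern rather than introducing any new API:

    // +build windows

    package main

    import "syscall"

    func main() {
        u32 := syscall.NewLazyDLL("user32.dll")
        beep := u32.NewProc("MessageBeep")
        // 0xFFFFFFFF asks for the default system sound.
        beep.Call(uintptr(0xFFFFFFFF))
    }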
+var ( + procReadConsoleInput = k32.NewProc("ReadConsoleInputW") + procWaitForMultipleObjects = k32.NewProc("WaitForMultipleObjects") + procCreateEvent = k32.NewProc("CreateEventW") + procSetEvent = k32.NewProc("SetEvent") + procGetConsoleCursorInfo = k32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = k32.NewProc("SetConsoleCursorInfo") + procSetConsoleCursorPosition = k32.NewProc("SetConsoleCursorPosition") + procSetConsoleMode = k32.NewProc("SetConsoleMode") + procGetConsoleMode = k32.NewProc("GetConsoleMode") + procGetConsoleScreenBufferInfo = k32.NewProc("GetConsoleScreenBufferInfo") + procFillConsoleOutputAttribute = k32.NewProc("FillConsoleOutputAttribute") + procFillConsoleOutputCharacter = k32.NewProc("FillConsoleOutputCharacterW") + procSetConsoleWindowInfo = k32.NewProc("SetConsoleWindowInfo") + procSetConsoleScreenBufferSize = k32.NewProc("SetConsoleScreenBufferSize") + procSetConsoleTextAttribute = k32.NewProc("SetConsoleTextAttribute") + procMessageBeep = u32.NewProc("MessageBeep") +) + +const ( + w32Infinite = ^uintptr(0) + w32WaitObject0 = uintptr(0) +) + +const ( + // VT100/XTerm escapes understood by the console + vtShowCursor = "\x1b[?25h" + vtHideCursor = "\x1b[?25l" + vtCursorPos = "\x1b[%d;%dH" // Note that it is Y then X + vtSgr0 = "\x1b[0m" + vtBold = "\x1b[1m" + vtUnderline = "\x1b[4m" + vtBlink = "\x1b[5m" // Not sure this is processed + vtReverse = "\x1b[7m" + vtSetFg = "\x1b[38;5;%dm" + vtSetBg = "\x1b[48;5;%dm" + vtSetFgRGB = "\x1b[38;2;%d;%d;%dm" // RGB + vtSetBgRGB = "\x1b[48;2;%d;%d;%dm" // RGB +) + +// NewConsoleScreen returns a Screen for the Windows console associated +// with the current process. The Screen makes use of the Windows Console +// API to display content and read events. +func NewConsoleScreen() (Screen, error) { + return &cScreen{}, nil +} + +func (s *cScreen) Init() error { + s.evch = make(chan Event, 10) + s.quit = make(chan struct{}) + s.scandone = make(chan struct{}) + + in, e := syscall.Open("CONIN$", syscall.O_RDWR, 0) + if e != nil { + return e + } + s.in = in + out, e := syscall.Open("CONOUT$", syscall.O_RDWR, 0) + if e != nil { + syscall.Close(s.in) + return e + } + s.out = out + + s.truecolor = true + + // ConEmu handling of colors and scrolling when in terminal + // mode is extremely problematic at best. The color + // palette will scroll even though characters do not, when + // emitting stuff for the last character. In the future we + // might change this to look at specific versions of ConEmu + // if they fix the bug. + if os.Getenv("ConEmuPID") != "" { + s.truecolor = false + } + switch os.Getenv("TCELL_TRUECOLOR") { + case "disable": + s.truecolor = false + case "enable": + s.truecolor = true + } + + s.Lock() + + s.curx = -1 + s.cury = -1 + s.style = StyleDefault + s.getCursorInfo(&s.ocursor) + s.getConsoleInfo(&s.oscreen) + s.getOutMode(&s.oomode) + s.getInMode(&s.oimode) + s.resize() + + s.fini = false + s.setInMode(modeResizeEn | modeExtndFlg) + + // 24-bit color is opt-in for now, because we can't figure out how + // to make it work consistently.
+ if s.truecolor { + s.setOutMode(modeVtOutput | modeNoAutoNL | modeCookedOut) + var omode uint32 + s.getOutMode(&omode) + if omode&modeVtOutput == modeVtOutput { + s.vten = true + } else { + s.truecolor = false + s.setOutMode(0) + } + } else { + s.setOutMode(0) + } + + s.Unlock() + + return s.engage() +} + +func (s *cScreen) CharacterSet() string { + // We are always UTF-16LE on Windows + return "UTF-16LE" +} + +func (s *cScreen) EnableMouse(...MouseFlags) { + s.Lock() + s.mouseEnabled = true + s.enableMouse(true) + s.Unlock() +} + +func (s *cScreen) DisableMouse() { + s.Lock() + s.mouseEnabled = false + s.enableMouse(false) + s.Unlock() +} + +func (s *cScreen) enableMouse(on bool) { + if on { + s.setInMode(modeResizeEn | modeMouseEn | modeExtndFlg) + } else { + s.setInMode(modeResizeEn | modeExtndFlg) + } +} + +// Windows lacks bracketed paste (for now) + +func (s *cScreen) EnablePaste() {} + +func (s *cScreen) DisablePaste() {} + +func (s *cScreen) Fini() { + s.disengage() +} + +func (s *cScreen) disengage() { + s.Lock() + if !s.running { + s.Unlock() + return + } + s.running = false + stopQ := s.stopQ + procSetEvent.Call(uintptr(s.cancelflag)) + close(stopQ) + s.Unlock() + + s.wg.Wait() + + s.setInMode(s.oimode) + s.setOutMode(s.oomode) + s.setBufferSize(int(s.oscreen.size.x), int(s.oscreen.size.y)) + s.clearScreen(StyleDefault, false) + s.setCursorPos(0, 0, false) + s.setCursorInfo(&s.ocursor) + procSetConsoleTextAttribute.Call( + uintptr(s.out), + uintptr(s.mapStyle(StyleDefault))) +} + +func (s *cScreen) engage() error { + s.Lock() + defer s.Unlock() + if s.running { + return errors.New("already engaged") + } + s.stopQ = make(chan struct{}) + cf, _, e := procCreateEvent.Call( + uintptr(0), + uintptr(1), + uintptr(0), + uintptr(0)) + if cf == uintptr(0) { + return e + } + s.running = true + s.cancelflag = syscall.Handle(cf) + s.enableMouse(s.mouseEnabled) + + if s.vten { + s.setOutMode(modeVtOutput | modeNoAutoNL | modeCookedOut) + } else { + s.setOutMode(0) + } + + s.clearScreen(s.style, s.vten) + s.hideCursor() + + s.cells.Invalidate() + s.hideCursor() + s.resize() + s.draw() + s.doCursor() + + s.wg.Add(1) + go s.scanInput(s.stopQ) + return nil +} + +func (s *cScreen) PostEventWait(ev Event) { + s.evch <- ev +} + +func (s *cScreen) PostEvent(ev Event) error { + select { + case s.evch <- ev: + return nil + default: + return ErrEventQFull + } +} + +func (s *cScreen) ChannelEvents(ch chan<- Event, quit <-chan struct{}) { + defer close(ch) + for { + select { + case <-quit: + return + case <-s.stopQ: + return + case ev := <-s.evch: + select { + case <-quit: + return + case <-s.stopQ: + return + case ch <- ev: + } + } + } +} + +func (s *cScreen) PollEvent() Event { + select { + case <-s.stopQ: + return nil + case ev := <-s.evch: + return ev + } +} + +func (s *cScreen) HasPendingEvent() bool { + return len(s.evch) > 0 +} + +type cursorInfo struct { + size uint32 + visible uint32 +} + +type coord struct { + x int16 + y int16 +} + +func (c coord) uintptr() uintptr { + // little endian, put x first + return uintptr(c.x) | (uintptr(c.y) << 16) +} + +type rect struct { + left int16 + top int16 + right int16 + bottom int16 +} + +func (s *cScreen) emitVtString(vs string) { + esc := utf16.Encode([]rune(vs)) + syscall.WriteConsole(s.out, &esc[0], uint32(len(esc)), nil, nil) +} + +func (s *cScreen) showCursor() { + if s.vten { + s.emitVtString(vtShowCursor) + } else { + s.setCursorInfo(&cursorInfo{size: 100, visible: 1}) + } +} + +func (s *cScreen) hideCursor() { + if s.vten { + 
s.emitVtString(vtHideCursor) + } else { + s.setCursorInfo(&cursorInfo{size: 1, visible: 0}) + } +} + +func (s *cScreen) ShowCursor(x, y int) { + s.Lock() + if !s.fini { + s.curx = x + s.cury = y + } + s.doCursor() + s.Unlock() +} + +func (s *cScreen) doCursor() { + x, y := s.curx, s.cury + + if x < 0 || y < 0 || x >= s.w || y >= s.h { + s.hideCursor() + } else { + s.setCursorPos(x, y, s.vten) + s.showCursor() + } +} + +func (s *cScreen) HideCursor() { + s.ShowCursor(-1, -1) +} + +type inputRecord struct { + typ uint16 + _ uint16 + data [16]byte +} + +const ( + keyEvent uint16 = 1 + mouseEvent uint16 = 2 + resizeEvent uint16 = 4 + menuEvent uint16 = 8 // don't use + focusEvent uint16 = 16 // don't use +) + +type mouseRecord struct { + x int16 + y int16 + btns uint32 + mod uint32 + flags uint32 +} + +const ( + mouseDoubleClick uint32 = 0x2 + mouseHWheeled uint32 = 0x8 + mouseVWheeled uint32 = 0x4 + mouseMoved uint32 = 0x1 +) + +type resizeRecord struct { + x int16 + y int16 +} + +type keyRecord struct { + isdown int32 + repeat uint16 + kcode uint16 + scode uint16 + ch uint16 + mod uint32 +} + +const ( + // Constants per Microsoft. We don't put the modifiers + // here. + vkCancel = 0x03 + vkBack = 0x08 // Backspace + vkTab = 0x09 + vkClear = 0x0c + vkReturn = 0x0d + vkPause = 0x13 + vkEscape = 0x1b + vkSpace = 0x20 + vkPrior = 0x21 // PgUp + vkNext = 0x22 // PgDn + vkEnd = 0x23 + vkHome = 0x24 + vkLeft = 0x25 + vkUp = 0x26 + vkRight = 0x27 + vkDown = 0x28 + vkPrint = 0x2a + vkPrtScr = 0x2c + vkInsert = 0x2d + vkDelete = 0x2e + vkHelp = 0x2f + vkF1 = 0x70 + vkF2 = 0x71 + vkF3 = 0x72 + vkF4 = 0x73 + vkF5 = 0x74 + vkF6 = 0x75 + vkF7 = 0x76 + vkF8 = 0x77 + vkF9 = 0x78 + vkF10 = 0x79 + vkF11 = 0x7a + vkF12 = 0x7b + vkF13 = 0x7c + vkF14 = 0x7d + vkF15 = 0x7e + vkF16 = 0x7f + vkF17 = 0x80 + vkF18 = 0x81 + vkF19 = 0x82 + vkF20 = 0x83 + vkF21 = 0x84 + vkF22 = 0x85 + vkF23 = 0x86 + vkF24 = 0x87 +) + +var vkKeys = map[uint16]Key{ + vkCancel: KeyCancel, + vkBack: KeyBackspace, + vkTab: KeyTab, + vkClear: KeyClear, + vkPause: KeyPause, + vkPrint: KeyPrint, + vkPrtScr: KeyPrint, + vkPrior: KeyPgUp, + vkNext: KeyPgDn, + vkReturn: KeyEnter, + vkEnd: KeyEnd, + vkHome: KeyHome, + vkLeft: KeyLeft, + vkUp: KeyUp, + vkRight: KeyRight, + vkDown: KeyDown, + vkInsert: KeyInsert, + vkDelete: KeyDelete, + vkHelp: KeyHelp, + vkF1: KeyF1, + vkF2: KeyF2, + vkF3: KeyF3, + vkF4: KeyF4, + vkF5: KeyF5, + vkF6: KeyF6, + vkF7: KeyF7, + vkF8: KeyF8, + vkF9: KeyF9, + vkF10: KeyF10, + vkF11: KeyF11, + vkF12: KeyF12, + vkF13: KeyF13, + vkF14: KeyF14, + vkF15: KeyF15, + vkF16: KeyF16, + vkF17: KeyF17, + vkF18: KeyF18, + vkF19: KeyF19, + vkF20: KeyF20, + vkF21: KeyF21, + vkF22: KeyF22, + vkF23: KeyF23, + vkF24: KeyF24, +} + +// NB: All Windows platforms are little endian. We assume this +// never, ever changes. The following code is endian safe and does +// not use unsafe pointers.
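The endian-safe helpers that follow can be sanity-checked in isolation; a minimal sketch, with getu32 copied from the code below:

    package main

    import "fmt"

    // getu32 decodes 4 bytes as a little-endian uint32 without unsafe.
    func getu32(v []byte) uint32 {
        return uint32(v[0]) + (uint32(v[1]) << 8) + (uint32(v[2]) << 16) + (uint32(v[3]) << 24)
    }

    func main() {
        fmt.Printf("%#x\n", getu32([]byte{0x78, 0x56, 0x34, 0x12})) // 0x12345678
    }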
+func getu32(v []byte) uint32 { + return uint32(v[0]) + (uint32(v[1]) << 8) + (uint32(v[2]) << 16) + (uint32(v[3]) << 24) +} +func geti32(v []byte) int32 { + return int32(getu32(v)) +} +func getu16(v []byte) uint16 { + return uint16(v[0]) + (uint16(v[1]) << 8) +} +func geti16(v []byte) int16 { + return int16(getu16(v)) +} + +// Convert windows dwControlKeyState to modifier mask +func mod2mask(cks uint32) ModMask { + mm := ModNone + // Left or right control + if (cks & (0x0008 | 0x0004)) != 0 { + mm |= ModCtrl + } + // Left or right alt + if (cks & (0x0002 | 0x0001)) != 0 { + mm |= ModAlt + } + // Any shift + if (cks & 0x0010) != 0 { + mm |= ModShift + } + return mm +} + +func mrec2btns(mbtns, flags uint32) ButtonMask { + btns := ButtonNone + if mbtns&0x1 != 0 { + btns |= Button1 + } + if mbtns&0x2 != 0 { + btns |= Button2 + } + if mbtns&0x4 != 0 { + btns |= Button3 + } + if mbtns&0x8 != 0 { + btns |= Button4 + } + if mbtns&0x10 != 0 { + btns |= Button5 + } + if mbtns&0x20 != 0 { + btns |= Button6 + } + if mbtns&0x40 != 0 { + btns |= Button7 + } + if mbtns&0x80 != 0 { + btns |= Button8 + } + + if flags&mouseVWheeled != 0 { + if mbtns&0x80000000 == 0 { + btns |= WheelUp + } else { + btns |= WheelDown + } + } + if flags&mouseHWheeled != 0 { + if mbtns&0x80000000 == 0 { + btns |= WheelRight + } else { + btns |= WheelLeft + } + } + return btns +} + +func (s *cScreen) getConsoleInput() error { + // cancelFlag comes first as WaitForMultipleObjects returns the lowest index + // in the event that both events are signalled. + waitObjects := []syscall.Handle{s.cancelflag, s.in} + // As arrays are contiguous in memory, a pointer to the first object is the + // same as a pointer to the array itself. + pWaitObjects := unsafe.Pointer(&waitObjects[0]) + + rv, _, er := procWaitForMultipleObjects.Call( + uintptr(len(waitObjects)), + uintptr(pWaitObjects), + uintptr(0), + w32Infinite) + // WaitForMultipleObjects returns WAIT_OBJECT_0 + the index. 
+ switch rv { + case w32WaitObject0: // s.cancelFlag + return errors.New("cancelled") + case w32WaitObject0 + 1: // s.in + rec := &inputRecord{} + var nrec int32 + rv, _, er := procReadConsoleInput.Call( + uintptr(s.in), + uintptr(unsafe.Pointer(rec)), + uintptr(1), + uintptr(unsafe.Pointer(&nrec))) + if rv == 0 { + return er + } + if nrec != 1 { + return nil + } + switch rec.typ { + case keyEvent: + krec := &keyRecord{} + krec.isdown = geti32(rec.data[0:]) + krec.repeat = getu16(rec.data[4:]) + krec.kcode = getu16(rec.data[6:]) + krec.scode = getu16(rec.data[8:]) + krec.ch = getu16(rec.data[10:]) + krec.mod = getu32(rec.data[12:]) + + if krec.isdown == 0 || krec.repeat < 1 { + // it's a key release event, ignore it + return nil + } + if krec.ch != 0 { + // synthesized key code + for krec.repeat > 0 { + // convert shift+tab to backtab + if mod2mask(krec.mod) == ModShift && krec.ch == vkTab { + s.PostEventWait(NewEventKey(KeyBacktab, 0, + ModNone)) + } else { + s.PostEventWait(NewEventKey(KeyRune, rune(krec.ch), + mod2mask(krec.mod))) + } + krec.repeat-- + } + return nil + } + key := KeyNUL // impossible on Windows + ok := false + if key, ok = vkKeys[krec.kcode]; !ok { + return nil + } + for krec.repeat > 0 { + s.PostEventWait(NewEventKey(key, rune(krec.ch), + mod2mask(krec.mod))) + krec.repeat-- + } + + case mouseEvent: + var mrec mouseRecord + mrec.x = geti16(rec.data[0:]) + mrec.y = geti16(rec.data[2:]) + mrec.btns = getu32(rec.data[4:]) + mrec.mod = getu32(rec.data[8:]) + mrec.flags = getu32(rec.data[12:]) + btns := mrec2btns(mrec.btns, mrec.flags) + // we ignore double click, events are delivered normally + s.PostEventWait(NewEventMouse(int(mrec.x), int(mrec.y), btns, + mod2mask(mrec.mod))) + + case resizeEvent: + var rrec resizeRecord + rrec.x = geti16(rec.data[0:]) + rrec.y = geti16(rec.data[2:]) + s.PostEventWait(NewEventResize(int(rrec.x), int(rrec.y))) + + default: + } + default: + return er + } + + return nil +} + +func (s *cScreen) scanInput(stopQ chan struct{}) { + defer s.wg.Done() + for { + select { + case <-stopQ: + return + default: + } + if e := s.getConsoleInput(); e != nil { + return + } + } +} + +// Windows console can display 8 colors, in either low or high intensity +func (s *cScreen) Colors() int { + if s.vten { + return 1 << 24 + } + return 16 +} + +var vgaColors = map[Color]uint16{ + ColorBlack: 0, + ColorMaroon: 0x4, + ColorGreen: 0x2, + ColorNavy: 0x1, + ColorOlive: 0x6, + ColorPurple: 0x5, + ColorTeal: 0x3, + ColorSilver: 0x7, + ColorGrey: 0x8, + ColorRed: 0xc, + ColorLime: 0xa, + ColorBlue: 0x9, + ColorYellow: 0xe, + ColorFuchsia: 0xd, + ColorAqua: 0xb, + ColorWhite: 0xf, +} + +// Windows uses RGB signals +func mapColor2RGB(c Color) uint16 { + winLock.Lock() + if v, ok := winColors[c]; ok { + c = v + } else { + v = FindColor(c, winPalette) + winColors[c] = v + c = v + } + winLock.Unlock() + + if vc, ok := vgaColors[c]; ok { + return vc + } + return 0 +} + +// Map a tcell style to Windows attributes +func (s *cScreen) mapStyle(style Style) uint16 { + f, b, a := style.Decompose() + fa := s.oscreen.attrs & 0xf + ba := (s.oscreen.attrs) >> 4 & 0xf + if f != ColorDefault && f != ColorReset { + fa = mapColor2RGB(f) + } + if b != ColorDefault && b != ColorReset { + ba = mapColor2RGB(b) + } + var attr uint16 + // We simulate reverse by doing the color swap ourselves. + // Apparently Windows cannot really do this except in DBCS + // views.
+ if a&AttrReverse != 0 { + attr = ba + attr |= (fa << 4) + } else { + attr = fa + attr |= (ba << 4) + } + if a&AttrBold != 0 { + attr |= 0x8 + } + if a&AttrDim != 0 { + attr &^= 0x8 + } + if a&AttrUnderline != 0 { + // Best effort -- doesn't seem to work though. + attr |= 0x8000 + } + // Blink is unsupported + return attr +} + +func (s *cScreen) SetCell(x, y int, style Style, ch ...rune) { + if len(ch) > 0 { + s.SetContent(x, y, ch[0], ch[1:], style) + } else { + s.SetContent(x, y, ' ', nil, style) + } +} + +func (s *cScreen) SetContent(x, y int, mainc rune, combc []rune, style Style) { + s.Lock() + if !s.fini { + s.cells.SetContent(x, y, mainc, combc, style) + } + s.Unlock() +} + +func (s *cScreen) GetContent(x, y int) (rune, []rune, Style, int) { + s.Lock() + mainc, combc, style, width := s.cells.GetContent(x, y) + s.Unlock() + return mainc, combc, style, width +} + +func (s *cScreen) sendVtStyle(style Style) { + esc := &strings.Builder{} + + fg, bg, attrs := style.Decompose() + + esc.WriteString(vtSgr0) + + if attrs&(AttrBold|AttrDim) == AttrBold { + esc.WriteString(vtBold) + } + if attrs&AttrBlink != 0 { + esc.WriteString(vtBlink) + } + if attrs&AttrUnderline != 0 { + esc.WriteString(vtUnderline) + } + if attrs&AttrReverse != 0 { + esc.WriteString(vtReverse) + } + if fg.IsRGB() { + r, g, b := fg.RGB() + fmt.Fprintf(esc, vtSetFgRGB, r, g, b) + } else if fg.Valid() { + fmt.Fprintf(esc, vtSetFg, fg&0xff) + } + if bg.IsRGB() { + r, g, b := bg.RGB() + fmt.Fprintf(esc, vtSetBgRGB, r, g, b) + } else if bg.Valid() { + fmt.Fprintf(esc, vtSetBg, bg&0xff) + } + s.emitVtString(esc.String()) +} + +func (s *cScreen) writeString(x, y int, style Style, ch []uint16) { + // we assume the caller has hidden the cursor + if len(ch) == 0 { + return + } + s.setCursorPos(x, y, s.vten) + + if s.vten { + s.sendVtStyle(style) + } else { + procSetConsoleTextAttribute.Call( + uintptr(s.out), + uintptr(s.mapStyle(style))) + } + syscall.WriteConsole(s.out, &ch[0], uint32(len(ch)), nil, nil) +} + +func (s *cScreen) draw() { + // allocate a scratch line big enough for no combining chars. + // if you have combining characters, you may pay for extra allocs. + if s.clear { + s.clearScreen(s.style, s.vten) + s.clear = false + s.cells.Invalidate() + } + buf := make([]uint16, 0, s.w) + wcs := buf[:] + lstyle := styleInvalid + + lx, ly := -1, -1 + ra := make([]rune, 1) + + for y := 0; y < s.h; y++ { + for x := 0; x < s.w; x++ { + mainc, combc, style, width := s.cells.GetContent(x, y) + dirty := s.cells.Dirty(x, y) + if style == StyleDefault { + style = s.style + } + + if !dirty || style != lstyle { + // write out any data queued thus far + // because we are going to skip over some + // cells, or because we need to change styles + s.writeString(lx, ly, lstyle, wcs) + wcs = buf[0:0] + lstyle = StyleDefault + if !dirty { + continue + } + } + if x > s.w-width { + mainc = ' ' + combc = nil + width = 1 + } + if len(wcs) == 0 { + lstyle = style + lx = x + ly = y + } + ra[0] = mainc + wcs = append(wcs, utf16.Encode(ra)...) + if len(combc) != 0 { + wcs = append(wcs, utf16.Encode(combc)...)
+ } + for dx := 0; dx < width; dx++ { + s.cells.SetDirty(x+dx, y, false) + } + x += width - 1 + } + s.writeString(lx, ly, lstyle, wcs) + wcs = buf[0:0] + lstyle = styleInvalid + } +} + +func (s *cScreen) Show() { + s.Lock() + if !s.fini { + s.hideCursor() + s.resize() + s.draw() + s.doCursor() + } + s.Unlock() +} + +func (s *cScreen) Sync() { + s.Lock() + if !s.fini { + s.cells.Invalidate() + s.hideCursor() + s.resize() + s.draw() + s.doCursor() + } + s.Unlock() +} + +type consoleInfo struct { + size coord + pos coord + attrs uint16 + win rect + maxsz coord +} + +func (s *cScreen) getConsoleInfo(info *consoleInfo) { + procGetConsoleScreenBufferInfo.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(info))) +} + +func (s *cScreen) getCursorInfo(info *cursorInfo) { + procGetConsoleCursorInfo.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(info))) +} + +func (s *cScreen) setCursorInfo(info *cursorInfo) { + procSetConsoleCursorInfo.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(info))) + +} + +func (s *cScreen) setCursorPos(x, y int, vtEnable bool) { + if vtEnable { + // Note that the string is Y first. Origin is 1,1. + s.emitVtString(fmt.Sprintf(vtCursorPos, y+1, x+1)) + } else { + procSetConsoleCursorPosition.Call( + uintptr(s.out), + coord{int16(x), int16(y)}.uintptr()) + } +} + +func (s *cScreen) setBufferSize(x, y int) { + procSetConsoleScreenBufferSize.Call( + uintptr(s.out), + coord{int16(x), int16(y)}.uintptr()) +} + +func (s *cScreen) Size() (int, int) { + s.Lock() + w, h := s.w, s.h + s.Unlock() + + return w, h +} + +func (s *cScreen) resize() { + info := consoleInfo{} + s.getConsoleInfo(&info) + + w := int((info.win.right - info.win.left) + 1) + h := int((info.win.bottom - info.win.top) + 1) + + if s.w == w && s.h == h { + return + } + + s.cells.Resize(w, h) + s.w = w + s.h = h + + s.setBufferSize(w, h) + + r := rect{0, 0, int16(w - 1), int16(h - 1)} + procSetConsoleWindowInfo.Call( + uintptr(s.out), + uintptr(1), + uintptr(unsafe.Pointer(&r))) + s.PostEvent(NewEventResize(w, h)) +} + +func (s *cScreen) Clear() { + s.Fill(' ', s.style) +} + +func (s *cScreen) Fill(r rune, style Style) { + s.Lock() + if !s.fini { + s.cells.Fill(r, style) + s.clear = true + } + s.Unlock() +} + +func (s *cScreen) clearScreen(style Style, vtEnable bool) { + if vtEnable { + s.sendVtStyle(style) + row := strings.Repeat(" ", s.w) + for y := 0; y < s.h; y++ { + s.setCursorPos(0, y, vtEnable) + s.emitVtString(row) + } + s.setCursorPos(0, 0, vtEnable) + + } else { + pos := coord{0, 0} + attr := s.mapStyle(style) + x, y := s.w, s.h + scratch := uint32(0) + count := uint32(x * y) + + procFillConsoleOutputAttribute.Call( + uintptr(s.out), + uintptr(attr), + uintptr(count), + pos.uintptr(), + uintptr(unsafe.Pointer(&scratch))) + procFillConsoleOutputCharacter.Call( + uintptr(s.out), + uintptr(' '), + uintptr(count), + pos.uintptr(), + uintptr(unsafe.Pointer(&scratch))) + } +} + +const ( + // Input modes + modeExtndFlg uint32 = 0x0080 + modeMouseEn = 0x0010 + modeResizeEn = 0x0008 + modeCooked = 0x0001 + modeVtInput = 0x0200 + + // Output modes + modeCookedOut uint32 = 0x0001 + modeWrapEOL = 0x0002 + modeVtOutput = 0x0004 + modeNoAutoNL = 0x0008 +) + +func (s *cScreen) setInMode(mode uint32) error { + rv, _, err := procSetConsoleMode.Call( + uintptr(s.in), + uintptr(mode)) + if rv == 0 { + return err + } + return nil +} + +func (s *cScreen) setOutMode(mode uint32) error { + rv, _, err := procSetConsoleMode.Call( + uintptr(s.out), + uintptr(mode)) + if rv == 0 { + return err + } + return nil +} + +func (s 
*cScreen) getInMode(v *uint32) { + procGetConsoleMode.Call( + uintptr(s.in), + uintptr(unsafe.Pointer(v))) +} + +func (s *cScreen) getOutMode(v *uint32) { + procGetConsoleMode.Call( + uintptr(s.out), + uintptr(unsafe.Pointer(v))) +} + +func (s *cScreen) SetStyle(style Style) { + s.Lock() + s.style = style + s.Unlock() +} + +// No fallback rune support, since we have Unicode. Yay! + +func (s *cScreen) RegisterRuneFallback(r rune, subst string) { +} + +func (s *cScreen) UnregisterRuneFallback(r rune) { +} + +func (s *cScreen) CanDisplay(r rune, checkFallbacks bool) bool { + // We presume we can display anything -- we're Unicode. + // (Sadly this is not precisely true. Combining characters are + // especially poorly supported under Windows.) + return true +} + +func (s *cScreen) HasMouse() bool { + return true +} + +func (s *cScreen) Resize(int, int, int, int) {} + +func (s *cScreen) HasKey(k Key) bool { + // Microsoft has codes for some keys, but they are unusual, + // so we don't include them. We include all the typical + // 101, 105 key layout keys. + valid := map[Key]bool{ + KeyBackspace: true, + KeyTab: true, + KeyEscape: true, + KeyPause: true, + KeyPrint: true, + KeyPgUp: true, + KeyPgDn: true, + KeyEnter: true, + KeyEnd: true, + KeyHome: true, + KeyLeft: true, + KeyUp: true, + KeyRight: true, + KeyDown: true, + KeyInsert: true, + KeyDelete: true, + KeyF1: true, + KeyF2: true, + KeyF3: true, + KeyF4: true, + KeyF5: true, + KeyF6: true, + KeyF7: true, + KeyF8: true, + KeyF9: true, + KeyF10: true, + KeyF11: true, + KeyF12: true, + KeyRune: true, + } + + return valid[k] +} + +func (s *cScreen) Beep() error { + // A simple beep. If the sound card is not available, the sound is generated + // using the speaker. + // + // Reference: + // https://docs.microsoft.com/en-us/windows/win32/api/winuser/nf-winuser-messagebeep + const simpleBeep = 0xffffffff + if rv, _, err := procMessageBeep.Call(simpleBeep); rv == 0 { + return err + } + return nil +} + +func (s *cScreen) Suspend() error { + s.disengage() + return nil +} + +func (s *cScreen) Resume() error { + return s.engage() +} diff --git a/vendor/github.com/gdamore/tcell/v2/doc.go b/vendor/github.com/gdamore/tcell/v2/doc.go new file mode 100644 index 0000000000..b671961359 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/doc.go @@ -0,0 +1,48 @@ +// Copyright 2018 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tcell provides a lower-level, portable API for building +// programs that interact with terminals or consoles. It works with +// both common (and many uncommon!) terminals or terminal emulators, +// and Windows console implementations. +// +// It provides support for up to 256 colors, text attributes, and box drawing +// elements. A database of terminals built from a real terminfo database +// is provided, along with code to generate new database entries. +// +// Tcell offers very rich support for mice, dependent upon the terminal +// of course. (Windows, XTerm, and iTerm 2 are known to work very well.)
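To make the package overview concrete, a minimal "hello" program against this API; NewScreen and the Style builder methods are assumed from the rest of the package, as they are not part of this hunk:

    package main

    import "github.com/gdamore/tcell/v2"

    func main() {
        s, err := tcell.NewScreen()
        if err != nil {
            panic(err)
        }
        if err = s.Init(); err != nil {
            panic(err)
        }
        defer s.Fini()

        style := tcell.StyleDefault.Foreground(tcell.ColorGreen)
        for i, r := range "Hello, tcell!" {
            s.SetContent(i, 0, r, nil, style)
        }
        s.Show()

        // Block until the first key press.
        for {
            if _, ok := s.PollEvent().(*tcell.EventKey); ok {
                break
            }
        }
    }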
+// +// If the environment is not Unicode by default, such as an ISO8859 based +// locale or GB18030, Tcell can convert input and output, so that your +// terminal can operate in whatever locale is most convenient, while the +// application program can just assume "everything is UTF-8". Reasonable +// defaults are used for updating characters to something suitable for +// display. Unicode box drawing characters will be converted to use the +// alternate character set of your terminal, if native conversions are +// not available. If no ACS is available, then some ASCII fallbacks will +// be used. +// +// Note that support for non-UTF-8 locales (other than C) must be enabled +// by the application using RegisterEncoding() -- we don't have them all +// enabled by default to avoid bloating the application unnecessarily. +// (These days UTF-8 is good enough for almost everyone, and nobody should +// be using legacy locales anymore.) Also, actual glyphs for various code +// points will only be displayed if your terminal or emulator (or the font +// the emulator is using) supports them. +// +// A rich set of keycodes is supported, with support for up to 65 function +// keys, and various other special keys. +// +package tcell diff --git a/vendor/github.com/gdamore/tcell/v2/encoding.go b/vendor/github.com/gdamore/tcell/v2/encoding.go new file mode 100644 index 0000000000..596a6e8005 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/encoding.go @@ -0,0 +1,139 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "strings" + "sync" + + "golang.org/x/text/encoding" + + gencoding "github.com/gdamore/encoding" +) + +var encodings map[string]encoding.Encoding +var encodingLk sync.Mutex +var encodingFallback EncodingFallback = EncodingFallbackFail + +// RegisterEncoding may be called by the application to register an encoding. +// The presence of additional encodings will facilitate application usage with +// terminal environments where the I/O subsystem does not support Unicode. +// +// Windows systems use Unicode natively, and do not need any of the encoding +// subsystem when using Windows Console screens. +// +// Please see the Go documentation for golang.org/x/text/encoding -- most of +// the common ones exist already as stock variables. For example, ISO8859-15 +// can be registered using the following code: +// +// import "golang.org/x/text/encoding/charmap" +// +// ... +// RegisterEncoding("ISO8859-15", charmap.ISO8859_15) +// +// Aliases can be registered as well, for example "8859-15" could be an alias +// for "ISO8859-15". +// +// For POSIX systems, the tcell package will check the environment variables +// LC_ALL, LC_CTYPE, and LANG (in that order) to determine the character set. +// These are expected to have the following pattern: +// +// $language[.$codeset[@$variant]] +// +// We extract only the $codeset part, which will usually be something like +// UTF-8 or ISO8859-15 or KOI8-R.
Note that if the locale is either "POSIX" +// or "C", then we assume US-ASCII (the POSIX 'portable character set') +// and assume all other characters are somehow invalid. +// +// Modern POSIX systems and terminal emulators may use UTF-8, and for those +// systems, this API is also unnecessary. For example, Darwin (MacOS X) and +// modern Linux running modern xterm generally will work out of the box +// without any of this. Use of UTF-8 is recommended when possible, as it +// saves quite a lot of processing overhead. +// +// Note that some encodings are quite large (for example GB18030 which is a +// superset of Unicode) and so the application size can be expected to +// increase quite a bit as each encoding is added. The East Asian encodings +// have been seen to add 100-200K per encoding to the application size. +// +func RegisterEncoding(charset string, enc encoding.Encoding) { + encodingLk.Lock() + charset = strings.ToLower(charset) + encodings[charset] = enc + encodingLk.Unlock() +} + +// EncodingFallback describes how the system behaves when the locale +// requires a character set that we do not support. The system always +// supports UTF-8 and US-ASCII. On Windows consoles, UTF-16LE is also +// supported automatically. Other character sets must be added using the +// RegisterEncoding API. (A large group of nearly all of them can be +// added using the RegisterAll function in the encoding sub-package.) +type EncodingFallback int + +const ( + // EncodingFallbackFail behavior causes GetEncoding to fail + // when it cannot find an encoding. + EncodingFallbackFail = iota + + // EncodingFallbackASCII behavior causes GetEncoding to fall back + // to a 7-bit ASCII encoding, if no other encoding can be found. + EncodingFallbackASCII + + // EncodingFallbackUTF8 behavior causes GetEncoding to assume + // UTF8 can pass unmodified upon failure. Note that this behavior + // is not recommended, unless you are sure your terminal can cope + // with real UTF8 sequences. + EncodingFallbackUTF8 +) + +// SetEncodingFallback changes the behavior of GetEncoding when a suitable +// encoding is not found. The default is EncodingFallbackFail, which +// causes GetEncoding to simply return nil. +func SetEncodingFallback(fb EncodingFallback) { + encodingLk.Lock() + encodingFallback = fb + encodingLk.Unlock() +} + +// GetEncoding is used by Screen implementors who want to locate an encoding +// for the given character set name. Note that this will return nil for +// either the Unicode (UTF-8) or ASCII encodings, since we don't use +// encodings for them but instead have our own native methods. +func GetEncoding(charset string) encoding.Encoding { + charset = strings.ToLower(charset) + encodingLk.Lock() + defer encodingLk.Unlock() + if enc, ok := encodings[charset]; ok { + return enc + } + switch encodingFallback { + case EncodingFallbackASCII: + return gencoding.ASCII + case EncodingFallbackUTF8: + return encoding.Nop + } + return nil +} + +func init() { + // We always support UTF-8 and ASCII.
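A short sketch of the registration and fallback flow just described; the alias registration is illustrative, per the doc comment above:

    package main

    import (
        "fmt"

        "github.com/gdamore/tcell/v2"
        "golang.org/x/text/encoding/charmap"
    )

    func main() {
        // Register ISO8859-15 and an alias; names are lowercased internally.
        tcell.RegisterEncoding("ISO8859-15", charmap.ISO8859_15)
        tcell.RegisterEncoding("8859-15", charmap.ISO8859_15)

        // Unregistered charsets honor the configured fallback policy.
        tcell.SetEncodingFallback(tcell.EncodingFallbackASCII)
        fmt.Println(tcell.GetEncoding("koi8-r") != nil) // true: ASCII fallback
    }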
+ encodings = make(map[string]encoding.Encoding) + encodings["utf-8"] = gencoding.UTF8 + encodings["utf8"] = gencoding.UTF8 + encodings["us-ascii"] = gencoding.ASCII + encodings["ascii"] = gencoding.ASCII + encodings["iso646"] = gencoding.ASCII +} diff --git a/vendor/github.com/gdamore/tcell/v2/errors.go b/vendor/github.com/gdamore/tcell/v2/errors.go new file mode 100644 index 0000000000..201dff9f80 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/errors.go @@ -0,0 +1,73 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "errors" + "time" + + "github.com/gdamore/tcell/v2/terminfo" +) + +var ( + // ErrTermNotFound indicates that a suitable terminal entry could + // not be found. This can result from either not having TERM set, + // or from the TERM failing to support certain minimal functionality; + // in particular, absolute cursor addressability (the cup capability) + // is required. For example, legacy "adm3" lacks this capability, + // whereas the slightly newer "adm3a" supports it. This failure + // occurs most often with "dumb". + ErrTermNotFound = terminfo.ErrTermNotFound + + // ErrNoScreen indicates that no suitable screen could be found. + // This may result from attempting to run on a platform where there + // is no support for either termios or console I/O (such as nacl), + // or from running in an environment where there is no access to + // a suitable console/terminal device. (For example, running + // without a controlling TTY or with no /dev/tty on POSIX platforms.) + ErrNoScreen = errors.New("no suitable screen available") + + // ErrNoCharset indicates that the character set of the locale + // environment is not supported by the program, because no suitable + // encoding was found for it. This problem never occurs if + // the environment is UTF-8 or UTF-16. + ErrNoCharset = errors.New("character set not supported") + + // ErrEventQFull indicates that the event queue is full, and + // cannot accept more events. + ErrEventQFull = errors.New("event queue full") +) + +// An EventError is an event representing some sort of error, and carries +// an error payload. +type EventError struct { + t time.Time + err error +} + +// When returns the time when the event was created. +func (ev *EventError) When() time.Time { + return ev.t +} + +// Error implements the error interface. +func (ev *EventError) Error() string { + return ev.err.Error() +} + +// NewEventError creates an EventError with the given error payload. +func NewEventError(err error) *EventError { + return &EventError{t: time.Now(), err: err} +} diff --git a/vendor/github.com/gdamore/tcell/v2/event.go b/vendor/github.com/gdamore/tcell/v2/event.go new file mode 100644 index 0000000000..a3b770063b --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/event.go @@ -0,0 +1,53 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "time" +) + +// Event is a generic interface used for passing around Events. +// Concrete types follow. +type Event interface { + // When reports the time when the event was generated. + When() time.Time +} + +// EventTime is a simple base event class, suitable for easy reuse. +// It can be used to deliver actual timer events as well. +type EventTime struct { + when time.Time +} + +// When returns the time stamp when the event occurred. +func (e *EventTime) When() time.Time { + return e.when +} + +// SetEventTime sets the time of occurrence for the event. +func (e *EventTime) SetEventTime(t time.Time) { + e.when = t +} + +// SetEventNow sets the time of occurrence for the event to the current time. +func (e *EventTime) SetEventNow() { + e.SetEventTime(time.Now()) +} + +// EventHandler is anything that handles events. If the handler has +// consumed the event, it should return true. False otherwise. +type EventHandler interface { + HandleEvent(Event) bool +} diff --git a/vendor/github.com/gdamore/tcell/v2/go.mod b/vendor/github.com/gdamore/tcell/v2/go.mod new file mode 100644 index 0000000000..af97fa00bc --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/go.mod @@ -0,0 +1,12 @@ +module github.com/gdamore/tcell/v2 + +go 1.12 + +require ( + github.com/gdamore/encoding v1.0.0 + github.com/lucasb-eyer/go-colorful v1.0.3 + github.com/mattn/go-runewidth v0.0.10 + golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 + golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf + golang.org/x/text v0.3.0 +) diff --git a/vendor/github.com/gdamore/tcell/v2/go.sum b/vendor/github.com/gdamore/tcell/v2/go.sum new file mode 100644 index 0000000000..068fabd988 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/go.sum @@ -0,0 +1,14 @@ +github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= +github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= +github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac= +github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0= +github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg= +github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= +github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= +github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= +golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/gdamore/tcell/v2/interrupt.go 
b/vendor/github.com/gdamore/tcell/v2/interrupt.go new file mode 100644 index 0000000000..70dddfce2f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/interrupt.go @@ -0,0 +1,41 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "time" +) + +// EventInterrupt is a generic wakeup event. Its can be used to +// to request a redraw. It can carry an arbitrary payload, as well. +type EventInterrupt struct { + t time.Time + v interface{} +} + +// When returns the time when this event was created. +func (ev *EventInterrupt) When() time.Time { + return ev.t +} + +// Data is used to obtain the opaque event payload. +func (ev *EventInterrupt) Data() interface{} { + return ev.v +} + +// NewEventInterrupt creates an EventInterrupt with the given payload. +func NewEventInterrupt(data interface{}) *EventInterrupt { + return &EventInterrupt{t: time.Now(), v: data} +} diff --git a/vendor/github.com/gdamore/tcell/v2/key.go b/vendor/github.com/gdamore/tcell/v2/key.go new file mode 100644 index 0000000000..9741e699fe --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/key.go @@ -0,0 +1,470 @@ +// Copyright 2016 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "fmt" + "strings" + "time" +) + +// EventKey represents a key press. Usually this is a key press followed +// by a key release, but since terminal programs don't have a way to report +// key release events, we usually get just one event. If a key is held down +// then the terminal may synthesize repeated key presses at some predefined +// rate. We have no control over that, nor visibility into it. +// +// In some cases, we can have a modifier key, such as ModAlt, that can be +// generated with a key press. (This usually is represented by having the +// high bit set, or in some cases, by sending an ESC prior to the rune.) +// +// If the value of Key() is KeyRune, then the actual key value will be +// available with the Rune() method. This will be the case for most keys. +// In most situations, the modifiers will not be set. For example, if the +// rune is 'A', this will be reported without the ModShift bit set, since +// really can't tell if the Shift key was pressed (it might have been CAPSLOCK, +// or a terminal that only can send capitals, or keyboard with separate +// capital letters from lower case letters). 
+// +// Generally, terminal applications have far less visibility into keyboard +// activity than graphical applications. Hence, they should avoid depending +// overly much on availability of modifiers, or the availability of any +// specific keys. +type EventKey struct { + t time.Time + mod ModMask + key Key + ch rune +} + +// When returns the time when this Event was created, which should closely +// match the time when the key was pressed. +func (ev *EventKey) When() time.Time { + return ev.t +} + +// Rune returns the rune corresponding to the key press, if it makes sense. +// The result is only defined if the value of Key() is KeyRune. +func (ev *EventKey) Rune() rune { + return ev.ch +} + +// Key returns a virtual key code. We use this to identify specific key +// codes, such as KeyEnter, etc. Most control and function keys are reported +// with unique Key values. Normal alphanumeric and punctuation keys will +// generally return KeyRune here; the specific key can be further decoded +// using the Rune() function. +func (ev *EventKey) Key() Key { + return ev.key +} + +// Modifiers returns the modifiers that were present with the key press. Note +// that not all platforms and terminals support this equally well, and some +// cases we will not not know for sure. Hence, applications should avoid +// using this in most circumstances. +func (ev *EventKey) Modifiers() ModMask { + return ev.mod +} + +// KeyNames holds the written names of special keys. Useful to echo back a key +// name, or to look up a key from a string value. +var KeyNames = map[Key]string{ + KeyEnter: "Enter", + KeyBackspace: "Backspace", + KeyTab: "Tab", + KeyBacktab: "Backtab", + KeyEsc: "Esc", + KeyBackspace2: "Backspace2", + KeyDelete: "Delete", + KeyInsert: "Insert", + KeyUp: "Up", + KeyDown: "Down", + KeyLeft: "Left", + KeyRight: "Right", + KeyHome: "Home", + KeyEnd: "End", + KeyUpLeft: "UpLeft", + KeyUpRight: "UpRight", + KeyDownLeft: "DownLeft", + KeyDownRight: "DownRight", + KeyCenter: "Center", + KeyPgDn: "PgDn", + KeyPgUp: "PgUp", + KeyClear: "Clear", + KeyExit: "Exit", + KeyCancel: "Cancel", + KeyPause: "Pause", + KeyPrint: "Print", + KeyF1: "F1", + KeyF2: "F2", + KeyF3: "F3", + KeyF4: "F4", + KeyF5: "F5", + KeyF6: "F6", + KeyF7: "F7", + KeyF8: "F8", + KeyF9: "F9", + KeyF10: "F10", + KeyF11: "F11", + KeyF12: "F12", + KeyF13: "F13", + KeyF14: "F14", + KeyF15: "F15", + KeyF16: "F16", + KeyF17: "F17", + KeyF18: "F18", + KeyF19: "F19", + KeyF20: "F20", + KeyF21: "F21", + KeyF22: "F22", + KeyF23: "F23", + KeyF24: "F24", + KeyF25: "F25", + KeyF26: "F26", + KeyF27: "F27", + KeyF28: "F28", + KeyF29: "F29", + KeyF30: "F30", + KeyF31: "F31", + KeyF32: "F32", + KeyF33: "F33", + KeyF34: "F34", + KeyF35: "F35", + KeyF36: "F36", + KeyF37: "F37", + KeyF38: "F38", + KeyF39: "F39", + KeyF40: "F40", + KeyF41: "F41", + KeyF42: "F42", + KeyF43: "F43", + KeyF44: "F44", + KeyF45: "F45", + KeyF46: "F46", + KeyF47: "F47", + KeyF48: "F48", + KeyF49: "F49", + KeyF50: "F50", + KeyF51: "F51", + KeyF52: "F52", + KeyF53: "F53", + KeyF54: "F54", + KeyF55: "F55", + KeyF56: "F56", + KeyF57: "F57", + KeyF58: "F58", + KeyF59: "F59", + KeyF60: "F60", + KeyF61: "F61", + KeyF62: "F62", + KeyF63: "F63", + KeyF64: "F64", + KeyCtrlA: "Ctrl-A", + KeyCtrlB: "Ctrl-B", + KeyCtrlC: "Ctrl-C", + KeyCtrlD: "Ctrl-D", + KeyCtrlE: "Ctrl-E", + KeyCtrlF: "Ctrl-F", + KeyCtrlG: "Ctrl-G", + KeyCtrlJ: "Ctrl-J", + KeyCtrlK: "Ctrl-K", + KeyCtrlL: "Ctrl-L", + KeyCtrlN: "Ctrl-N", + KeyCtrlO: "Ctrl-O", + KeyCtrlP: "Ctrl-P", + KeyCtrlQ: "Ctrl-Q", + KeyCtrlR: "Ctrl-R", + 
KeyCtrlS: "Ctrl-S", + KeyCtrlT: "Ctrl-T", + KeyCtrlU: "Ctrl-U", + KeyCtrlV: "Ctrl-V", + KeyCtrlW: "Ctrl-W", + KeyCtrlX: "Ctrl-X", + KeyCtrlY: "Ctrl-Y", + KeyCtrlZ: "Ctrl-Z", + KeyCtrlSpace: "Ctrl-Space", + KeyCtrlUnderscore: "Ctrl-_", + KeyCtrlRightSq: "Ctrl-]", + KeyCtrlBackslash: "Ctrl-\\", + KeyCtrlCarat: "Ctrl-^", +} + +// Name returns a printable value or the key stroke. This can be used +// when printing the event, for example. +func (ev *EventKey) Name() string { + s := "" + m := []string{} + if ev.mod&ModShift != 0 { + m = append(m, "Shift") + } + if ev.mod&ModAlt != 0 { + m = append(m, "Alt") + } + if ev.mod&ModMeta != 0 { + m = append(m, "Meta") + } + if ev.mod&ModCtrl != 0 { + m = append(m, "Ctrl") + } + + ok := false + if s, ok = KeyNames[ev.key]; !ok { + if ev.key == KeyRune { + s = "Rune[" + string(ev.ch) + "]" + } else { + s = fmt.Sprintf("Key[%d,%d]", ev.key, int(ev.ch)) + } + } + if len(m) != 0 { + if ev.mod&ModCtrl != 0 && strings.HasPrefix(s, "Ctrl-") { + s = s[5:] + } + return fmt.Sprintf("%s+%s", strings.Join(m, "+"), s) + } + return s +} + +// NewEventKey attempts to create a suitable event. It parses the various +// ASCII control sequences if KeyRune is passed for Key, but if the caller +// has more precise information it should set that specifically. Callers +// that aren't sure about modifier state (most) should just pass ModNone. +func NewEventKey(k Key, ch rune, mod ModMask) *EventKey { + if k == KeyRune && (ch < ' ' || ch == 0x7f) { + // Turn specials into proper key codes. This is for + // control characters and the DEL. + k = Key(ch) + if mod == ModNone && ch < ' ' { + switch Key(ch) { + case KeyBackspace, KeyTab, KeyEsc, KeyEnter: + // these keys are directly typeable without CTRL + default: + // most likely entered with a CTRL keypress + mod = ModCtrl + } + } + } + return &EventKey{t: time.Now(), key: k, ch: ch, mod: mod} +} + +// ModMask is a mask of modifier keys. Note that it will not always be +// possible to report modifier keys. +type ModMask int16 + +// These are the modifiers keys that can be sent either with a key press, +// or a mouse event. Note that as of now, due to the confusion associated +// with Meta, and the lack of support for it on many/most platforms, the +// current implementations never use it. Instead, they use ModAlt, even for +// events that could possibly have been distinguished from ModAlt. +const ( + ModShift ModMask = 1 << iota + ModCtrl + ModAlt + ModMeta + ModNone ModMask = 0 +) + +// Key is a generic value for representing keys, and especially special +// keys (function keys, cursor movement keys, etc.) For normal keys, like +// ASCII letters, we use KeyRune, and then expect the application to +// inspect the Rune() member of the EventKey. +type Key int16 + +// This is the list of named keys. KeyRune is special however, in that it is +// a place holder key indicating that a printable character was sent. The +// actual value of the rune will be transported in the Rune of the associated +// EventKey. 
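A quick illustration of the key model documented above (editor's sketch, not part of the vendored file): NewEventKey normalizes control characters into the Ctrl- key codes listed in KeyNames, while printable input arrives as KeyRune.

package main

import (
	"fmt"

	"github.com/gdamore/tcell/v2"
)

func main() {
	// Printable input is carried as KeyRune plus the rune itself.
	fmt.Println(tcell.NewEventKey(tcell.KeyRune, 'a', tcell.ModNone).Name()) // Rune[a]

	// Control characters passed as KeyRune are normalized to Ctrl- codes,
	// and ModCtrl is inferred for keys not directly typeable without CTRL.
	fmt.Println(tcell.NewEventKey(tcell.KeyRune, rune(0x01), tcell.ModNone).Name()) // Ctrl+A

	// Special keys use their named codes directly.
	fmt.Println(tcell.NewEventKey(tcell.KeyF1, 0, tcell.ModNone).Name()) // F1
}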
+const ( + KeyRune Key = iota + 256 + KeyUp + KeyDown + KeyRight + KeyLeft + KeyUpLeft + KeyUpRight + KeyDownLeft + KeyDownRight + KeyCenter + KeyPgUp + KeyPgDn + KeyHome + KeyEnd + KeyInsert + KeyDelete + KeyHelp + KeyExit + KeyClear + KeyCancel + KeyPrint + KeyPause + KeyBacktab + KeyF1 + KeyF2 + KeyF3 + KeyF4 + KeyF5 + KeyF6 + KeyF7 + KeyF8 + KeyF9 + KeyF10 + KeyF11 + KeyF12 + KeyF13 + KeyF14 + KeyF15 + KeyF16 + KeyF17 + KeyF18 + KeyF19 + KeyF20 + KeyF21 + KeyF22 + KeyF23 + KeyF24 + KeyF25 + KeyF26 + KeyF27 + KeyF28 + KeyF29 + KeyF30 + KeyF31 + KeyF32 + KeyF33 + KeyF34 + KeyF35 + KeyF36 + KeyF37 + KeyF38 + KeyF39 + KeyF40 + KeyF41 + KeyF42 + KeyF43 + KeyF44 + KeyF45 + KeyF46 + KeyF47 + KeyF48 + KeyF49 + KeyF50 + KeyF51 + KeyF52 + KeyF53 + KeyF54 + KeyF55 + KeyF56 + KeyF57 + KeyF58 + KeyF59 + KeyF60 + KeyF61 + KeyF62 + KeyF63 + KeyF64 +) + +const ( + // These key codes are used internally, and will never appear to applications. + keyPasteStart Key = iota + 16384 + keyPasteEnd +) + +// These are the control keys. Note that they overlap with other keys, +// perhaps. For example, KeyCtrlH is the same as KeyBackspace. +const ( + KeyCtrlSpace Key = iota + KeyCtrlA + KeyCtrlB + KeyCtrlC + KeyCtrlD + KeyCtrlE + KeyCtrlF + KeyCtrlG + KeyCtrlH + KeyCtrlI + KeyCtrlJ + KeyCtrlK + KeyCtrlL + KeyCtrlM + KeyCtrlN + KeyCtrlO + KeyCtrlP + KeyCtrlQ + KeyCtrlR + KeyCtrlS + KeyCtrlT + KeyCtrlU + KeyCtrlV + KeyCtrlW + KeyCtrlX + KeyCtrlY + KeyCtrlZ + KeyCtrlLeftSq // Escape + KeyCtrlBackslash + KeyCtrlRightSq + KeyCtrlCarat + KeyCtrlUnderscore +) + +// Special values - these are fixed in an attempt to make it more likely +// that aliases will encode the same way. + +// These are the defined ASCII values for key codes. They generally match +// with KeyCtrl values. +const ( + KeyNUL Key = iota + KeySOH + KeySTX + KeyETX + KeyEOT + KeyENQ + KeyACK + KeyBEL + KeyBS + KeyTAB + KeyLF + KeyVT + KeyFF + KeyCR + KeySO + KeySI + KeyDLE + KeyDC1 + KeyDC2 + KeyDC3 + KeyDC4 + KeyNAK + KeySYN + KeyETB + KeyCAN + KeyEM + KeySUB + KeyESC + KeyFS + KeyGS + KeyRS + KeyUS + KeyDEL Key = 0x7F +) + +// These keys are aliases for other names. +const ( + KeyBackspace = KeyBS + KeyTab = KeyTAB + KeyEsc = KeyESC + KeyEscape = KeyESC + KeyEnter = KeyCR + KeyBackspace2 = KeyDEL +) diff --git a/vendor/github.com/gdamore/tcell/v2/mouse.go b/vendor/github.com/gdamore/tcell/v2/mouse.go new file mode 100644 index 0000000000..008c2e26c3 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/mouse.go @@ -0,0 +1,103 @@ +// Copyright 2020 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "time" +) + +// EventMouse is a mouse event. It is sent on either mouse up or mouse down +// events. It is also sent on mouse motion events - if the terminal supports +// it. We make every effort to ensure that mouse release events are delivered. +// Hence, click drag can be identified by a motion event with the mouse down, +// without any intervening button release. 
On some terminals only the initiating +// press and terminating release event will be delivered. +// +// Mouse wheel events, when reported, may appear on their own as individual +// impulses; that is, there will normally not be a release event delivered +// for mouse wheel movements. +// +// Most terminals cannot report the state of more than one button at a time -- +// and some cannot report motion events unless a button is pressed. +// +// Applications can inspect the time between events to resolve double or +// triple clicks. +type EventMouse struct { + t time.Time + btn ButtonMask + mod ModMask + x int + y int +} + +// When returns the time when this EventMouse was created. +func (ev *EventMouse) When() time.Time { + return ev.t +} + +// Buttons returns the list of buttons that were pressed or wheel motions. +func (ev *EventMouse) Buttons() ButtonMask { + return ev.btn +} + +// Modifiers returns a list of keyboard modifiers that were pressed +// with the mouse button(s). +func (ev *EventMouse) Modifiers() ModMask { + return ev.mod +} + +// Position returns the mouse position in character cells. The origin +// 0, 0 is at the upper left corner. +func (ev *EventMouse) Position() (int, int) { + return ev.x, ev.y +} + +// NewEventMouse is used to create a new mouse event. Applications +// shouldn't need to use this; its mostly for screen implementors. +func NewEventMouse(x, y int, btn ButtonMask, mod ModMask) *EventMouse { + return &EventMouse{t: time.Now(), x: x, y: y, btn: btn, mod: mod} +} + +// ButtonMask is a mask of mouse buttons and wheel events. Mouse button presses +// are normally delivered as both press and release events. Mouse wheel events +// are normally just single impulse events. Windows supports up to eight +// separate buttons plus all four wheel directions, but XTerm can only support +// mouse buttons 1-3 and wheel up/down. Its not unheard of for terminals +// to support only one or two buttons (think Macs). Old terminals, and true +// emulations (such as vt100) won't support mice at all, of course. +type ButtonMask int16 + +// These are the actual button values. Note that tcell version 1.x reversed buttons +// two and three on *nix based terminals. We use button 1 as the primary, and +// button 2 as the secondary, and button 3 (which is often missing) as the middle. +const ( + Button1 ButtonMask = 1 << iota // Usually the left (primary) mouse button. + Button2 // Usually the right (secondary) mouse button. + Button3 // Usually the middle mouse button. + Button4 // Often a side button (thumb/next). + Button5 // Often a side button (thumb/prev). + Button6 + Button7 + Button8 + WheelUp // Wheel motion up/away from user. + WheelDown // Wheel motion down/towards user. + WheelLeft // Wheel motion to left. + WheelRight // Wheel motion to right. + ButtonNone ButtonMask = 0 // No button or wheel events. + + ButtonPrimary = Button1 + ButtonSecondary = Button2 + ButtonMiddle = Button3 +) diff --git a/vendor/github.com/gdamore/tcell/v2/nonblock_bsd.go b/vendor/github.com/gdamore/tcell/v2/nonblock_bsd.go new file mode 100644 index 0000000000..28fd4644c7 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/nonblock_bsd.go @@ -0,0 +1,42 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. 
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build darwin dragonfly freebsd netbsd openbsd + +package tcell + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// BSD systems use TIOC style ioctls. + +// tcSetBufParams is used by the tty driver on UNIX systems to configure the +// buffering parameters (minimum character count and minimum wait time in msec.) +// This also waits for output to drain first. +func tcSetBufParams(fd int, vMin uint8, vTime uint8) error { + _ = syscall.SetNonblock(fd, true) + tio, err := unix.IoctlGetTermios(fd, unix.TIOCGETA) + if err != nil { + return err + } + tio.Cc[unix.VMIN] = vMin + tio.Cc[unix.VTIME] = vTime + if err = unix.IoctlSetTermios(fd, unix.TIOCSETAW, tio); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/gdamore/tcell/v2/nonblock_unix.go b/vendor/github.com/gdamore/tcell/v2/nonblock_unix.go new file mode 100644 index 0000000000..fe31844cf5 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/nonblock_unix.go @@ -0,0 +1,40 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build linux aix zos solaris + +package tcell + +import ( + "syscall" + + "golang.org/x/sys/unix" +) + +// tcSetBufParams is used by the tty driver on UNIX systems to configure the +// buffering parameters (minimum character count and minimum wait time in msec.) +// This also waits for output to drain first. +func tcSetBufParams(fd int, vMin uint8, vTime uint8) error { + _ = syscall.SetNonblock(fd, true) + tio, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return err + } + tio.Cc[unix.VMIN] = vMin + tio.Cc[unix.VTIME] = vTime + if err = unix.IoctlSetTermios(fd, unix.TCSETSW, tio); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/gdamore/tcell/v2/paste.go b/vendor/github.com/gdamore/tcell/v2/paste.go new file mode 100644 index 0000000000..71cf8b1cce --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/paste.go @@ -0,0 +1,48 @@ +// Copyright 2020 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
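A hedged usage sketch for the mouse events defined above (editor's illustration; NewScreen and the Screen methods come from the Screen API vendored later in this diff):

package main

import "github.com/gdamore/tcell/v2"

func main() {
	s, err := tcell.NewScreen()
	if err != nil {
		panic(err)
	}
	if err = s.Init(); err != nil {
		panic(err)
	}
	defer s.Fini()
	s.EnableMouse()

	for {
		switch ev := s.PollEvent().(type) {
		case *tcell.EventMouse:
			// Primary-button presses and drags report cell coordinates.
			if ev.Buttons()&tcell.ButtonPrimary != 0 {
				x, y := ev.Position()
				s.SetContent(x, y, '*', nil, tcell.StyleDefault)
				s.Show()
			}
		case *tcell.EventKey:
			if ev.Key() == tcell.KeyEsc {
				return
			}
		}
	}
}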
+ +package tcell + +import ( + "time" +) + +// EventPaste is used to mark the start and end of a bracketed paste. +// An event with .Start() true will be sent to mark the start. +// Then a number of keys will be sent to indicate that the content +// is pasted in. At the end, an event with .Start() false will be sent. +type EventPaste struct { + start bool + t time.Time +} + +// When returns the time when this EventMouse was created. +func (ev *EventPaste) When() time.Time { + return ev.t +} + +// Start returns true if this is the start of a paste. +func (ev *EventPaste) Start() bool { + return ev.start +} + +// End returns true if this is the end of a paste. +func (ev *EventPaste) End() bool { + return !ev.start +} + +// NewEventPaste returns a new EventPaste. +func NewEventPaste(start bool) *EventPaste { + return &EventPaste{t: time.Now(), start: start} +} diff --git a/vendor/github.com/gdamore/tcell/v2/resize.go b/vendor/github.com/gdamore/tcell/v2/resize.go new file mode 100644 index 0000000000..0385673c83 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/resize.go @@ -0,0 +1,42 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "time" +) + +// EventResize is sent when the window size changes. +type EventResize struct { + t time.Time + w int + h int +} + +// NewEventResize creates an EventResize with the new updated window size, +// which is given in character cells. +func NewEventResize(width, height int) *EventResize { + return &EventResize{t: time.Now(), w: width, h: height} +} + +// When returns the time when the Event was created. +func (ev *EventResize) When() time.Time { + return ev.t +} + +// Size returns the new window size as width, height in character cells. +func (ev *EventResize) Size() (int, int) { + return ev.w, ev.h +} diff --git a/vendor/github.com/gdamore/tcell/v2/runes.go b/vendor/github.com/gdamore/tcell/v2/runes.go new file mode 100644 index 0000000000..ed9c63b5c1 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/runes.go @@ -0,0 +1,111 @@ +// Copyright 2015 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// The names of these constants are chosen to match Terminfo names, +// modulo case, and changing the prefix from ACS_ to Rune. These are +// the runes we provide extra special handling for, with ASCII fallbacks +// for terminals that lack them. 
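Tying this to the fallback machinery below, a hedged sketch (editor's illustration) of registering an application-level substitution so 'ø' degrades to "o" on terminals whose charset cannot display it:

package main

import "github.com/gdamore/tcell/v2"

func main() {
	s, err := tcell.NewScreen()
	if err != nil {
		panic(err)
	}
	if err = s.Init(); err != nil {
		panic(err)
	}
	defer s.Fini()

	// Without a registered fallback, an unrepresentable rune is drawn as '?'.
	s.RegisterRuneFallback('ø', "o")
	s.SetContent(0, 0, 'ø', nil, tcell.StyleDefault)
	s.Show()
}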
+const ( + RuneSterling = '£' + RuneDArrow = '↓' + RuneLArrow = '←' + RuneRArrow = '→' + RuneUArrow = '↑' + RuneBullet = '·' + RuneBoard = '░' + RuneCkBoard = '▒' + RuneDegree = '°' + RuneDiamond = '◆' + RuneGEqual = '≥' + RunePi = 'π' + RuneHLine = '─' + RuneLantern = '§' + RunePlus = '┼' + RuneLEqual = '≤' + RuneLLCorner = '└' + RuneLRCorner = '┘' + RuneNEqual = '≠' + RunePlMinus = '±' + RuneS1 = '⎺' + RuneS3 = '⎻' + RuneS7 = '⎼' + RuneS9 = '⎽' + RuneBlock = '█' + RuneTTee = '┬' + RuneRTee = '┤' + RuneLTee = '├' + RuneBTee = '┴' + RuneULCorner = '┌' + RuneURCorner = '┐' + RuneVLine = '│' +) + +// RuneFallbacks is the default map of fallback strings that will be +// used to replace a rune when no other more appropriate transformation +// is available, and the rune cannot be displayed directly. +// +// New entries may be added to this map over time, as it becomes clear +// that such is desirable. Characters that represent either letters or +// numbers should not be added to this list unless it is certain that +// the meaning will still convey unambiguously. +// +// As an example, it would be appropriate to add an ASCII mapping for +// the full width form of the letter 'A', but it would not be appropriate +// to do so a glyph representing the country China. +// +// Programs that desire richer fallbacks may register additional ones, +// or change or even remove these mappings with Screen.RegisterRuneFallback +// Screen.UnregisterRuneFallback methods. +// +// Note that Unicode is presumed to be able to display all glyphs. +// This is a pretty poor assumption, but there is no easy way to +// figure out which glyphs are supported in a given font. Hence, +// some care in selecting the characters you support in your application +// is still appropriate. +var RuneFallbacks = map[rune]string{ + RuneSterling: "f", + RuneDArrow: "v", + RuneLArrow: "<", + RuneRArrow: ">", + RuneUArrow: "^", + RuneBullet: "o", + RuneBoard: "#", + RuneCkBoard: ":", + RuneDegree: "\\", + RuneDiamond: "+", + RuneGEqual: ">", + RunePi: "*", + RuneHLine: "-", + RuneLantern: "#", + RunePlus: "+", + RuneLEqual: "<", + RuneLLCorner: "+", + RuneLRCorner: "+", + RuneNEqual: "!", + RunePlMinus: "#", + RuneS1: "~", + RuneS3: "-", + RuneS7: "-", + RuneS9: "_", + RuneBlock: "#", + RuneTTee: "+", + RuneRTee: "+", + RuneLTee: "+", + RuneBTee: "+", + RuneULCorner: "+", + RuneURCorner: "+", + RuneVLine: "|", +} diff --git a/vendor/github.com/gdamore/tcell/v2/screen.go b/vendor/github.com/gdamore/tcell/v2/screen.go new file mode 100644 index 0000000000..cbe505f9e4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/screen.go @@ -0,0 +1,260 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// Screen represents the physical (or emulated) screen. +// This can be a terminal window or a physical console. Platforms implement +// this differently. +type Screen interface { + // Init initializes the screen for use. 
+ Init() error + + // Fini finalizes the screen also releasing resources. + Fini() + + // Clear erases the screen. The contents of any screen buffers + // will also be cleared. This has the logical effect of + // filling the screen with spaces, using the global default style. + Clear() + + // Fill fills the screen with the given character and style. + Fill(rune, Style) + + // SetCell is an older API, and will be removed. Please use + // SetContent instead; SetCell is implemented in terms of SetContent. + SetCell(x int, y int, style Style, ch ...rune) + + // GetContent returns the contents at the given location. If the + // coordinates are out of range, then the values will be 0, nil, + // StyleDefault. Note that the contents returned are logical contents + // and may not actually be what is displayed, but rather are what will + // be displayed if Show() or Sync() is called. The width is the width + // in screen cells; most often this will be 1, but some East Asian + // characters require two cells. + GetContent(x, y int) (mainc rune, combc []rune, style Style, width int) + + // SetContent sets the contents of the given cell location. If + // the coordinates are out of range, then the operation is ignored. + // + // The first rune is the primary non-zero width rune. The array + // that follows is a possible list of combining characters to append, + // and will usually be nil (no combining characters.) + // + // The results are not displayd until Show() or Sync() is called. + // + // Note that wide (East Asian full width) runes occupy two cells, + // and attempts to place character at next cell to the right will have + // undefined effects. Wide runes that are printed in the + // last column will be replaced with a single width space on output. + SetContent(x int, y int, mainc rune, combc []rune, style Style) + + // SetStyle sets the default style to use when clearing the screen + // or when StyleDefault is specified. If it is also StyleDefault, + // then whatever system/terminal default is relevant will be used. + SetStyle(style Style) + + // ShowCursor is used to display the cursor at a given location. + // If the coordinates -1, -1 are given or are otherwise outside the + // dimensions of the screen, the cursor will be hidden. + ShowCursor(x int, y int) + + // HideCursor is used to hide the cursor. Its an alias for + // ShowCursor(-1, -1). + HideCursor() + + // Size returns the screen size as width, height. This changes in + // response to a call to Clear or Flush. + Size() (int, int) + + // ChannelEvents is an infinite loop that waits for an event and + // channels it into the user provided channel ch. Closing the + // quit channel and calling the Fini method are cancellation + // signals. When a cancellation signal is received the method + // returns after closing ch. + // + // This method should be used as a goroutine. + // + // NOTE: PollEvent should not be called while this method is running. + ChannelEvents(ch chan<- Event, quit <-chan struct{}) + + // PollEvent waits for events to arrive. Main application loops + // must spin on this to prevent the application from stalling. + // Furthermore, this will return nil if the Screen is finalized. + PollEvent() Event + + // HasPendingEvent returns true if PollEvent would return an event + // without blocking. If the screen is stopped and PollEvent would + // return nil, then the return value from this function is unspecified. 
+ // The purpose of this function is to allow multiple events to be collected + // at once, to minimize screen redraws. + HasPendingEvent() bool + + // PostEvent tries to post an event into the event stream. This + // can fail if the event queue is full. In that case, the event + // is dropped, and ErrEventQFull is returned. + PostEvent(ev Event) error + + // Deprecated: PostEventWait is unsafe, and will be removed + // in the future. + // + // PostEventWait is like PostEvent, but if the queue is full, it + // blocks until there is space in the queue, making delivery + // reliable. However, it is VERY important that this function + // never be called from within whatever event loop is polling + // with PollEvent(), otherwise a deadlock may arise. + // + // For this reason, when using this function, the use of a + // Goroutine is recommended to ensure no deadlock can occur. + PostEventWait(ev Event) + + // EnableMouse enables the mouse. (If your terminal supports it.) + // If no flags are specified, then all events are reported, if the + // terminal supports them. + EnableMouse(...MouseFlags) + + // DisableMouse disables the mouse. + DisableMouse() + + // EnablePaste enables bracketed paste mode, if supported. + EnablePaste() + + // DisablePaste disables bracketed paste mode. + DisablePaste() + + // HasMouse returns true if the terminal (apparently) supports a + // mouse. Note that the a return value of true doesn't guarantee that + // a mouse/pointing device is present; a false return definitely + // indicates no mouse support is available. + HasMouse() bool + + // Colors returns the number of colors. All colors are assumed to + // use the ANSI color map. If a terminal is monochrome, it will + // return 0. + Colors() int + + // Show makes all the content changes made using SetContent() visible + // on the display. + // + // It does so in the most efficient and least visually disruptive + // manner possible. + Show() + + // Sync works like Show(), but it updates every visible cell on the + // physical display, assuming that it is not synchronized with any + // internal model. This may be both expensive and visually jarring, + // so it should only be used when believed to actually be necessary. + // + // Typically this is called as a result of a user-requested redraw + // (e.g. to clear up on screen corruption caused by some other program), + // or during a resize event. + Sync() + + // CharacterSet returns information about the character set. + // This isn't the full locale, but it does give us the input/output + // character set. Note that this is just for diagnostic purposes, + // we normally translate input/output to/from UTF-8, regardless of + // what the user's environment is. + CharacterSet() string + + // RegisterRuneFallback adds a fallback for runes that are not + // part of the character set -- for example one could register + // o as a fallback for ø. This should be done cautiously for + // characters that might be displayed ordinarily in language + // specific text -- characters that could change the meaning of + // of written text would be dangerous. The intention here is to + // facilitate fallback characters in pseudo-graphical applications. + // + // If the terminal has fallbacks already in place via an alternate + // character set, those are used in preference. Also, standard + // fallbacks for graphical characters in the ACSC terminfo string + // are registered implicitly. + // + // The display string should be the same width as original rune. 
+ // This makes it possible to register two character replacements + // for full width East Asian characters, for example. + // + // It is recommended that replacement strings consist only of + // 7-bit ASCII, since other characters may not display everywhere. + RegisterRuneFallback(r rune, subst string) + + // UnregisterRuneFallback unmaps a replacement. It will unmap + // the implicit ASCII replacements for alternate characters as well. + // When an unmapped char needs to be displayed, but no suitable + // glyph is available, '?' is emitted instead. It is not possible + // to "disable" the use of alternate characters that are supported + // by your terminal except by changing the terminal database. + UnregisterRuneFallback(r rune) + + // CanDisplay returns true if the given rune can be displayed on + // this screen. Note that this is a best guess effort -- whether + // your fonts support the character or not may be questionable. + // Mostly this is for folks who work outside of Unicode. + // + // If checkFallbacks is true, then if any (possibly imperfect) + // fallbacks are registered, this will return true. This will + // also return true if the terminal can replace the glyph with + // one that is visually indistinguishable from the one requested. + CanDisplay(r rune, checkFallbacks bool) bool + + // Resize does nothing, since its generally not possible to + // ask a screen to resize, but it allows the Screen to implement + // the View interface. + Resize(int, int, int, int) + + // HasKey returns true if the keyboard is believed to have the + // key. In some cases a keyboard may have keys with this name + // but no support for them, while in others a key may be reported + // as supported but not actually be usable (such as some emulators + // that hijack certain keys). Its best not to depend to strictly + // on this function, but it can be used for hinting when building + // menus, displayed hot-keys, etc. Note that KeyRune (literal + // runes) is always true. + HasKey(Key) bool + + // Suspend pauses input and output processing. It also restores the + // terminal settings to what they were when the application started. + // This can be used to, for example, run a sub-shell. + Suspend() error + + // Resume resumes after Suspend(). + Resume() error + + // Beep attempts to sound an OS-dependent audible alert and returns an error + // when unsuccessful. + Beep() error +} + +// NewScreen returns a default Screen suitable for the user's terminal +// environment. +func NewScreen() (Screen, error) { + // Windows is happier if we try for a console screen first. + if s, _ := NewConsoleScreen(); s != nil { + return s, nil + } else if s, e := NewTerminfoScreen(); s != nil { + return s, nil + } else { + return nil, e + } +} + +// MouseFlags are options to modify the handling of mouse events. +// Actual events can be or'd together. 
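A hedged sketch (editor's illustration) combining the pieces above: ChannelEvents running in its own goroutine, as the interface documentation recommends, with or'd MouseFlags passed to EnableMouse as described below.

package main

import "github.com/gdamore/tcell/v2"

func main() {
	s, err := tcell.NewScreen()
	if err != nil {
		panic(err)
	}
	if err = s.Init(); err != nil {
		panic(err)
	}
	defer s.Fini()
	s.EnableMouse(tcell.MouseButtonEvents | tcell.MouseDragEvents)

	events := make(chan tcell.Event)
	quit := make(chan struct{})
	go s.ChannelEvents(events, quit) // do not mix with PollEvent

	for ev := range events {
		if k, ok := ev.(*tcell.EventKey); ok && k.Key() == tcell.KeyEsc {
			close(quit) // signals ChannelEvents to close(events)
			break
		}
	}
}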
+type MouseFlags int + +const ( + MouseButtonEvents = MouseFlags(1) // Click events only + MouseDragEvents = MouseFlags(2) // Click-drag events (includes button events) + MouseMotionEvents = MouseFlags(4) // All mouse events (includes click and drag events) +) diff --git a/vendor/github.com/gdamore/tcell/v2/simulation.go b/vendor/github.com/gdamore/tcell/v2/simulation.go new file mode 100644 index 0000000000..451460be25 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/simulation.go @@ -0,0 +1,554 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "sync" + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// NewSimulationScreen returns a SimulationScreen. Note that +// SimulationScreen is also a Screen. +func NewSimulationScreen(charset string) SimulationScreen { + if charset == "" { + charset = "UTF-8" + } + s := &simscreen{charset: charset} + return s +} + +// SimulationScreen represents a screen simulation. This is intended to +// be a superset of normal Screens, but also adds some important interfaces +// for testing. +type SimulationScreen interface { + // InjectKeyBytes injects a stream of bytes corresponding to + // the native encoding (see charset). It turns true if the entire + // set of bytes were processed and delivered as KeyEvents, false + // if any bytes were not fully understood. Any bytes that are not + // fully converted are discarded. + InjectKeyBytes(buf []byte) bool + + // InjectKey injects a key event. The rune is a UTF-8 rune, post + // any translation. + InjectKey(key Key, r rune, mod ModMask) + + // InjectMouse injects a mouse event. + InjectMouse(x, y int, buttons ButtonMask, mod ModMask) + + // SetSize resizes the underlying physical screen. It also causes + // a resize event to be injected during the next Show() or Sync(). + // A new physical contents array will be allocated (with data from + // the old copied), so any prior value obtained with GetContents + // won't be used anymore + SetSize(width, height int) + + // GetContents returns screen contents as an array of + // cells, along with the physical width & height. Note that the + // physical contents will be used until the next time SetSize() + // is called. + GetContents() (cells []SimCell, width int, height int) + + // GetCursor returns the cursor details. + GetCursor() (x int, y int, visible bool) + + Screen +} + +// SimCell represents a simulated screen cell. The purpose of this +// is to track on screen content. +type SimCell struct { + // Bytes is the actual character bytes. Normally this is + // rune data, but it could be be data in another encoding system. + Bytes []byte + + // Style is the style used to display the data. + Style Style + + // Runes is the list of runes, unadulterated, in UTF-8. 
+ Runes []rune +} + +type simscreen struct { + physw int + physh int + fini bool + style Style + evch chan Event + quit chan struct{} + + front []SimCell + back CellBuffer + clear bool + cursorx int + cursory int + cursorvis bool + mouse bool + paste bool + charset string + encoder transform.Transformer + decoder transform.Transformer + fillchar rune + fillstyle Style + fallback map[rune]string + + sync.Mutex +} + +func (s *simscreen) Init() error { + s.evch = make(chan Event, 10) + s.quit = make(chan struct{}) + s.fillchar = 'X' + s.fillstyle = StyleDefault + s.mouse = false + s.physw = 80 + s.physh = 25 + s.cursorx = -1 + s.cursory = -1 + s.style = StyleDefault + + if enc := GetEncoding(s.charset); enc != nil { + s.encoder = enc.NewEncoder() + s.decoder = enc.NewDecoder() + } else { + return ErrNoCharset + } + + s.front = make([]SimCell, s.physw*s.physh) + s.back.Resize(80, 25) + + // default fallbacks + s.fallback = make(map[rune]string) + for k, v := range RuneFallbacks { + s.fallback[k] = v + } + return nil +} + +func (s *simscreen) Fini() { + s.Lock() + s.fini = true + s.back.Resize(0, 0) + s.Unlock() + if s.quit != nil { + close(s.quit) + } + s.physw = 0 + s.physh = 0 + s.front = nil +} + +func (s *simscreen) SetStyle(style Style) { + s.Lock() + s.style = style + s.Unlock() +} + +func (s *simscreen) Clear() { + s.Fill(' ', s.style) +} + +func (s *simscreen) Fill(r rune, style Style) { + s.Lock() + s.back.Fill(r, style) + s.Unlock() +} + +func (s *simscreen) SetCell(x, y int, style Style, ch ...rune) { + + if len(ch) > 0 { + s.SetContent(x, y, ch[0], ch[1:], style) + } else { + s.SetContent(x, y, ' ', nil, style) + } +} + +func (s *simscreen) SetContent(x, y int, mainc rune, combc []rune, st Style) { + + s.Lock() + s.back.SetContent(x, y, mainc, combc, st) + s.Unlock() +} + +func (s *simscreen) GetContent(x, y int) (rune, []rune, Style, int) { + var mainc rune + var combc []rune + var style Style + var width int + s.Lock() + mainc, combc, style, width = s.back.GetContent(x, y) + s.Unlock() + return mainc, combc, style, width +} + +func (s *simscreen) drawCell(x, y int) int { + + mainc, combc, style, width := s.back.GetContent(x, y) + if !s.back.Dirty(x, y) { + return width + } + if x >= s.physw || y >= s.physh || x < 0 || y < 0 { + return width + } + simc := &s.front[(y*s.physw)+x] + + if style == StyleDefault { + style = s.style + } + simc.Style = style + simc.Runes = append([]rune{mainc}, combc...) + + // now emit runes - taking care to not overrun width with a + // wide character, and to ensure that we emit exactly one regular + // character followed up by any residual combing characters + + simc.Bytes = nil + + if x > s.physw-width { + simc.Runes = []rune{' '} + simc.Bytes = []byte{' '} + return width + } + + lbuf := make([]byte, 12) + ubuf := make([]byte, 12) + nout := 0 + + for _, r := range simc.Runes { + + l := utf8.EncodeRune(ubuf, r) + + nout, _, _ = s.encoder.Transform(lbuf, ubuf[:l], true) + + if nout == 0 || lbuf[0] == '\x1a' { + + // skip combining + + if subst, ok := s.fallback[r]; ok { + simc.Bytes = append(simc.Bytes, + []byte(subst)...) + + } else if r >= ' ' && r <= '~' { + simc.Bytes = append(simc.Bytes, byte(r)) + + } else if simc.Bytes == nil { + simc.Bytes = append(simc.Bytes, '?') + } + } else { + simc.Bytes = append(simc.Bytes, lbuf[:nout]...) 
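// Editor's annotation on the encoder branch above: a zero-length result
// (or a leading 0x1A substitute byte) means the target charset cannot
// represent the rune, so drawCell falls back to a registered substitution
// string, a plain-ASCII copy of the rune, or '?' as a last resort.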
+ } + } + s.back.SetDirty(x, y, false) + return width +} + +func (s *simscreen) ShowCursor(x, y int) { + s.Lock() + s.cursorx, s.cursory = x, y + s.showCursor() + s.Unlock() +} + +func (s *simscreen) HideCursor() { + s.ShowCursor(-1, -1) +} + +func (s *simscreen) showCursor() { + + x, y := s.cursorx, s.cursory + if x < 0 || y < 0 || x >= s.physw || y >= s.physh { + s.cursorvis = false + } else { + s.cursorvis = true + } +} + +func (s *simscreen) hideCursor() { + // does not update cursor position + s.cursorvis = false +} + +func (s *simscreen) Show() { + s.Lock() + s.resize() + s.draw() + s.Unlock() +} + +func (s *simscreen) clearScreen() { + // We emulate a hardware clear by filling with a specific pattern + for i := range s.front { + s.front[i].Style = s.fillstyle + s.front[i].Runes = []rune{s.fillchar} + s.front[i].Bytes = []byte{byte(s.fillchar)} + } + s.clear = false +} + +func (s *simscreen) draw() { + s.hideCursor() + if s.clear { + s.clearScreen() + } + + w, h := s.back.Size() + for y := 0; y < h; y++ { + for x := 0; x < w; x++ { + width := s.drawCell(x, y) + x += width - 1 + } + } + s.showCursor() +} + +func (s *simscreen) EnableMouse(...MouseFlags) { + s.mouse = true +} + +func (s *simscreen) DisableMouse() { + s.mouse = false +} + +func (s *simscreen) EnablePaste() { + s.paste = true +} + +func (s *simscreen) DisablePaste() { + s.paste = false +} + +func (s *simscreen) Size() (int, int) { + s.Lock() + w, h := s.back.Size() + s.Unlock() + return w, h +} + +func (s *simscreen) resize() { + w, h := s.physw, s.physh + ow, oh := s.back.Size() + if w != ow || h != oh { + s.back.Resize(w, h) + ev := NewEventResize(w, h) + s.PostEvent(ev) + } +} + +func (s *simscreen) Colors() int { + return 256 +} + +func (s *simscreen) ChannelEvents(ch chan<- Event, quit <-chan struct{}) { + defer close(ch) + for { + select { + case <-quit: + return + case <-s.quit: + return + case ev := <-s.evch: + select { + case <-quit: + return + case <-s.quit: + return + case ch <- ev: + } + } + } +} + +func (s *simscreen) PollEvent() Event { + select { + case <-s.quit: + return nil + case ev := <-s.evch: + return ev + } +} + +func (s *simscreen) HasPendingEvent() bool { + return len(s.evch) > 0 +} + +func (s *simscreen) PostEventWait(ev Event) { + s.evch <- ev +} + +func (s *simscreen) PostEvent(ev Event) error { + select { + case s.evch <- ev: + return nil + default: + return ErrEventQFull + } +} + +func (s *simscreen) InjectMouse(x, y int, buttons ButtonMask, mod ModMask) { + ev := NewEventMouse(x, y, buttons, mod) + s.PostEvent(ev) +} + +func (s *simscreen) InjectKey(key Key, r rune, mod ModMask) { + ev := NewEventKey(key, r, mod) + s.PostEvent(ev) +} + +func (s *simscreen) InjectKeyBytes(b []byte) bool { + failed := false + +outer: + for len(b) > 0 { + if b[0] >= ' ' && b[0] <= 0x7F { + // printable ASCII easy to deal with -- no encodings + ev := NewEventKey(KeyRune, rune(b[0]), ModNone) + s.PostEvent(ev) + b = b[1:] + continue + } + + if b[0] < 0x80 { + mod := ModNone + // No encodings start with low numbered values + if Key(b[0]) >= KeyCtrlA && Key(b[0]) <= KeyCtrlZ { + mod = ModCtrl + } + ev := NewEventKey(Key(b[0]), 0, mod) + s.PostEvent(ev) + b = b[1:] + continue + } + + utfb := make([]byte, len(b)*4) // worst case + for l := 1; l < len(b); l++ { + s.decoder.Reset() + nout, nin, _ := s.decoder.Transform(utfb, b[:l], true) + + if nout != 0 { + r, _ := utf8.DecodeRune(utfb[:nout]) + if r != utf8.RuneError { + ev := NewEventKey(KeyRune, r, ModNone) + s.PostEvent(ev) + } + b = b[nin:] + continue outer + } + 
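// Editor's annotation: this inner loop retries the decoder with successively
// longer prefixes of b; once a whole rune decodes, the consumed bytes are
// dropped and scanning resumes via `continue outer`. If no prefix decodes,
// the code below discards one byte and records the failure.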
} + failed = true + b = b[1:] + continue + } + + return !failed +} + +func (s *simscreen) Sync() { + s.Lock() + s.clear = true + s.resize() + s.back.Invalidate() + s.draw() + s.Unlock() +} + +func (s *simscreen) CharacterSet() string { + return s.charset +} + +func (s *simscreen) SetSize(w, h int) { + s.Lock() + newc := make([]SimCell, w*h) + for row := 0; row < h && row < s.physh; row++ { + for col := 0; col < w && col < s.physw; col++ { + newc[(row*w)+col] = s.front[(row*s.physw)+col] + } + } + s.cursorx, s.cursory = -1, -1 + s.physw, s.physh = w, h + s.front = newc + s.back.Resize(w, h) + s.Unlock() +} + +func (s *simscreen) GetContents() ([]SimCell, int, int) { + s.Lock() + cells, w, h := s.front, s.physw, s.physh + s.Unlock() + return cells, w, h +} + +func (s *simscreen) GetCursor() (int, int, bool) { + s.Lock() + x, y, vis := s.cursorx, s.cursory, s.cursorvis + s.Unlock() + return x, y, vis +} + +func (s *simscreen) RegisterRuneFallback(r rune, subst string) { + s.Lock() + s.fallback[r] = subst + s.Unlock() +} + +func (s *simscreen) UnregisterRuneFallback(r rune) { + s.Lock() + delete(s.fallback, r) + s.Unlock() +} + +func (s *simscreen) CanDisplay(r rune, checkFallbacks bool) bool { + + if enc := s.encoder; enc != nil { + nb := make([]byte, 6) + ob := make([]byte, 6) + num := utf8.EncodeRune(ob, r) + + enc.Reset() + dst, _, err := enc.Transform(nb, ob[:num], true) + if dst != 0 && err == nil && nb[0] != '\x1A' { + return true + } + } + if !checkFallbacks { + return false + } + if _, ok := s.fallback[r]; ok { + return true + } + return false +} + +func (s *simscreen) HasMouse() bool { + return false +} + +func (s *simscreen) Resize(int, int, int, int) {} + +func (s *simscreen) HasKey(Key) bool { + return true +} + +func (s *simscreen) Beep() error { + return nil +} + +func (s *simscreen) Suspend() error { + return nil +} + +func (s *simscreen) Resume() error { + return nil +} diff --git a/vendor/github.com/gdamore/tcell/v2/stdin_unix.go b/vendor/github.com/gdamore/tcell/v2/stdin_unix.go new file mode 100644 index 0000000000..e3d32e9fd9 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/stdin_unix.go @@ -0,0 +1,177 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package tcell + +import ( + "errors" + "fmt" + "os" + "os/signal" + "strconv" + "sync" + "syscall" + "time" + + "golang.org/x/term" +) + +// stdIoTty is an implementation of the Tty API based upon stdin/stdout. 
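A hedged sketch (editor's illustration) of driving the SimulationScreen defined above, as one might in a unit test:

package main

import (
	"fmt"

	"github.com/gdamore/tcell/v2"
)

func main() {
	s := tcell.NewSimulationScreen("UTF-8")
	if err := s.Init(); err != nil {
		panic(err)
	}
	defer s.Fini()

	s.SetSize(10, 2)
	s.InjectKey(tcell.KeyRune, 'q', tcell.ModNone) // queue a fake key press
	s.SetContent(0, 0, 'h', nil, tcell.StyleDefault)
	s.Show()

	// GetContents exposes the simulated physical cells for assertions.
	cells, w, h := s.GetContents()
	fmt.Println(w, h, string(cells[0].Runes)) // 10 2 h

	ev := s.PollEvent() // receives the injected key event
	fmt.Printf("%T\n", ev) // *tcell.EventKey
}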
+type stdIoTty struct { + fd int + in *os.File + out *os.File + saved *term.State + sig chan os.Signal + cb func() + stopQ chan struct{} + dev string + wg sync.WaitGroup + l sync.Mutex +} + +func (tty *stdIoTty) Read(b []byte) (int, error) { + return tty.in.Read(b) +} + +func (tty *stdIoTty) Write(b []byte) (int, error) { + return tty.out.Write(b) +} + +func (tty *stdIoTty) Close() error { + return nil +} + +func (tty *stdIoTty) Start() error { + tty.l.Lock() + defer tty.l.Unlock() + + // We open another copy of /dev/tty. This is a workaround for unusual behavior + // observed in macOS, apparently caused when a subshell (for example) closes our + // own tty device (when it exits for example). Getting a fresh new one seems to + // resolve the problem. (We believe this is a bug in the macOS tty driver that + // fails to account for dup() references to the same file before applying close() + // related behaviors to the tty.) We're also holding the original copy we opened + // since closing that might have deleterious effects as well. The upshot is that + // we will have up to two separate file handles open on /dev/tty. (Note that when + // using stdin/stdout instead of /dev/tty this problem is not observed.) + var err error + tty.in = os.Stdin + tty.out = os.Stdout + tty.fd = int(tty.in.Fd()) + + if !term.IsTerminal(tty.fd) { + return errors.New("device is not a terminal") + } + + _ = tty.in.SetReadDeadline(time.Time{}) + saved, err := term.MakeRaw(tty.fd) // also sets vMin and vTime + if err != nil { + return err + } + tty.saved = saved + + tty.stopQ = make(chan struct{}) + tty.wg.Add(1) + go func(stopQ chan struct{}) { + defer tty.wg.Done() + for { + select { + case <-tty.sig: + tty.l.Lock() + cb := tty.cb + tty.l.Unlock() + if cb != nil { + cb() + } + case <-stopQ: + return + } + } + }(tty.stopQ) + + signal.Notify(tty.sig, syscall.SIGWINCH) + return nil +} + +func (tty *stdIoTty) Drain() error { + _ = tty.in.SetReadDeadline(time.Now()) + if err := tcSetBufParams(tty.fd, 0, 0); err != nil { + return err + } + return nil +} + +func (tty *stdIoTty) Stop() error { + tty.l.Lock() + if err := term.Restore(tty.fd, tty.saved); err != nil { + tty.l.Unlock() + return err + } + _ = tty.in.SetReadDeadline(time.Now()) + + signal.Stop(tty.sig) + close(tty.stopQ) + tty.l.Unlock() + + tty.wg.Wait() + + return nil +} + +func (tty *stdIoTty) WindowSize() (int, int, error) { + w, h, err := term.GetSize(tty.fd) + if err != nil { + return 0, 0, err + } + if w == 0 { + w, _ = strconv.Atoi(os.Getenv("COLUMNS")) + } + if w == 0 { + w = 80 // default + } + if h == 0 { + h, _ = strconv.Atoi(os.Getenv("LINES")) + } + if h == 0 { + h = 25 // default + } + return w, h, nil +} + +func (tty *stdIoTty) NotifyResize(cb func()) { + tty.l.Lock() + tty.cb = cb + tty.l.Unlock() +} + +// NewStdioTty opens a tty using standard input/output. 
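// Editor's annotation (hedged): a caller would typically obtain the Tty and
// hand it to a terminfo-based screen, e.g.
//
//	tty, err := tcell.NewStdIoTty()
//	if err != nil { /* handle */ }
//	screen, err := tcell.NewTerminfoScreenFromTty(tty)
//
// NewTerminfoScreenFromTty is assumed from the broader tcell v2 API and is
// not shown in this diff.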
+func NewStdIoTty() (Tty, error) { + tty := &stdIoTty{ + sig: make(chan os.Signal), + in: os.Stdin, + out: os.Stdout, + } + var err error + tty.fd = int(tty.in.Fd()) + if !term.IsTerminal(tty.fd) { + return nil, errors.New("not a terminal") + } + if tty.saved, err = term.GetState(tty.fd); err != nil { + return nil, fmt.Errorf("failed to get state: %w", err) + } + return tty, nil +} diff --git a/vendor/github.com/gdamore/tcell/v2/style.go b/vendor/github.com/gdamore/tcell/v2/style.go new file mode 100644 index 0000000000..8359e28c67 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/style.go @@ -0,0 +1,137 @@ +// Copyright 2020 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +// Style represents a complete text style, including both foreground color, +// background color, and additional attributes such as "bold" or "underline". +// +// Note that not all terminals can display all colors or attributes, and +// many might have specific incompatibilities between specific attributes +// and color combinations. +// +// To use Style, just declare a variable of its type. +type Style struct { + fg Color + bg Color + attrs AttrMask +} + +// StyleDefault represents a default style, based upon the context. +// It is the zero value. +var StyleDefault Style + +// styleInvalid is just an arbitrary invalid style used internally. +var styleInvalid = Style{attrs: AttrInvalid} + +// Foreground returns a new style based on s, with the foreground color set +// as requested. ColorDefault can be used to select the global default. +func (s Style) Foreground(c Color) Style { + return Style{ + fg: c, + bg: s.bg, + attrs: s.attrs, + } +} + +// Background returns a new style based on s, with the background color set +// as requested. ColorDefault can be used to select the global default. +func (s Style) Background(c Color) Style { + return Style{ + fg: s.fg, + bg: c, + attrs: s.attrs, + } +} + +// Decompose breaks a style up, returning the foreground, background, +// and other attributes. +func (s Style) Decompose() (fg Color, bg Color, attr AttrMask) { + return s.fg, s.bg, s.attrs +} + +func (s Style) setAttrs(attrs AttrMask, on bool) Style { + if on { + return Style{ + fg: s.fg, + bg: s.bg, + attrs: s.attrs | attrs, + } + } + return Style{ + fg: s.fg, + bg: s.bg, + attrs: s.attrs &^ attrs, + } +} + +// Normal returns the style with all attributes disabled. +func (s Style) Normal() Style { + return Style{ + fg: s.fg, + bg: s.bg, + } +} + +// Bold returns a new style based on s, with the bold attribute set +// as requested. +func (s Style) Bold(on bool) Style { + return s.setAttrs(AttrBold, on) +} + +// Blink returns a new style based on s, with the blink attribute set +// as requested. +func (s Style) Blink(on bool) Style { + return s.setAttrs(AttrBlink, on) +} + +// Dim returns a new style based on s, with the dim attribute set +// as requested. 
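A hedged sketch (editor's illustration) of composing a Style with the fluent setters defined above and below:

package main

import "github.com/gdamore/tcell/v2"

func main() {
	st := tcell.StyleDefault.
		Foreground(tcell.ColorWhite).
		Background(tcell.ColorNavy).
		Bold(true).
		Underline(true)

	// Each setter returns a new Style value, so StyleDefault stays untouched.
	_ = st // pass to Screen.SetContent, SetStyle, or Fill
}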
+func (s Style) Dim(on bool) Style { + return s.setAttrs(AttrDim, on) +} + +// Italic returns a new style based on s, with the italic attribute set +// as requested. +func (s Style) Italic(on bool) Style { + return s.setAttrs(AttrItalic, on) +} + +// Reverse returns a new style based on s, with the reverse attribute set +// as requested. (Reverse usually changes the foreground and background +// colors.) +func (s Style) Reverse(on bool) Style { + return s.setAttrs(AttrReverse, on) +} + +// Underline returns a new style based on s, with the underline attribute set +// as requested. +func (s Style) Underline(on bool) Style { + return s.setAttrs(AttrUnderline, on) +} + +// StrikeThrough sets strikethrough mode. +func (s Style) StrikeThrough(on bool) Style { + return s.setAttrs(AttrStrikeThrough, on) +} + +// Attributes returns a new style based on s, with its attributes set as +// specified. +func (s Style) Attributes(attrs AttrMask) Style { + return Style{ + fg: s.fg, + bg: s.bg, + attrs: attrs, + } +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/.gitignore b/vendor/github.com/gdamore/tcell/v2/terminfo/.gitignore new file mode 100644 index 0000000000..74f3c04fd6 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/.gitignore @@ -0,0 +1 @@ +mkinfo diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/README.md b/vendor/github.com/gdamore/tcell/v2/terminfo/README.md new file mode 100644 index 0000000000..20ae937f38 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/README.md @@ -0,0 +1,25 @@ +This package represents the parent for all terminals. + +In older versions of tcell we had (a couple of) different +external file formats for the terminal database. Those are +now removed. All terminal definitions are supplied by +one of two methods: + +1. Compiled Go code + +2. For systems with terminfo and infocmp, dynamically + generated at runtime. + +The Go code can be generated using the mkinfo utility in +this directory. The database entry should be generated +into a package in a directory named as the first character +of the package name. (This permits us to group them all +without having a huge directory of little packages.) + +It may be desirable to add new packages to the extended +package, or -- rarely -- the base package. + +Applications which want to have the large set of terminal +descriptions built into the binary can simply import the +extended package. Otherwise a smaller reasonable default +set (the base package) will be included instead. diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/TERMINALS.md b/vendor/github.com/gdamore/tcell/v2/terminfo/TERMINALS.md new file mode 100644 index 0000000000..85c1e61c2e --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/TERMINALS.md @@ -0,0 +1,7 @@ +TERMINALS +========= + +The best way to populate terminals on Debian is to install ncurses, +ncurses-term, screen, tmux, rxvt-unicode, and dvtm. This populates the +the terminfo database so that we can have a reasonable set of starting +terminals. diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/a/aixterm/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/a/aixterm/term.go new file mode 100644 index 0000000000..503c9199ed --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/a/aixterm/term.go @@ -0,0 +1,83 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package aixterm + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // IBM Aixterm Terminal Emulator + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "aixterm", + Columns: 80, + Lines: 25, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[0;10m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[32m\x1b[40m", + PadChar: "\x00", + AltChars: "jjkkllmmnnqqttuuvvwwxx", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[139q", + KeyDelete: "\x1b[P", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + KeyEnd: "\x1b[146q", + KeyPgUp: "\x1b[150q", + KeyPgDn: "\x1b[154q", + KeyF1: "\x1b[001q", + KeyF2: "\x1b[002q", + KeyF3: "\x1b[003q", + KeyF4: "\x1b[004q", + KeyF5: "\x1b[005q", + KeyF6: "\x1b[006q", + KeyF7: "\x1b[007q", + KeyF8: "\x1b[008q", + KeyF9: "\x1b[009q", + KeyF10: "\x1b[010q", + KeyF11: "\x1b[011q", + KeyF12: "\x1b[012q", + KeyF13: "\x1b[013q", + KeyF14: "\x1b[014q", + KeyF15: "\x1b[015q", + KeyF16: "\x1b[016q", + KeyF17: "\x1b[017q", + KeyF18: "\x1b[018q", + KeyF19: "\x1b[019q", + KeyF20: "\x1b[020q", + KeyF21: "\x1b[021q", + KeyF22: "\x1b[022q", + KeyF23: "\x1b[023q", + KeyF24: "\x1b[024q", + KeyF25: "\x1b[025q", + KeyF26: "\x1b[026q", + KeyF27: "\x1b[027q", + KeyF28: "\x1b[028q", + KeyF29: "\x1b[029q", + KeyF30: "\x1b[030q", + KeyF31: "\x1b[031q", + KeyF32: "\x1b[032q", + KeyF33: "\x1b[033q", + KeyF34: "\x1b[034q", + KeyF35: "\x1b[035q", + KeyF36: "\x1b[036q", + KeyClear: "\x1b[144q", + KeyBacktab: "\x1b[Z", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/a/alacritty/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/a/alacritty/term.go new file mode 100644 index 0000000000..5b97998469 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/a/alacritty/term.go @@ -0,0 +1,69 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package alacritty + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // alacritty terminal emulator + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "alacritty", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h\x1b[22;0;0t", + ExitCA: "\x1b[?1049l\x1b[23;0;0t", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[<", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/a/ansi/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/a/ansi/term.go new file mode 100644 index 0000000000..5c572fd496 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/a/ansi/term.go @@ -0,0 +1,43 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package ansi + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // ansi/pc-term compatible with color + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "ansi", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "+\x10,\x11-\x18.\x190\xdb`\x04a\xb1f\xf8g\xf1h\xb0j\xd9k\xbfl\xdam\xc0n\xc5o~p\xc4q\xc4r\xc4s_t\xc3u\xb4v\xc1w\xc2x\xb3y\xf3z\xf2{\xe3|\xd8}\x9c~\xfe", + EnterAcs: "\x1b[11m", + ExitAcs: "\x1b[10m", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\x1b[D", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[L", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + KeyBacktab: "\x1b[Z", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/b/beterm/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/b/beterm/term.go new file mode 100644 index 0000000000..e6d88838c3 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/b/beterm/term.go @@ -0,0 +1,57 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+
+package beterm
+
+import "github.com/gdamore/tcell/v2/terminfo"
+
+func init() {
+
+	// BeOS Terminal
+	terminfo.AddTerminfo(&terminfo.Terminfo{
+		Name: "beterm",
+		Columns: 80,
+		Lines: 25,
+		Colors: 8,
+		Bell: "\a",
+		Clear: "\x1b[H\x1b[J",
+		AttrOff: "\x1b[0;10m",
+		Underline: "\x1b[4m",
+		Bold: "\x1b[1m",
+		Reverse: "\x1b[7m",
+		EnterKeypad: "\x1b[?4h",
+		ExitKeypad: "\x1b[?4l",
+		SetFg: "\x1b[3%p1%dm",
+		SetBg: "\x1b[4%p1%dm",
+		SetFgBg: "\x1b[3%p1%d;4%p2%dm",
+		ResetFgBg: "\x1b[m",
+		PadChar: "\x00",
+		SetCursor: "\x1b[%i%p1%d;%p2%dH",
+		CursorBack1: "\b",
+		CursorUp1: "\x1b[A",
+		KeyUp: "\x1b[A",
+		KeyDown: "\x1b[B",
+		KeyRight: "\x1b[C",
+		KeyLeft: "\x1b[D",
+		KeyInsert: "\x1b[2~",
+		KeyDelete: "\x1b[3~",
+		KeyBackspace: "\b",
+		KeyHome: "\x1b[1~",
+		KeyEnd: "\x1b[4~",
+		KeyPgUp: "\x1b[5~",
+		KeyPgDn: "\x1b[6~",
+		KeyF1: "\x1b[11~",
+		KeyF2: "\x1b[12~",
+		KeyF3: "\x1b[13~",
+		KeyF4: "\x1b[14~",
+		KeyF5: "\x1b[15~",
+		KeyF6: "\x1b[16~",
+		KeyF7: "\x1b[17~",
+		KeyF8: "\x1b[18~",
+		KeyF9: "\x1b[19~",
+		KeyF10: "\x1b[20~",
+		KeyF11: "\x1b[21~",
+		KeyF12: "\x1b[22~",
+		AutoMargin: true,
+		InsertChar: "\x1b[@",
+	})
+}
diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/base/base.go b/vendor/github.com/gdamore/tcell/v2/terminfo/base/base.go
new file mode 100644
index 0000000000..fbecdfa93e
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/terminfo/base/base.go
@@ -0,0 +1,32 @@
+// Copyright 2020 The TCell Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is just a "minimalist" set of the base terminal descriptions.
+// It should be sufficient for most applications.
+
+// Package base contains the base terminal descriptions that are likely
+// to be needed by any stock application. It is imported by default in the
+// terminfo package, so terminal types listed here will be available to any
+// tcell application.
+package base
+
+import (
+	// The following imports just register themselves --
+	// these are the terminal types we aggregate in this package.
+	_ "github.com/gdamore/tcell/v2/terminfo/a/ansi"
+	_ "github.com/gdamore/tcell/v2/terminfo/v/vt100"
+	_ "github.com/gdamore/tcell/v2/terminfo/v/vt102"
+	_ "github.com/gdamore/tcell/v2/terminfo/v/vt220"
+	_ "github.com/gdamore/tcell/v2/terminfo/x/xterm"
+)
diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/c/cygwin/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/c/cygwin/term.go
new file mode 100644
index 0000000000..46a0a4a3a2
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/terminfo/c/cygwin/term.go
@@ -0,0 +1,66 @@
+// Generated automatically. DO NOT HAND-EDIT.
+ +package cygwin + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // ANSI emulation for Cygwin + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "cygwin", + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "+\x10,\x11-\x18.\x190\xdb`\x04a\xb1f\xf8g\xf1h\xb0j\xd9k\xbfl\xdam\xc0n\xc5o~p\xc4q\xc4r\xc4s_t\xc3u\xb4v\xc1w\xc2x\xb3y\xf3z\xf2{\xe3|\xd8}\x9c~\xfe", + EnterAcs: "\x1b[11m", + ExitAcs: "\x1b[10m", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[[A", + KeyF2: "\x1b[[B", + KeyF3: "\x1b[[C", + KeyF4: "\x1b[[D", + KeyF5: "\x1b[[E", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + AutoMargin: true, + InsertChar: "\x1b[@", + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/d/dtterm/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/d/dtterm/term.go new file mode 100644 index 0000000000..f471c80d23 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/d/dtterm/term.go @@ -0,0 +1,69 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+
+package dtterm
+
+import "github.com/gdamore/tcell/v2/terminfo"
+
+func init() {
+
+	// CDE desktop terminal
+	terminfo.AddTerminfo(&terminfo.Terminfo{
+		Name: "dtterm",
+		Columns: 80,
+		Lines: 24,
+		Colors: 8,
+		Bell: "\a",
+		Clear: "\x1b[H\x1b[J",
+		ShowCursor: "\x1b[?25h",
+		HideCursor: "\x1b[?25l",
+		AttrOff: "\x1b[m\x0f",
+		Underline: "\x1b[4m",
+		Bold: "\x1b[1m",
+		Dim: "\x1b[2m",
+		Blink: "\x1b[5m",
+		Reverse: "\x1b[7m",
+		SetFg: "\x1b[3%p1%dm",
+		SetBg: "\x1b[4%p1%dm",
+		SetFgBg: "\x1b[3%p1%d;4%p2%dm",
+		ResetFgBg: "\x1b[39;49m",
+		PadChar: "\x00",
+		AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~",
+		EnterAcs: "\x0e",
+		ExitAcs: "\x0f",
+		EnableAcs: "\x1b(B\x1b)0",
+		SetCursor: "\x1b[%i%p1%d;%p2%dH",
+		CursorBack1: "\b",
+		CursorUp1: "\x1b[A",
+		KeyUp: "\x1b[A",
+		KeyDown: "\x1b[B",
+		KeyRight: "\x1b[C",
+		KeyLeft: "\x1b[D",
+		KeyInsert: "\x1b[2~",
+		KeyDelete: "\x1b[3~",
+		KeyBackspace: "\b",
+		KeyPgUp: "\x1b[5~",
+		KeyPgDn: "\x1b[6~",
+		KeyF1: "\x1b[11~",
+		KeyF2: "\x1b[12~",
+		KeyF3: "\x1b[13~",
+		KeyF4: "\x1b[14~",
+		KeyF5: "\x1b[15~",
+		KeyF6: "\x1b[17~",
+		KeyF7: "\x1b[18~",
+		KeyF8: "\x1b[19~",
+		KeyF9: "\x1b[20~",
+		KeyF10: "\x1b[21~",
+		KeyF11: "\x1b[23~",
+		KeyF12: "\x1b[24~",
+		KeyF13: "\x1b[25~",
+		KeyF14: "\x1b[26~",
+		KeyF15: "\x1b[28~",
+		KeyF16: "\x1b[29~",
+		KeyF17: "\x1b[31~",
+		KeyF18: "\x1b[32~",
+		KeyF19: "\x1b[33~",
+		KeyF20: "\x1b[34~",
+		KeyHelp: "\x1b[28~",
+		AutoMargin: true,
+	})
+}
diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/dynamic/dynamic.go b/vendor/github.com/gdamore/tcell/v2/terminfo/dynamic/dynamic.go
new file mode 100644
index 0000000000..08ff248428
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/terminfo/dynamic/dynamic.go
@@ -0,0 +1,427 @@
+// Copyright 2021 The TCell Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The dynamic package is used to generate a terminal description dynamically,
+// using infocmp. This is really a method of last resort, as the performance
+// will be slow, and it requires a working infocmp. But the hope is that it
+// will assist folks who have to deal with a terminal description that isn't
+// already built in. This requires infocmp to be in the user's path, and to
+// reasonably support the -1 option.
+
+package dynamic
+
+import (
+	"bytes"
+	"errors"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"github.com/gdamore/tcell/v2/terminfo"
+)
+
+type termcap struct {
+	name string
+	desc string
+	aliases []string
+	bools map[string]bool
+	nums map[string]int
+	strs map[string]string
+}
+
+func (tc *termcap) getnum(s string) int {
+	return (tc.nums[s])
+}
+
+func (tc *termcap) getflag(s string) bool {
+	return (tc.bools[s])
+}
+
+func (tc *termcap) getstr(s string) string {
+	return (tc.strs[s])
+}
+
+const (
+	none = iota
+	control
+	escaped
+)
+
+var errNotAddressable = errors.New("terminal not cursor addressable")
+
+func unescape(s string) string {
+	// Various escapes are in \x format. Control codes are
+	// encoded as ^M (caret followed by ASCII equivalent).
+	// Escapes are: \e, \E - escape
+	// \0 NULL, \n \l \r \t \b \f \s for equivalent C escape.
+	buf := &bytes.Buffer{}
+	esc := none
+
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		switch esc {
+		case none:
+			switch c {
+			case '\\':
+				esc = escaped
+			case '^':
+				esc = control
+			default:
+				buf.WriteByte(c)
+			}
+		case control:
+			buf.WriteByte(c ^ 1<<6)
+			esc = none
+		case escaped:
+			switch c {
+			case 'E', 'e':
+				buf.WriteByte(0x1b)
+			case '0', '1', '2', '3', '4', '5', '6', '7':
+				if i+2 < len(s) && s[i+1] >= '0' && s[i+1] <= '7' && s[i+2] >= '0' && s[i+2] <= '7' {
+					buf.WriteByte(((c - '0') * 64) + ((s[i+1] - '0') * 8) + (s[i+2] - '0'))
+					i = i + 2
+				} else if c == '0' {
+					buf.WriteByte(0)
+				}
+			case 'n':
+				buf.WriteByte('\n')
+			case 'r':
+				buf.WriteByte('\r')
+			case 't':
+				buf.WriteByte('\t')
+			case 'b':
+				buf.WriteByte('\b')
+			case 'f':
+				buf.WriteByte('\f')
+			case 's':
+				buf.WriteByte(' ')
+			default:
+				buf.WriteByte(c)
+			}
+			esc = none
+		}
+	}
+	return (buf.String())
+}
+
+func (tc *termcap) setupterm(name string) error {
+	cmd := exec.Command("infocmp", "-1", name)
+	output := &bytes.Buffer{}
+	cmd.Stdout = output
+
+	tc.strs = make(map[string]string)
+	tc.bools = make(map[string]bool)
+	tc.nums = make(map[string]int)
+
+	if err := cmd.Run(); err != nil {
+		return err
+	}
+
+	// Now parse the output.
+	// We get comment lines (starting with "#"), followed by
+	// a header line that looks like "<name>|<alias>|...|<desc>",
+	// then capabilities, one per line, starting with a tab and ending
+	// with a comma and newline.
+	lines := strings.Split(output.String(), "\n")
+	for len(lines) > 0 && strings.HasPrefix(lines[0], "#") {
+		lines = lines[1:]
+	}
+
+	// Ditch trailing empty last line
+	if lines[len(lines)-1] == "" {
+		lines = lines[:len(lines)-1]
+	}
+	header := lines[0]
+	if strings.HasSuffix(header, ",") {
+		header = header[:len(header)-1]
+	}
+	names := strings.Split(header, "|")
+	tc.name = names[0]
+	names = names[1:]
+	if len(names) > 0 {
+		tc.desc = names[len(names)-1]
+		names = names[:len(names)-1]
+	}
+	tc.aliases = names
+	for _, val := range lines[1:] {
+		if (!strings.HasPrefix(val, "\t")) ||
+			(!strings.HasSuffix(val, ",")) {
+			return (errors.New("malformed infocmp: " + val))
+		}
+
+		val = val[1:]
+		val = val[:len(val)-1]
+
+		if k := strings.SplitN(val, "=", 2); len(k) == 2 {
+			tc.strs[k[0]] = unescape(k[1])
+		} else if k := strings.SplitN(val, "#", 2); len(k) == 2 {
+			u, err := strconv.ParseUint(k[1], 0, 0)
+			if err != nil {
+				return (err)
+			}
+			tc.nums[k[0]] = int(u)
+		} else {
+			tc.bools[val] = true
+		}
+	}
+	return nil
+}
+
+// LoadTerminfo creates a Terminfo for the named terminal by attempting to
+// parse the output from infocmp. This returns the terminfo entry, a
+// description of the terminal, and either nil or an error.
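+//
+// A minimal illustrative call (the terminal name and handling here are
+// examples only, not part of the original source):
+//
+//	ti, desc, err := dynamic.LoadTerminfo("xterm-256color")
+//	if err == nil {
+//		terminfo.AddTerminfo(ti) // register for later lookups
+//		_ = desc                 // human-readable description from infocmp
+//	}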
+func LoadTerminfo(name string) (*terminfo.Terminfo, string, error) { + var tc termcap + if err := tc.setupterm(name); err != nil { + if err != nil { + return nil, "", err + } + } + t := &terminfo.Terminfo{} + // If this is an alias record, then just emit the alias + t.Name = tc.name + if t.Name != name { + return t, "", nil + } + t.Aliases = tc.aliases + t.Colors = tc.getnum("colors") + t.Columns = tc.getnum("cols") + t.Lines = tc.getnum("lines") + t.Bell = tc.getstr("bel") + t.Clear = tc.getstr("clear") + t.EnterCA = tc.getstr("smcup") + t.ExitCA = tc.getstr("rmcup") + t.ShowCursor = tc.getstr("cnorm") + t.HideCursor = tc.getstr("civis") + t.AttrOff = tc.getstr("sgr0") + t.Underline = tc.getstr("smul") + t.Bold = tc.getstr("bold") + t.Blink = tc.getstr("blink") + t.Dim = tc.getstr("dim") + t.Italic = tc.getstr("sitm") + t.Reverse = tc.getstr("rev") + t.EnterKeypad = tc.getstr("smkx") + t.ExitKeypad = tc.getstr("rmkx") + t.SetFg = tc.getstr("setaf") + t.SetBg = tc.getstr("setab") + t.SetCursor = tc.getstr("cup") + t.CursorBack1 = tc.getstr("cub1") + t.CursorUp1 = tc.getstr("cuu1") + t.KeyF1 = tc.getstr("kf1") + t.KeyF2 = tc.getstr("kf2") + t.KeyF3 = tc.getstr("kf3") + t.KeyF4 = tc.getstr("kf4") + t.KeyF5 = tc.getstr("kf5") + t.KeyF6 = tc.getstr("kf6") + t.KeyF7 = tc.getstr("kf7") + t.KeyF8 = tc.getstr("kf8") + t.KeyF9 = tc.getstr("kf9") + t.KeyF10 = tc.getstr("kf10") + t.KeyF11 = tc.getstr("kf11") + t.KeyF12 = tc.getstr("kf12") + t.KeyF13 = tc.getstr("kf13") + t.KeyF14 = tc.getstr("kf14") + t.KeyF15 = tc.getstr("kf15") + t.KeyF16 = tc.getstr("kf16") + t.KeyF17 = tc.getstr("kf17") + t.KeyF18 = tc.getstr("kf18") + t.KeyF19 = tc.getstr("kf19") + t.KeyF20 = tc.getstr("kf20") + t.KeyF21 = tc.getstr("kf21") + t.KeyF22 = tc.getstr("kf22") + t.KeyF23 = tc.getstr("kf23") + t.KeyF24 = tc.getstr("kf24") + t.KeyF25 = tc.getstr("kf25") + t.KeyF26 = tc.getstr("kf26") + t.KeyF27 = tc.getstr("kf27") + t.KeyF28 = tc.getstr("kf28") + t.KeyF29 = tc.getstr("kf29") + t.KeyF30 = tc.getstr("kf30") + t.KeyF31 = tc.getstr("kf31") + t.KeyF32 = tc.getstr("kf32") + t.KeyF33 = tc.getstr("kf33") + t.KeyF34 = tc.getstr("kf34") + t.KeyF35 = tc.getstr("kf35") + t.KeyF36 = tc.getstr("kf36") + t.KeyF37 = tc.getstr("kf37") + t.KeyF38 = tc.getstr("kf38") + t.KeyF39 = tc.getstr("kf39") + t.KeyF40 = tc.getstr("kf40") + t.KeyF41 = tc.getstr("kf41") + t.KeyF42 = tc.getstr("kf42") + t.KeyF43 = tc.getstr("kf43") + t.KeyF44 = tc.getstr("kf44") + t.KeyF45 = tc.getstr("kf45") + t.KeyF46 = tc.getstr("kf46") + t.KeyF47 = tc.getstr("kf47") + t.KeyF48 = tc.getstr("kf48") + t.KeyF49 = tc.getstr("kf49") + t.KeyF50 = tc.getstr("kf50") + t.KeyF51 = tc.getstr("kf51") + t.KeyF52 = tc.getstr("kf52") + t.KeyF53 = tc.getstr("kf53") + t.KeyF54 = tc.getstr("kf54") + t.KeyF55 = tc.getstr("kf55") + t.KeyF56 = tc.getstr("kf56") + t.KeyF57 = tc.getstr("kf57") + t.KeyF58 = tc.getstr("kf58") + t.KeyF59 = tc.getstr("kf59") + t.KeyF60 = tc.getstr("kf60") + t.KeyF61 = tc.getstr("kf61") + t.KeyF62 = tc.getstr("kf62") + t.KeyF63 = tc.getstr("kf63") + t.KeyF64 = tc.getstr("kf64") + t.KeyInsert = tc.getstr("kich1") + t.KeyDelete = tc.getstr("kdch1") + t.KeyBackspace = tc.getstr("kbs") + t.KeyHome = tc.getstr("khome") + t.KeyEnd = tc.getstr("kend") + t.KeyUp = tc.getstr("kcuu1") + t.KeyDown = tc.getstr("kcud1") + t.KeyRight = tc.getstr("kcuf1") + t.KeyLeft = tc.getstr("kcub1") + t.KeyPgDn = tc.getstr("knp") + t.KeyPgUp = tc.getstr("kpp") + t.KeyBacktab = tc.getstr("kcbt") + t.KeyExit = tc.getstr("kext") + t.KeyCancel = tc.getstr("kcan") + t.KeyPrint = 
tc.getstr("kprt") + t.KeyHelp = tc.getstr("khlp") + t.KeyClear = tc.getstr("kclr") + t.AltChars = tc.getstr("acsc") + t.EnterAcs = tc.getstr("smacs") + t.ExitAcs = tc.getstr("rmacs") + t.EnableAcs = tc.getstr("enacs") + t.Mouse = tc.getstr("kmous") + t.KeyShfRight = tc.getstr("kRIT") + t.KeyShfLeft = tc.getstr("kLFT") + t.KeyShfHome = tc.getstr("kHOM") + t.KeyShfEnd = tc.getstr("kEND") + + // Terminfo lacks descriptions for a bunch of modified keys, + // but modern XTerm and emulators often have them. Let's add them, + // if the shifted right and left arrows are defined. + if t.KeyShfRight == "\x1b[1;2C" && t.KeyShfLeft == "\x1b[1;2D" { + t.KeyShfUp = "\x1b[1;2A" + t.KeyShfDown = "\x1b[1;2B" + t.KeyMetaUp = "\x1b[1;9A" + t.KeyMetaDown = "\x1b[1;9B" + t.KeyMetaRight = "\x1b[1;9C" + t.KeyMetaLeft = "\x1b[1;9D" + t.KeyAltUp = "\x1b[1;3A" + t.KeyAltDown = "\x1b[1;3B" + t.KeyAltRight = "\x1b[1;3C" + t.KeyAltLeft = "\x1b[1;3D" + t.KeyCtrlUp = "\x1b[1;5A" + t.KeyCtrlDown = "\x1b[1;5B" + t.KeyCtrlRight = "\x1b[1;5C" + t.KeyCtrlLeft = "\x1b[1;5D" + t.KeyAltShfUp = "\x1b[1;4A" + t.KeyAltShfDown = "\x1b[1;4B" + t.KeyAltShfRight = "\x1b[1;4C" + t.KeyAltShfLeft = "\x1b[1;4D" + + t.KeyMetaShfUp = "\x1b[1;10A" + t.KeyMetaShfDown = "\x1b[1;10B" + t.KeyMetaShfRight = "\x1b[1;10C" + t.KeyMetaShfLeft = "\x1b[1;10D" + + t.KeyCtrlShfUp = "\x1b[1;6A" + t.KeyCtrlShfDown = "\x1b[1;6B" + t.KeyCtrlShfRight = "\x1b[1;6C" + t.KeyCtrlShfLeft = "\x1b[1;6D" + + t.KeyShfPgUp = "\x1b[5;2~" + t.KeyShfPgDn = "\x1b[6;2~" + } + // And also for Home and End + if t.KeyShfHome == "\x1b[1;2H" && t.KeyShfEnd == "\x1b[1;2F" { + t.KeyCtrlHome = "\x1b[1;5H" + t.KeyCtrlEnd = "\x1b[1;5F" + t.KeyAltHome = "\x1b[1;9H" + t.KeyAltEnd = "\x1b[1;9F" + t.KeyCtrlShfHome = "\x1b[1;6H" + t.KeyCtrlShfEnd = "\x1b[1;6F" + t.KeyAltShfHome = "\x1b[1;4H" + t.KeyAltShfEnd = "\x1b[1;4F" + t.KeyMetaShfHome = "\x1b[1;10H" + t.KeyMetaShfEnd = "\x1b[1;10F" + } + + // And the same thing for rxvt and workalikes (Eterm, aterm, etc.) + // It seems that urxvt at least send escaped as ALT prefix for these, + // although some places seem to indicate a separate ALT key sesquence. + if t.KeyShfRight == "\x1b[c" && t.KeyShfLeft == "\x1b[d" { + t.KeyShfUp = "\x1b[a" + t.KeyShfDown = "\x1b[b" + t.KeyCtrlUp = "\x1b[Oa" + t.KeyCtrlDown = "\x1b[Ob" + t.KeyCtrlRight = "\x1b[Oc" + t.KeyCtrlLeft = "\x1b[Od" + } + if t.KeyShfHome == "\x1b[7$" && t.KeyShfEnd == "\x1b[8$" { + t.KeyCtrlHome = "\x1b[7^" + t.KeyCtrlEnd = "\x1b[8^" + } + + // Technically the RGB flag that is provided for xterm-direct is not + // quite right. The problem is that the -direct flag that was introduced + // with ncurses 6.1 requires a parsing for the parameters that we lack. + // For this case we'll just assume it's XTerm compatible. Someday this + // may be incorrect, but right now it is correct, and nobody uses it + // anyway. + if tc.getflag("Tc") { + // This presumes XTerm 24-bit true color. + t.TrueColor = true + } else if tc.getflag("RGB") { + // This is for xterm-direct, which uses a different scheme entirely. + // (ncurses went a very different direction from everyone else, and + // so it's unlikely anything is using this definition.) + t.TrueColor = true + t.SetBg = "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m" + t.SetFg = "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m" + } + + // We only support colors in ANSI 8 or 256 color mode. 
+	if t.Colors < 8 || t.SetFg == "" {
+		t.Colors = 0
+	}
+	if t.SetCursor == "" {
+		return nil, "", errNotAddressable
+	}
+
+	// For padding, we look up the pad char. If that isn't present,
+	// and npc is *not* set, then we assume a null byte.
+	t.PadChar = tc.getstr("pad")
+	if t.PadChar == "" {
+		if !tc.getflag("npc") {
+			t.PadChar = "\u0000"
+		}
+	}
+
+	// For terminals that use "standard" SGR sequences, let's combine the
+	// foreground and background into a single sequence.
+	if strings.HasPrefix(t.SetFg, "\x1b[") &&
+		strings.HasPrefix(t.SetBg, "\x1b[") &&
+		strings.HasSuffix(t.SetFg, "m") &&
+		strings.HasSuffix(t.SetBg, "m") {
+		fg := t.SetFg[:len(t.SetFg)-1]
+		r := regexp.MustCompile("%p1")
+		bg := r.ReplaceAllString(t.SetBg[2:], "%p2")
+		t.SetFgBg = fg + ";" + bg
+	}
+
+	return t, tc.desc, nil
+}
diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/e/emacs/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/e/emacs/term.go
new file mode 100644
index 0000000000..b0b9b4771e
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/terminfo/e/emacs/term.go
@@ -0,0 +1,63 @@
+// Generated automatically. DO NOT HAND-EDIT.
+
+package emacs
+
+import "github.com/gdamore/tcell/v2/terminfo"
+
+func init() {
+
+	// gnu emacs term.el terminal emulation
+	terminfo.AddTerminfo(&terminfo.Terminfo{
+		Name: "eterm",
+		Columns: 80,
+		Lines: 24,
+		Bell: "\a",
+		Clear: "\x1b[H\x1b[J",
+		EnterCA: "\x1b7\x1b[?47h",
+		ExitCA: "\x1b[2J\x1b[?47l\x1b8",
+		AttrOff: "\x1b[m",
+		Underline: "\x1b[4m",
+		Bold: "\x1b[1m",
+		Reverse: "\x1b[7m",
+		PadChar: "\x00",
+		SetCursor: "\x1b[%i%p1%d;%p2%dH",
+		CursorBack1: "\b",
+		CursorUp1: "\x1b[A",
+		AutoMargin: true,
+	})
+
+	// Emacs term.el terminal emulator term-protocol-version 0.96
+	terminfo.AddTerminfo(&terminfo.Terminfo{
+		Name: "eterm-color",
+		Columns: 80,
+		Lines: 24,
+		Colors: 8,
+		Bell: "\a",
+		Clear: "\x1b[H\x1b[J",
+		AttrOff: "\x1b[m",
+		Underline: "\x1b[4m",
+		Bold: "\x1b[1m",
+		Blink: "\x1b[5m",
+		Reverse: "\x1b[7m",
+		SetFg: "\x1b[%p1%{30}%+%dm",
+		SetBg: "\x1b[%p1%'('%+%dm",
+		SetFgBg: "\x1b[%p1%{30}%+%d;%p2%'('%+%dm",
+		ResetFgBg: "\x1b[39;49m",
+		PadChar: "\x00",
+		SetCursor: "\x1b[%i%p1%d;%p2%dH",
+		CursorBack1: "\b",
+		CursorUp1: "\x1b[A",
+		KeyUp: "\x1bOA",
+		KeyDown: "\x1bOB",
+		KeyRight: "\x1bOC",
+		KeyLeft: "\x1bOD",
+		KeyInsert: "\x1b[2~",
+		KeyDelete: "\x1b[3~",
+		KeyBackspace: "\u007f",
+		KeyHome: "\x1b[1~",
+		KeyEnd: "\x1b[4~",
+		KeyPgUp: "\x1b[5~",
+		KeyPgDn: "\x1b[6~",
+		AutoMargin: true,
+	})
+}
diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/extended/extended.go b/vendor/github.com/gdamore/tcell/v2/terminfo/extended/extended.go
new file mode 100644
index 0000000000..c69bef13d5
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/terminfo/extended/extended.go
@@ -0,0 +1,58 @@
+// Copyright 2020 The TCell Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package extended contains an extended set of terminal descriptions.
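+// Importing it for side effects is all that is needed to register the
+// entries, along the lines of (illustrative):
+//
+//	import (
+//		_ "github.com/gdamore/tcell/v2/terminfo/extended"
+//	)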
+// Applications desiring to have a better chance of Just Working by +// default should include this package. This will significantly increase +// the size of the program. +package extended + +import ( + // The following imports just register themselves -- + // these are the terminal types we aggregate in this package. + _ "github.com/gdamore/tcell/v2/terminfo/a/aixterm" + _ "github.com/gdamore/tcell/v2/terminfo/a/alacritty" + _ "github.com/gdamore/tcell/v2/terminfo/a/ansi" + _ "github.com/gdamore/tcell/v2/terminfo/b/beterm" + _ "github.com/gdamore/tcell/v2/terminfo/c/cygwin" + _ "github.com/gdamore/tcell/v2/terminfo/d/dtterm" + _ "github.com/gdamore/tcell/v2/terminfo/e/emacs" + _ "github.com/gdamore/tcell/v2/terminfo/f/foot" + _ "github.com/gdamore/tcell/v2/terminfo/g/gnome" + _ "github.com/gdamore/tcell/v2/terminfo/h/hpterm" + _ "github.com/gdamore/tcell/v2/terminfo/k/konsole" + _ "github.com/gdamore/tcell/v2/terminfo/k/kterm" + _ "github.com/gdamore/tcell/v2/terminfo/l/linux" + _ "github.com/gdamore/tcell/v2/terminfo/p/pcansi" + _ "github.com/gdamore/tcell/v2/terminfo/r/rxvt" + _ "github.com/gdamore/tcell/v2/terminfo/s/screen" + _ "github.com/gdamore/tcell/v2/terminfo/s/simpleterm" + _ "github.com/gdamore/tcell/v2/terminfo/s/sun" + _ "github.com/gdamore/tcell/v2/terminfo/t/termite" + _ "github.com/gdamore/tcell/v2/terminfo/t/tmux" + _ "github.com/gdamore/tcell/v2/terminfo/v/vt100" + _ "github.com/gdamore/tcell/v2/terminfo/v/vt102" + _ "github.com/gdamore/tcell/v2/terminfo/v/vt220" + _ "github.com/gdamore/tcell/v2/terminfo/v/vt320" + _ "github.com/gdamore/tcell/v2/terminfo/v/vt400" + _ "github.com/gdamore/tcell/v2/terminfo/v/vt420" + _ "github.com/gdamore/tcell/v2/terminfo/v/vt52" + _ "github.com/gdamore/tcell/v2/terminfo/w/wy50" + _ "github.com/gdamore/tcell/v2/terminfo/w/wy60" + _ "github.com/gdamore/tcell/v2/terminfo/w/wy99_ansi" + _ "github.com/gdamore/tcell/v2/terminfo/x/xfce" + _ "github.com/gdamore/tcell/v2/terminfo/x/xterm" + _ "github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty" + _ "github.com/gdamore/tcell/v2/terminfo/x/xterm_termite" +) diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/f/foot/foot.go b/vendor/github.com/gdamore/tcell/v2/terminfo/f/foot/foot.go new file mode 100644 index 0000000000..fb734cbdc2 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/f/foot/foot.go @@ -0,0 +1,69 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package foot + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // foot terminal emulator + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "foot", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h\x1b[22;0;0t", + ExitCA: "\x1b[?1049l\x1b[23;0;0t", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38:5:%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48:5:%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38:5:%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48:5:%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/g/gnome/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/g/gnome/term.go new file mode 100644 index 0000000000..e85a3a343d --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/g/gnome/term.go @@ -0,0 +1,130 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package gnome + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // GNOME Terminal + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "gnome", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) + + // GNOME Terminal with xterm 256-colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "gnome-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/gen.sh b/vendor/github.com/gdamore/tcell/v2/terminfo/gen.sh new file mode 100644 index 0000000000..2fc0611234 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/gen.sh @@ -0,0 +1,18 @@ +while read line +do + case "$line" in + *'|'*) + alias=${line#*|} + line=${line%|*} + ;; + *) + alias=${line%%,*} + ;; + esac + + alias=${alias//-/_} + direc=${alias:0:1} + + mkdir -p 
${direc}/${alias} + go run mkinfo.go -P ${alias} -go ${direc}/${alias}/term.go ${line//,/ } +done < models.txt diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/h/hpterm/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/h/hpterm/term.go new file mode 100644 index 0000000000..123bfb9390 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/h/hpterm/term.go @@ -0,0 +1,51 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package hpterm + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // hp X11 terminal emulator + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "hpterm", + Aliases: []string{"X-hpterm"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b&a0y0C\x1bJ", + AttrOff: "\x1b&d@\x0f", + Underline: "\x1b&dD", + Bold: "\x1b&dB", + Dim: "\x1b&dH", + Reverse: "\x1b&dB", + EnterKeypad: "\x1b&s1A", + ExitKeypad: "\x1b&s0A", + PadChar: "\x00", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + SetCursor: "\x1b&a%p1%dy%p2%dC", + CursorBack1: "\b", + CursorUp1: "\x1bA", + KeyUp: "\x1bA", + KeyDown: "\x1bB", + KeyRight: "\x1bC", + KeyLeft: "\x1bD", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bP", + KeyBackspace: "\b", + KeyHome: "\x1bh", + KeyPgUp: "\x1bV", + KeyPgDn: "\x1bU", + KeyF1: "\x1bp", + KeyF2: "\x1bq", + KeyF3: "\x1br", + KeyF4: "\x1bs", + KeyF5: "\x1bt", + KeyF6: "\x1bu", + KeyF7: "\x1bv", + KeyF8: "\x1bw", + KeyClear: "\x1bJ", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/k/konsole/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/k/konsole/term.go new file mode 100644 index 0000000000..236db9db2d --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/k/konsole/term.go @@ -0,0 +1,130 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package konsole + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // KDE console window + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "konsole", + Columns: 80, + Lines: 24, + Colors: 8, + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[<", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) + + // KDE console window with xterm 256-colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "konsole-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", 
+ AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[<", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/k/kterm/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/k/kterm/term.go new file mode 100644 index 0000000000..eedbe6de0a --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/k/kterm/term.go @@ -0,0 +1,68 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package kterm + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // kterm kanji terminal emulator (X window system) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "kterm", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "``aajjkkllmmnnooppqqrrssttuuvvwwxx~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/l/linux/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/l/linux/term.go new file mode 100644 index 0000000000..8783b4c7ff --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/l/linux/term.go @@ -0,0 +1,71 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package linux + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // linux console + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "linux", + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + ShowCursor: "\x1b[?25h\x1b[?0c", + HideCursor: "\x1b[?25l\x1b[?1c", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "++,,--..00__``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}c~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[[A", + KeyF2: "\x1b[[B", + KeyF3: "\x1b[[C", + KeyF4: "\x1b[[D", + KeyF5: "\x1b[[E", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyBacktab: "\x1b[Z", + AutoMargin: true, + InsertChar: "\x1b[@", + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/models.txt b/vendor/github.com/gdamore/tcell/v2/terminfo/models.txt new file mode 100644 index 0000000000..f0db81299e --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/models.txt @@ -0,0 +1,31 @@ +aixterm +alacritty +ansi +beterm +cygwin +dtterm +eterm,eterm-color|emacs +gnome,gnome-256color +hpterm +konsole,konsole-256color +kterm +linux +pcansi +rxvt,rxvt-256color,rxvt-88color,rxvt-unicode,rxvt-unicode-256color +screen,screen-256color +st,st-256color|simpleterm +termite +tmux +vt52 +vt100 +vt102 +vt220 +vt320 +vt400 +vt420 +wy50 +wy60 +wy99-ansi,wy99a-ansi +xfce +xterm,xterm-88color,xterm-256color +xterm-kitty diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/p/pcansi/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/p/pcansi/term.go new file mode 100644 index 0000000000..9e89c19772 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/p/pcansi/term.go @@ -0,0 +1,41 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package pcansi + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // ibm-pc terminal programs claiming to be ansi + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "pcansi", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[0;10m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[37;40m", + PadChar: "\x00", + AltChars: "+\x10,\x11-\x18.\x190\xdb`\x04a\xb1f\xf8g\xf1h\xb0j\xd9k\xbfl\xdam\xc0n\xc5o~p\xc4q\xc4r\xc4s_t\xc3u\xb4v\xc1w\xc2x\xb3y\xf3z\xf2{\xe3|\xd8}\x9c~\xfe", + EnterAcs: "\x1b[12m", + ExitAcs: "\x1b[10m", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\x1b[D", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyBackspace: "\b", + KeyHome: "\x1b[H", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/r/rxvt/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/r/rxvt/term.go new file mode 100644 index 0000000000..6fa9e7fa46 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/r/rxvt/term.go @@ -0,0 +1,485 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package rxvt + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // rxvt terminal emulator (X Window System) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "rxvt", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyF21: "\x1b[23$", + KeyF22: "\x1b[24$", + KeyF23: "\x1b[11^", + KeyF24: "\x1b[12^", + KeyF25: "\x1b[13^", + KeyF26: "\x1b[14^", + KeyF27: "\x1b[15^", + KeyF28: "\x1b[17^", + KeyF29: "\x1b[18^", + KeyF30: "\x1b[19^", + KeyF31: "\x1b[20^", + KeyF32: "\x1b[21^", + KeyF33: "\x1b[23^", + KeyF34: "\x1b[24^", + KeyF35: "\x1b[25^", + KeyF36: "\x1b[26^", + KeyF37: "\x1b[28^", + KeyF38: "\x1b[29^", + KeyF39: "\x1b[31^", + KeyF40: "\x1b[32^", + KeyF41: "\x1b[33^", + KeyF42: "\x1b[34^", + KeyF43: "\x1b[23@", + KeyF44: "\x1b[24@", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyShfHome: "\x1b[7$", + 
KeyShfEnd: "\x1b[8$", + KeyShfDelete: "\x1b[3$", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + AutoMargin: true, + InsertChar: "\x1b[@", + }) + + // rxvt 2.7.9 with xterm 256-colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "rxvt-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyF21: "\x1b[23$", + KeyF22: "\x1b[24$", + KeyF23: "\x1b[11^", + KeyF24: "\x1b[12^", + KeyF25: "\x1b[13^", + KeyF26: "\x1b[14^", + KeyF27: "\x1b[15^", + KeyF28: "\x1b[17^", + KeyF29: "\x1b[18^", + KeyF30: "\x1b[19^", + KeyF31: "\x1b[20^", + KeyF32: "\x1b[21^", + KeyF33: "\x1b[23^", + KeyF34: "\x1b[24^", + KeyF35: "\x1b[25^", + KeyF36: "\x1b[26^", + KeyF37: "\x1b[28^", + KeyF38: "\x1b[29^", + KeyF39: "\x1b[31^", + KeyF40: "\x1b[32^", + KeyF41: "\x1b[33^", + KeyF42: "\x1b[34^", + KeyF43: "\x1b[23@", + KeyF44: "\x1b[24@", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyShfDelete: "\x1b[3$", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + AutoMargin: true, + InsertChar: "\x1b[@", + }) + + // rxvt 2.7.9 with xterm 88-colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "rxvt-88color", + Columns: 80, + Lines: 24, + Colors: 88, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: 
"\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyF21: "\x1b[23$", + KeyF22: "\x1b[24$", + KeyF23: "\x1b[11^", + KeyF24: "\x1b[12^", + KeyF25: "\x1b[13^", + KeyF26: "\x1b[14^", + KeyF27: "\x1b[15^", + KeyF28: "\x1b[17^", + KeyF29: "\x1b[18^", + KeyF30: "\x1b[19^", + KeyF31: "\x1b[20^", + KeyF32: "\x1b[21^", + KeyF33: "\x1b[23^", + KeyF34: "\x1b[24^", + KeyF35: "\x1b[25^", + KeyF36: "\x1b[26^", + KeyF37: "\x1b[28^", + KeyF38: "\x1b[29^", + KeyF39: "\x1b[31^", + KeyF40: "\x1b[32^", + KeyF41: "\x1b[33^", + KeyF42: "\x1b[34^", + KeyF43: "\x1b[23@", + KeyF44: "\x1b[24@", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyShfDelete: "\x1b[3$", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + AutoMargin: true, + InsertChar: "\x1b[@", + }) + + // rxvt-unicode terminal (X Window System) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "rxvt-unicode", + Columns: 80, + Lines: 24, + Colors: 88, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[r\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[38;5;%p1%dm", + SetBg: "\x1b[48;5;%p1%dm", + SetFgBg: "\x1b[38;5;%p1%d;48;5;%p2%dm", + ResetFgBg: "\x1b[39;49m", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyBacktab: 
"\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyShfInsert: "\x1b[2$", + KeyShfDelete: "\x1b[3$", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + AutoMargin: true, + InsertChar: "\x1b[@", + }) + + // rxvt-unicode terminal with 256 colors (X Window System) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "rxvt-unicode-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[r\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + SetFg: "\x1b[38;5;%p1%dm", + SetBg: "\x1b[48;5;%p1%dm", + SetFgBg: "\x1b[38;5;%p1%d;48;5;%p2%dm", + ResetFgBg: "\x1b[39;49m", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[7~", + KeyEnd: "\x1b[8~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1b[11~", + KeyF2: "\x1b[12~", + KeyF3: "\x1b[13~", + KeyF4: "\x1b[14~", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyBacktab: "\x1b[Z", + KeyShfLeft: "\x1b[d", + KeyShfRight: "\x1b[c", + KeyShfUp: "\x1b[a", + KeyShfDown: "\x1b[b", + KeyShfHome: "\x1b[7$", + KeyShfEnd: "\x1b[8$", + KeyShfInsert: "\x1b[2$", + KeyShfDelete: "\x1b[3$", + KeyCtrlUp: "\x1b[Oa", + KeyCtrlDown: "\x1b[Ob", + KeyCtrlRight: "\x1b[Oc", + KeyCtrlLeft: "\x1b[Od", + KeyCtrlHome: "\x1b[7^", + KeyCtrlEnd: "\x1b[8^", + AutoMargin: true, + InsertChar: "\x1b[@", + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/s/screen/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/s/screen/term.go new file mode 100644 index 0000000000..d95d636337 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/s/screen/term.go @@ -0,0 +1,128 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package screen + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // VT 100/ANSI X3.64 virtual terminal + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "screen", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + AutoMargin: true, + }) + + // GNU Screen with 256 colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "screen-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/s/simpleterm/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/s/simpleterm/term.go new file mode 100644 index 0000000000..f633f29414 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/s/simpleterm/term.go @@ -0,0 +1,136 @@ +// Generated automatically. DO NOT HAND-EDIT. 
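The screen-256color definition above leans on terminfo's parameterized-string language: the %?%p1%{8}%<%t...%e...%; expression in SetFg is an if/else chain that the TParm evaluator (added later in this patch) runs on a small stack machine. A minimal sketch of what that evaluation yields, assuming the vendored import paths from this patch:

package main

import (
	"fmt"

	"github.com/gdamore/tcell/v2/terminfo"
	// Importing the generated package registers "screen" and
	// "screen-256color" via its init() function.
	_ "github.com/gdamore/tcell/v2/terminfo/s/screen"
)

func main() {
	ti, err := terminfo.LookupTerminfo("screen-256color")
	if err != nil {
		panic(err)
	}
	// The conditional in SetFg selects classic, bright, or 256-color forms:
	fmt.Printf("%q\n", ti.TParm(ti.SetFg, 3))   // "\x1b[33m" (colors 0-7)
	fmt.Printf("%q\n", ti.TParm(ti.SetFg, 12))  // "\x1b[94m" (bright 8-15)
	fmt.Printf("%q\n", ti.TParm(ti.SetFg, 200)) // "\x1b[38;5;200m" (16-255)
}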
+ +package simpleterm + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // simpleterm + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "st", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + TrueColor: true, + AutoMargin: true, + }) + + // simpleterm with 256 colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "st-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "+C,D-A.B0E``aaffgghFiGjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + EnableAcs: "\x1b)0", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyClear: "\x1b[3;5~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + TrueColor: true, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/s/sun/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/s/sun/term.go new file mode 100644 index 0000000000..16cb96c20a --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/s/sun/term.go @@ -0,0 +1,112 @@ +// Copyright 
2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This terminal definition is hand-coded, as the default terminfo for +// this terminal is busted with respect to color. Unlike pretty much every +// other ANSI compliant terminal, this terminal cannot combine foreground and +// background escapes. The default terminfo also only provides escapes for +// 16 colors. + +package sun + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // Sun Microsystems Inc. workstation console + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "sun", + Aliases: []string{"sun1", "sun2"}, + Columns: 80, + Lines: 34, + Bell: "\a", + Clear: "\f", + AttrOff: "\x1b[m", + Reverse: "\x1b[7m", + PadChar: "\x00", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[247z", + KeyDelete: "\u007f", + KeyBackspace: "\b", + KeyHome: "\x1b[214z", + KeyEnd: "\x1b[220z", + KeyPgUp: "\x1b[216z", + KeyPgDn: "\x1b[222z", + KeyF1: "\x1b[224z", + KeyF2: "\x1b[225z", + KeyF3: "\x1b[226z", + KeyF4: "\x1b[227z", + KeyF5: "\x1b[228z", + KeyF6: "\x1b[229z", + KeyF7: "\x1b[230z", + KeyF8: "\x1b[231z", + KeyF9: "\x1b[232z", + KeyF10: "\x1b[233z", + KeyF11: "\x1b[234z", + KeyF12: "\x1b[235z", + AutoMargin: true, + InsertChar: "\x1b[@", + }) + + // Sun Microsystems Workstation console with color support (IA systems) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "sun-color", + Columns: 80, + Lines: 34, + Colors: 256, + Bell: "\a", + Clear: "\f", + AttrOff: "\x1b[m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + SetFg: "\x1b[38;5;%p1%dm", + SetBg: "\x1b[48;5;%p1%dm", + ResetFgBg: "\x1b[0m", + PadChar: "\x00", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[247z", + KeyDelete: "\u007f", + KeyBackspace: "\b", + KeyHome: "\x1b[214z", + KeyEnd: "\x1b[220z", + KeyPgUp: "\x1b[216z", + KeyPgDn: "\x1b[222z", + KeyF1: "\x1b[224z", + KeyF2: "\x1b[225z", + KeyF3: "\x1b[226z", + KeyF4: "\x1b[227z", + KeyF5: "\x1b[228z", + KeyF6: "\x1b[229z", + KeyF7: "\x1b[230z", + KeyF8: "\x1b[231z", + KeyF9: "\x1b[232z", + KeyF10: "\x1b[233z", + KeyF11: "\x1b[234z", + KeyF12: "\x1b[235z", + AutoMargin: true, + InsertChar: "\x1b[@", + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/t/termite/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/t/termite/term.go new file mode 100644 index 0000000000..593d385561 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/t/termite/term.go @@ -0,0 +1,67 @@ +// Generated automatically. DO NOT HAND-EDIT.
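Per the hand-coded note above, the sun console cannot combine foreground and background into a single escape, so the sun-color entry defines SetFg and SetBg but no SetFgBg. The TColor helper added later in this patch already copes with that by emitting the two escapes back to back; a minimal sketch:

package main

import (
	"fmt"

	"github.com/gdamore/tcell/v2/terminfo"
	_ "github.com/gdamore/tcell/v2/terminfo/s/sun" // registers "sun" and "sun-color" via init()
)

func main() {
	ti, err := terminfo.LookupTerminfo("sun-color")
	if err != nil {
		panic(err)
	}
	// With no combined SetFgBg capability, TColor concatenates the
	// separate foreground and background escapes:
	fmt.Printf("%q\n", ti.TColor(1, 7)) // "\x1b[38;5;1m\x1b[48;5;7m"
}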
+ +package termite + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // VTE-based terminal + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "termite", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Italic: "\x1b[3m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + InsertChar: "\x1b[@", + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/t/tmux/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/t/tmux/term.go new file mode 100644 index 0000000000..44975d69a4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/t/tmux/term.go @@ -0,0 +1,71 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package tmux + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // tmux terminal multiplexer + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "tmux", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyEnd: "\x1b[4~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/terminfo.go b/vendor/github.com/gdamore/tcell/v2/terminfo/terminfo.go new file mode 100644 index 0000000000..7e17352cc4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/terminfo.go @@ -0,0 +1,827 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package terminfo + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrTermNotFound indicates that a suitable terminal entry could + // not be found. This can result from either not having TERM set, + // or from the TERM failing to support certain minimal functionality; + // in particular, absolute cursor addressability (the cup capability) + // is required. For example, legacy "adm3" lacks this capability, + // whereas the slightly newer "adm3a" supports it. This failure + // occurs most often with "dumb". + ErrTermNotFound = errors.New("terminal entry not found") +) + +// Terminfo represents a terminfo entry. Note that we use friendly names +// in Go, but when we write out JSON, we use the same names as terminfo. +// The name, aliases and smous, rmous fields do not come from terminfo directly.
+type Terminfo struct { + Name string + Aliases []string + Columns int // cols + Lines int // lines + Colors int // colors + Bell string // bell + Clear string // clear + EnterCA string // smcup + ExitCA string // rmcup + ShowCursor string // cnorm + HideCursor string // civis + AttrOff string // sgr0 + Underline string // smul + Bold string // bold + Blink string // blink + Reverse string // rev + Dim string // dim + Italic string // sitm + EnterKeypad string // smkx + ExitKeypad string // rmkx + SetFg string // setaf + SetBg string // setab + ResetFgBg string // op + SetCursor string // cup + CursorBack1 string // cub1 + CursorUp1 string // cuu1 + PadChar string // pad + KeyBackspace string // kbs + KeyF1 string // kf1 + KeyF2 string // kf2 + KeyF3 string // kf3 + KeyF4 string // kf4 + KeyF5 string // kf5 + KeyF6 string // kf6 + KeyF7 string // kf7 + KeyF8 string // kf8 + KeyF9 string // kf9 + KeyF10 string // kf10 + KeyF11 string // kf11 + KeyF12 string // kf12 + KeyF13 string // kf13 + KeyF14 string // kf14 + KeyF15 string // kf15 + KeyF16 string // kf16 + KeyF17 string // kf17 + KeyF18 string // kf18 + KeyF19 string // kf19 + KeyF20 string // kf20 + KeyF21 string // kf21 + KeyF22 string // kf22 + KeyF23 string // kf23 + KeyF24 string // kf24 + KeyF25 string // kf25 + KeyF26 string // kf26 + KeyF27 string // kf27 + KeyF28 string // kf28 + KeyF29 string // kf29 + KeyF30 string // kf30 + KeyF31 string // kf31 + KeyF32 string // kf32 + KeyF33 string // kf33 + KeyF34 string // kf34 + KeyF35 string // kf35 + KeyF36 string // kf36 + KeyF37 string // kf37 + KeyF38 string // kf38 + KeyF39 string // kf39 + KeyF40 string // kf40 + KeyF41 string // kf41 + KeyF42 string // kf42 + KeyF43 string // kf43 + KeyF44 string // kf44 + KeyF45 string // kf45 + KeyF46 string // kf46 + KeyF47 string // kf47 + KeyF48 string // kf48 + KeyF49 string // kf49 + KeyF50 string // kf50 + KeyF51 string // kf51 + KeyF52 string // kf52 + KeyF53 string // kf53 + KeyF54 string // kf54 + KeyF55 string // kf55 + KeyF56 string // kf56 + KeyF57 string // kf57 + KeyF58 string // kf58 + KeyF59 string // kf59 + KeyF60 string // kf60 + KeyF61 string // kf61 + KeyF62 string // kf62 + KeyF63 string // kf63 + KeyF64 string // kf64 + KeyInsert string // kich1 + KeyDelete string // kdch1 + KeyHome string // khome + KeyEnd string // kend + KeyHelp string // khlp + KeyPgUp string // kpp + KeyPgDn string // knp + KeyUp string // kcuu1 + KeyDown string // kcud1 + KeyLeft string // kcub1 + KeyRight string // kcuf1 + KeyBacktab string // kcbt + KeyExit string // kext + KeyClear string // kclr + KeyPrint string // kprt + KeyCancel string // kcan + Mouse string // kmous + AltChars string // acsc + EnterAcs string // smacs + ExitAcs string // rmacs + EnableAcs string // enacs + KeyShfRight string // kRIT + KeyShfLeft string // kLFT + KeyShfHome string // kHOM + KeyShfEnd string // kEND + KeyShfInsert string // kIC + KeyShfDelete string // kDC + + // These are non-standard extensions to terminfo. This includes + // true color support, and some additional keys. It's kind of bizarre + // that shifted variants of left and right exist, but not up and down. + // Terminal support for these is going to vary amongst XTerm + // emulations, so don't depend too much on them in your application.
+ + StrikeThrough string // smxx + SetFgBg string // setfgbg + SetFgBgRGB string // setfgbgrgb + SetFgRGB string // setfrgb + SetBgRGB string // setbrgb + KeyShfUp string // shift-up + KeyShfDown string // shift-down + KeyShfPgUp string // shift-kpp + KeyShfPgDn string // shift-knp + KeyCtrlUp string // ctrl-up + KeyCtrlDown string // ctrl-down + KeyCtrlRight string // ctrl-right + KeyCtrlLeft string // ctrl-left + KeyMetaUp string // meta-up + KeyMetaDown string // meta-down + KeyMetaRight string // meta-right + KeyMetaLeft string // meta-left + KeyAltUp string // alt-up + KeyAltDown string // alt-down + KeyAltRight string // alt-right + KeyAltLeft string // alt-left + KeyCtrlHome string + KeyCtrlEnd string + KeyMetaHome string + KeyMetaEnd string + KeyAltHome string + KeyAltEnd string + KeyAltShfUp string + KeyAltShfDown string + KeyAltShfLeft string + KeyAltShfRight string + KeyMetaShfUp string + KeyMetaShfDown string + KeyMetaShfLeft string + KeyMetaShfRight string + KeyCtrlShfUp string + KeyCtrlShfDown string + KeyCtrlShfLeft string + KeyCtrlShfRight string + KeyCtrlShfHome string + KeyCtrlShfEnd string + KeyAltShfHome string + KeyAltShfEnd string + KeyMetaShfHome string + KeyMetaShfEnd string + EnablePaste string // bracketed paste mode + DisablePaste string + PasteStart string + PasteEnd string + Modifiers int + InsertChar string // string to insert a character (ich1) + AutoMargin bool // true if writing to last cell in line advances + TrueColor bool // true if the terminal supports direct color +} + +const ( + ModifiersNone = 0 + ModifiersXTerm = 1 +) + +type stackElem struct { + s string + i int + isStr bool + isInt bool +} + +type stack []stackElem + +func (st stack) Push(v string) stack { + e := stackElem{ + s: v, + isStr: true, + } + return append(st, e) +} + +func (st stack) Pop() (string, stack) { + v := "" + if len(st) > 0 { + e := st[len(st)-1] + st = st[:len(st)-1] + if e.isStr { + v = e.s + } else { + v = strconv.Itoa(e.i) + } + } + return v, st +} + +func (st stack) PopInt() (int, stack) { + if len(st) > 0 { + e := st[len(st)-1] + st = st[:len(st)-1] + if e.isInt { + return e.i, st + } else if e.isStr { + // If the string that was pushed was the representation + // of a number e.g. '123', then return the number. If the + // conversion doesn't work, assume the string pushed was + // intended to return, as an int, the ASCII representation + // of the (one and only) character. + i, err := strconv.Atoi(e.s) + if err == nil { + return i, st + } else if len(e.s) >= 1 { + return int(e.s[0]), st + } + } + } + return 0, st +} + +func (st stack) PopBool() (bool, stack) { + if len(st) > 0 { + e := st[len(st)-1] + st = st[:len(st)-1] + if e.isStr { + if e.s == "1" { + return true, st + } + return false, st + } else if e.i == 1 { + return true, st + } else { + return false, st + } + } + return false, st +} + +func (st stack) PushInt(i int) stack { + e := stackElem{ + i: i, + isInt: true, + } + return append(st, e) +} + +func (st stack) PushBool(i bool) stack { + if i { + return st.PushInt(1) + } + return st.PushInt(0) +} + +// static vars +var svars [26]string + +// paramsBuffer handles some persistent state for TParm. Technically we +// could probably dispense with this, but caching buffer arrays gives us +// a nice little performance boost. Furthermore, we know that TParm is +// rarely (never?) called re-entrantly, so we can just reuse the same +// buffers, making it thread-safe by stashing a lock.
+type paramsBuffer struct { + out bytes.Buffer + buf bytes.Buffer + lk sync.Mutex +} + +// Start initializes the params buffer with the initial string data. +// It also locks the paramsBuffer. The caller must call End() when +// finished. +func (pb *paramsBuffer) Start(s string) { + pb.lk.Lock() + pb.out.Reset() + pb.buf.Reset() + pb.buf.WriteString(s) +} + +// End returns the final output from TParam, but it also releases the lock. +func (pb *paramsBuffer) End() string { + s := pb.out.String() + pb.lk.Unlock() + return s +} + +// NextCh returns the next input character to the expander. +func (pb *paramsBuffer) NextCh() (byte, error) { + return pb.buf.ReadByte() +} + +// PutCh "emits" (rather schedules for output) a single byte character. +func (pb *paramsBuffer) PutCh(ch byte) { + pb.out.WriteByte(ch) +} + +// PutString schedules a string for output. +func (pb *paramsBuffer) PutString(s string) { + pb.out.WriteString(s) +} + +var pb = ¶msBuffer{} + +// TParm takes a terminfo parameterized string, such as setaf or cup, and +// evaluates the string, and returns the result with the parameter +// applied. +func (t *Terminfo) TParm(s string, p ...int) string { + var stk stack + var a, b string + var ai, bi int + var ab bool + var dvars [26]string + var params [9]int + + pb.Start(s) + + // make sure we always have 9 parameters -- makes it easier + // later to skip checks + for i := 0; i < len(params) && i < len(p); i++ { + params[i] = p[i] + } + + nest := 0 + + for { + + ch, err := pb.NextCh() + if err != nil { + break + } + + if ch != '%' { + pb.PutCh(ch) + continue + } + + ch, err = pb.NextCh() + if err != nil { + // XXX Error + break + } + + switch ch { + case '%': // quoted % + pb.PutCh(ch) + + case 'i': // increment both parameters (ANSI cup support) + params[0]++ + params[1]++ + + case 'c', 's': + // NB: these, and 'd' below are special cased for + // efficiency. They could be handled by the richer + // format support below, less efficiently. + a, stk = stk.Pop() + pb.PutString(a) + + case 'd': + ai, stk = stk.PopInt() + pb.PutString(strconv.Itoa(ai)) + + case '0', '1', '2', '3', '4', 'x', 'X', 'o', ':': + // This is pretty suboptimal, but this is rarely used. + // None of the mainstream terminals use any of this, + // and it would surprise me if this code is ever + // executed outside of test cases. + f := "%" + if ch == ':' { + ch, _ = pb.NextCh() + } + f += string(ch) + for ch == '+' || ch == '-' || ch == '#' || ch == ' ' { + ch, _ = pb.NextCh() + f += string(ch) + } + for (ch >= '0' && ch <= '9') || ch == '.' 
{ + ch, _ = pb.NextCh() + f += string(ch) + } + switch ch { + case 'd', 'x', 'X', 'o': + ai, stk = stk.PopInt() + pb.PutString(fmt.Sprintf(f, ai)) + case 'c', 's': + a, stk = stk.Pop() + pb.PutString(fmt.Sprintf(f, a)) + } + + case 'p': // push parameter + ch, _ = pb.NextCh() + ai = int(ch - '1') + if ai >= 0 && ai < len(params) { + stk = stk.PushInt(params[ai]) + } else { + stk = stk.PushInt(0) + } + + case 'P': // pop & store variable + ch, _ = pb.NextCh() + if ch >= 'A' && ch <= 'Z' { + svars[int(ch-'A')], stk = stk.Pop() + } else if ch >= 'a' && ch <= 'z' { + dvars[int(ch-'a')], stk = stk.Pop() + } + + case 'g': // recall & push variable + ch, _ = pb.NextCh() + if ch >= 'A' && ch <= 'Z' { + stk = stk.Push(svars[int(ch-'A')]) + } else if ch >= 'a' && ch <= 'z' { + stk = stk.Push(dvars[int(ch-'a')]) + } + + case '\'': // push(char) + ch, _ = pb.NextCh() + pb.NextCh() // must be ' but we don't check + stk = stk.Push(string(ch)) + + case '{': // push(int) + ai = 0 + ch, _ = pb.NextCh() + for ch >= '0' && ch <= '9' { + ai *= 10 + ai += int(ch - '0') + ch, _ = pb.NextCh() + } + // ch must be '}' but no verification + stk = stk.PushInt(ai) + + case 'l': // push(strlen(pop)) + a, stk = stk.Pop() + stk = stk.PushInt(len(a)) + + case '+': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai + bi) + + case '-': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai - bi) + + case '*': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai * bi) + + case '/': + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + if bi != 0 { + stk = stk.PushInt(ai / bi) + } else { + stk = stk.PushInt(0) + } + + case 'm': // push(pop mod pop) + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + if bi != 0 { + stk = stk.PushInt(ai % bi) + } else { + stk = stk.PushInt(0) + } + + case '&': // AND + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai & bi) + + case '|': // OR + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai | bi) + + case '^': // XOR + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushInt(ai ^ bi) + + case '~': // bit complement + ai, stk = stk.PopInt() + stk = stk.PushInt(ai ^ -1) + + case '!': // logical NOT + ai, stk = stk.PopInt() + stk = stk.PushBool(ai != 0) + + case '=': // numeric compare or string compare + b, stk = stk.Pop() + a, stk = stk.Pop() + stk = stk.PushBool(a == b) + + case '>': // greater than, numeric + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushBool(ai > bi) + + case '<': // less than, numeric + bi, stk = stk.PopInt() + ai, stk = stk.PopInt() + stk = stk.PushBool(ai < bi) + + case '?': // start conditional + + case 't': + ab, stk = stk.PopBool() + if ab { + // just keep going + break + } + nest = 0 + ifloop: + // this loop consumes everything until we hit our else, + // or the end of the conditional + for { + ch, err = pb.NextCh() + if err != nil { + break + } + if ch != '%' { + continue + } + ch, _ = pb.NextCh() + switch ch { + case ';': + if nest == 0 { + break ifloop + } + nest-- + case '?': + nest++ + case 'e': + if nest == 0 { + break ifloop + } + } + } + + case 'e': + // if we got here, it means we didn't use the else + // in the 't' case above, and we should skip until + // the end of the conditional + nest = 0 + elloop: + for { + ch, err = pb.NextCh() + if err != nil { + break + } + if ch != '%' { + continue + } + ch, _ = pb.NextCh() + switch ch { + case ';': + if nest == 0 { + break elloop + } + nest-- + case '?': + nest++ + } + } + + case 
';': // endif + + } + } + + return pb.End() +} + +// TPuts emits the string to the writer, but expands inline padding +// indications (of the form $<[delay]> where [delay] is msec) to +// a suitable time (unless the terminfo string indicates this isn't needed +// by specifying npc - no padding). All Terminfo based strings should be +// emitted using this function. +func (t *Terminfo) TPuts(w io.Writer, s string) { + for { + beg := strings.Index(s, "$<") + if beg < 0 { + // Most strings don't need padding, which is good news! + io.WriteString(w, s) + return + } + io.WriteString(w, s[:beg]) + s = s[beg+2:] + end := strings.Index(s, ">") + if end < 0 { + // unterminated... just emit bytes unadulterated + io.WriteString(w, "$<"+s) + return + } + val := s[:end] + s = s[end+1:] + padus := 0 + unit := time.Millisecond + dot := false + loop: + for i := range val { + switch val[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + padus *= 10 + padus += int(val[i] - '0') + if dot { + unit /= 10 + } + case '.': + if !dot { + dot = true + } else { + break loop + } + default: + break loop + } + } + + // Curses historically uses padding to achieve "fine grained" + // delays. We have much better clocks these days, and so we + // do not rely on padding but simply sleep a bit. + if len(t.PadChar) > 0 { + time.Sleep(unit * time.Duration(padus)) + } + } +} + +// TGoto returns a string suitable for addressing the cursor at the given +// row and column. The origin 0, 0 is in the upper left corner of the screen. +func (t *Terminfo) TGoto(col, row int) string { + return t.TParm(t.SetCursor, row, col) +} + +// TColor returns a string corresponding to the given foreground and background +// colors. Either fg or bg can be set to -1 to elide. +func (t *Terminfo) TColor(fi, bi int) string { + rv := "" + // As a special case, we map bright colors to lower versions if the + // color table only holds 8. For the remaining 240 colors, the user + // is out of luck. Someday we could create a mapping table, but it's + // not worth it. + if t.Colors == 8 { + if fi > 7 && fi < 16 { + fi -= 8 + } + if bi > 7 && bi < 16 { + bi -= 8 + } + } + if t.Colors > fi && fi >= 0 { + rv += t.TParm(t.SetFg, fi) + } + if t.Colors > bi && bi >= 0 { + rv += t.TParm(t.SetBg, bi) + } + return rv +} + +var ( + dblock sync.Mutex + terminfos = make(map[string]*Terminfo) + aliases = make(map[string]string) +) + +// AddTerminfo can be called to register a new Terminfo entry. +func AddTerminfo(t *Terminfo) { + dblock.Lock() + terminfos[t.Name] = t + for _, x := range t.Aliases { + terminfos[x] = t + } + dblock.Unlock() +} + +// LookupTerminfo attempts to find a definition for the named $TERM. +func LookupTerminfo(name string) (*Terminfo, error) { + if name == "" { + // else on windows: index out of bounds + // on the name[0] reference below + return nil, ErrTermNotFound + } + + addtruecolor := false + add256color := false + switch os.Getenv("COLORTERM") { + case "truecolor", "24bit", "24-bit": + addtruecolor = true + } + dblock.Lock() + t := terminfos[name] + dblock.Unlock() + + // If the name ends in -truecolor, then fabricate an entry + // from the corresponding -256color, -color, or bare terminal.
+ if t != nil && t.TrueColor { + addtruecolor = true + } else if t == nil && strings.HasSuffix(name, "-truecolor") { + + suffixes := []string{ + "-256color", + "-88color", + "-color", + "", + } + base := name[:len(name)-len("-truecolor")] + for _, s := range suffixes { + if t, _ = LookupTerminfo(base + s); t != nil { + addtruecolor = true + break + } + } + } + + // If the name ends in -256color, maybe fabricate using the xterm 256 color sequences + if t == nil && strings.HasSuffix(name, "-256color") { + suffixes := []string{ + "-88color", + "-color", + } + base := name[:len(name)-len("-256color")] + for _, s := range suffixes { + if t, _ = LookupTerminfo(base + s); t != nil { + add256color = true + break + } + } + } + + if t == nil { + return nil, ErrTermNotFound + } + + switch os.Getenv("TCELL_TRUECOLOR") { + case "": + case "disable": + addtruecolor = false + default: + addtruecolor = true + } + + // If the user has requested 24-bit color with $COLORTERM, then + // amend the value (unless already present). This means we don't + // need to have a value present. + if addtruecolor && + t.SetFgBgRGB == "" && + t.SetFgRGB == "" && + t.SetBgRGB == "" { + + // Supply vanilla ISO 8613-6:1994 24-bit color sequences. + t.SetFgRGB = "\x1b[38;2;%p1%d;%p2%d;%p3%dm" + t.SetBgRGB = "\x1b[48;2;%p1%d;%p2%d;%p3%dm" + t.SetFgBgRGB = "\x1b[38;2;%p1%d;%p2%d;%p3%d;" + + "48;2;%p4%d;%p5%d;%p6%dm" + } + + if add256color { + t.Colors = 256 + t.SetFg = "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m" + t.SetBg = "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m" + t.SetFgBg = "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m" + t.ResetFgBg = "\x1b[39;49m" + } + return t, nil +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt100/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt100/term.go new file mode 100644 index 0000000000..0ae3918aca --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt100/term.go @@ -0,0 +1,49 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package vt100 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // dec vt100 (w/advanced video) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "vt100", + Aliases: []string{"vt100-am"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<50>", + AttrOff: "\x1b[m\x0f$<2>", + Underline: "\x1b[4m$<2>", + Bold: "\x1b[1m$<2>", + Blink: "\x1b[5m$<2>", + Reverse: "\x1b[7m$<2>", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH$<5>", + CursorBack1: "\b", + CursorUp1: "\x1b[A$<2>", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1bOt", + KeyF6: "\x1bOu", + KeyF7: "\x1bOv", + KeyF8: "\x1bOl", + KeyF9: "\x1bOw", + KeyF10: "\x1bOx", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt102/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt102/term.go new file mode 100644 index 0000000000..ec8dae2468 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt102/term.go @@ -0,0 +1,48 @@ +// Generated automatically. DO NOT HAND-EDIT. 
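The vt100 entry above is one of the few that still carries "$<N>" padding markers (for example, its Clear capability ends in "$<50>"). The TPuts method defined earlier in this patch strips the marker and sleeps for the indicated number of milliseconds instead, but only because these entries also set a PadChar. A minimal sketch, assuming the vendored import paths:

package main

import (
	"os"

	"github.com/gdamore/tcell/v2/terminfo"
	_ "github.com/gdamore/tcell/v2/terminfo/v/vt100" // registers "vt100" via init()
)

func main() {
	ti, err := terminfo.LookupTerminfo("vt100")
	if err != nil {
		panic(err)
	}
	// Clear is "\x1b[H\x1b[J$<50>": TPuts writes the escape bytes, then
	// sleeps roughly 50ms in place of the "$<50>" padding marker.
	ti.TPuts(os.Stdout, ti.Clear)
}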
+ +package vt102 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // dec vt102 + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "vt102", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<50>", + AttrOff: "\x1b[m\x0f$<2>", + Underline: "\x1b[4m$<2>", + Bold: "\x1b[1m$<2>", + Blink: "\x1b[5m$<2>", + Reverse: "\x1b[7m$<2>", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b(B\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH$<5>", + CursorBack1: "\b", + CursorUp1: "\x1b[A$<2>", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1bOt", + KeyF6: "\x1bOu", + KeyF7: "\x1bOv", + KeyF8: "\x1bOl", + KeyF9: "\x1bOw", + KeyF10: "\x1bOx", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt220/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt220/term.go new file mode 100644 index 0000000000..75ab9a8ca4 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt220/term.go @@ -0,0 +1,59 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package vt220 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // dec vt220 + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "vt220", + Aliases: []string{"vt200"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[J", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0$<2>", + ExitAcs: "\x1b(B$<4>", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + KeyHelp: "\x1b[28~", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt320/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt320/term.go new file mode 100644 index 0000000000..3fd3d39f0f --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt320/term.go @@ -0,0 +1,64 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package vt320 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // dec vt320 7 bit terminal + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "vt320", + Aliases: []string{"vt300"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1b[1~", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF13: "\x1b[25~", + KeyF14: "\x1b[26~", + KeyF15: "\x1b[28~", + KeyF16: "\x1b[29~", + KeyF17: "\x1b[31~", + KeyF18: "\x1b[32~", + KeyF19: "\x1b[33~", + KeyF20: "\x1b[34~", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt400/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt400/term.go new file mode 100644 index 0000000000..0c07e9d2c5 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt400/term.go @@ -0,0 +1,48 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package vt400 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // dec vt400 24x80 column autowrap + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "vt400", + Aliases: []string{"vt400-24", "dec-vt400"}, + Columns: 80, + Lines: 24, + Clear: "\x1b[H\x1b[J$<10/>", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + AutoMargin: true, + InsertChar: "\x1b[@", + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt420/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt420/term.go new file mode 100644 index 0000000000..094886e270 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt420/term.go @@ -0,0 +1,54 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package vt420 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // DEC VT420 + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "vt420", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b[H\x1b[2J$<50>", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x1b(B$<2>", + Underline: "\x1b[4m", + Bold: "\x1b[1m$<2>", + Blink: "\x1b[5m$<2>", + Reverse: "\x1b[7m$<2>", + EnterKeypad: "\x1b=", + ExitKeypad: "\x1b>", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0$<2>", + ExitAcs: "\x1b(B$<4>", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH$<10>", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1b[A", + KeyDown: "\x1b[B", + KeyRight: "\x1b[C", + KeyLeft: "\x1b[D", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\b", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[17~", + KeyF6: "\x1b[18~", + KeyF7: "\x1b[19~", + KeyF8: "\x1b[20~", + KeyF9: "\x1b[21~", + KeyF10: "\x1b[29~", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt52/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt52/term.go new file mode 100644 index 0000000000..ba49f7f5ee --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/v/vt52/term.go @@ -0,0 +1,29 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package vt52 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // dec vt52 + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "vt52", + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1bH\x1bJ", + PadChar: "\x00", + AltChars: "+h.k0affggolpnqprrss", + EnterAcs: "\x1bF", + ExitAcs: "\x1bG", + SetCursor: "\x1bY%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\x1bD", + CursorUp1: "\x1bA", + KeyUp: "\x1bA", + KeyDown: "\x1bB", + KeyRight: "\x1bC", + KeyLeft: "\x1bD", + KeyBackspace: "\b", + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy50/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy50/term.go new file mode 100644 index 0000000000..beced62d5c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy50/term.go @@ -0,0 +1,60 @@ +// Generated automatically. DO NOT HAND-EDIT. 
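Most SetCursor capabilities in these entries, vt420's included, use the "%i" form ("\x1b[%i%p1%d;%p2%dH$<10>"): TGoto takes 0-based coordinates, and %i increments both parameters because ANSI cursor addressing is 1-based. A minimal sketch of the expansion (the trailing "$<10>" pad marker is left in place for TPuts to consume):

package main

import (
	"fmt"

	"github.com/gdamore/tcell/v2/terminfo"
	_ "github.com/gdamore/tcell/v2/terminfo/v/vt420" // registers "vt420" via init()
)

func main() {
	ti, err := terminfo.LookupTerminfo("vt420")
	if err != nil {
		panic(err)
	}
	// TGoto(col, row) runs SetCursor through TParm; %i turns the 0-based
	// (4, 9) into the 1-based row 10, column 5 that ANSI expects.
	fmt.Printf("%q\n", ti.TGoto(4, 9)) // "\x1b[10;5H$<10>"
}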
+ +package wy50 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // Wyse 50 + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "wy50", + Aliases: []string{"wyse50"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b+$<20>", + ShowCursor: "\x1b`1", + HideCursor: "\x1b`0", + AttrOff: "\x1b(\x1bH\x03", + Dim: "\x1b`7\x1b)", + Reverse: "\x1b`6\x1b)", + PadChar: "\x00", + AltChars: "a;j5k3l2m1n8q:t4u9v=w0x6", + EnterAcs: "\x1bH\x02", + ExitAcs: "\x1bH\x03", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\n", + KeyRight: "\f", + KeyLeft: "\b", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bW", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyPgUp: "\x1bJ", + KeyPgDn: "\x1bK", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + KeyF10: "\x01I\r", + KeyF11: "\x01J\r", + KeyF12: "\x01K\r", + KeyF13: "\x01L\r", + KeyF14: "\x01M\r", + KeyF15: "\x01N\r", + KeyF16: "\x01O\r", + KeyPrint: "\x1bP", + KeyBacktab: "\x1bI", + KeyShfHome: "\x1b{", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy60/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy60/term.go new file mode 100644 index 0000000000..5b79310a77 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy60/term.go @@ -0,0 +1,64 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package wy60 + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // Wyse 60 + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "wy60", + Aliases: []string{"wyse60"}, + Columns: 80, + Lines: 24, + Bell: "\a", + Clear: "\x1b+$<100>", + EnterCA: "\x1bw0", + ExitCA: "\x1bw1", + ShowCursor: "\x1b`1", + HideCursor: "\x1b`0", + AttrOff: "\x1b(\x1bH\x03\x1bG0\x1bcD", + Underline: "\x1bG8", + Dim: "\x1bGp", + Blink: "\x1bG2", + Reverse: "\x1bG4", + PadChar: "\x00", + AltChars: "+/,.0[a2fxgqh1ihjYk?lZm@nEqDtCu4vAwBx3yszr{c~~", + EnterAcs: "\x1bcE", + ExitAcs: "\x1bcD", + SetCursor: "\x1b=%p1%' '%+%c%p2%' '%+%c", + CursorBack1: "\b", + CursorUp1: "\v", + KeyUp: "\v", + KeyDown: "\n", + KeyRight: "\f", + KeyLeft: "\b", + KeyInsert: "\x1bQ", + KeyDelete: "\x1bW", + KeyBackspace: "\b", + KeyHome: "\x1e", + KeyPgUp: "\x1bJ", + KeyPgDn: "\x1bK", + KeyF1: "\x01@\r", + KeyF2: "\x01A\r", + KeyF3: "\x01B\r", + KeyF4: "\x01C\r", + KeyF5: "\x01D\r", + KeyF6: "\x01E\r", + KeyF7: "\x01F\r", + KeyF8: "\x01G\r", + KeyF9: "\x01H\r", + KeyF10: "\x01I\r", + KeyF11: "\x01J\r", + KeyF12: "\x01K\r", + KeyF13: "\x01L\r", + KeyF14: "\x01M\r", + KeyF15: "\x01N\r", + KeyF16: "\x01O\r", + KeyPrint: "\x1bP", + KeyBacktab: "\x1bI", + KeyShfHome: "\x1b{", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy99_ansi/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy99_ansi/term.go new file mode 100644 index 0000000000..af470e46f3 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/w/wy99_ansi/term.go @@ -0,0 +1,116 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package wy99_ansi + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // Wyse WY-99GT in ansi mode (int'l PC keyboard) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "wy99-ansi", + Columns: 80, + Lines: 25, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<200>", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f\x1b[\"q", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h", + ExitKeypad: "\x1b[?1l", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooqqssttuuvvwwxx{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b$<1>", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[M", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF17: "\x1b[K", + KeyF18: "\x1b[31~", + KeyF19: "\x1b[32~", + KeyF20: "\x1b[33~", + KeyF21: "\x1b[34~", + KeyF22: "\x1b[35~", + KeyF23: "\x1b[1~", + KeyF24: "\x1b[2~", + KeyBacktab: "\x1b[z", + AutoMargin: true, + }) + + // Wyse WY-99GT in ansi mode (US PC keyboard) + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "wy99a-ansi", + Columns: 80, + Lines: 25, + Bell: "\a", + Clear: "\x1b[H\x1b[J$<200>", + ShowCursor: "\x1b[34h\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[m\x0f\x1b[\"q", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h", + ExitKeypad: "\x1b[?1l", + PadChar: "\x00", + AltChars: "``aaffggjjkkllmmnnooqqssttuuvvwwxx{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b$<1>", + CursorUp1: "\x1bM", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyBackspace: "\b", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[M", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyF17: "\x1b[K", + KeyF18: "\x1b[31~", + KeyF19: "\x1b[32~", + KeyF20: "\x1b[33~", + KeyF21: "\x1b[34~", + KeyF22: "\x1b[35~", + KeyF23: "\x1b[1~", + KeyF24: "\x1b[2~", + KeyBacktab: "\x1b[z", + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/x/xfce/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xfce/term.go new file mode 100644 index 0000000000..d70b2e910c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xfce/term.go @@ -0,0 +1,67 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package xfce + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // Xfce Terminal + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "xfce", + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b7\x1b[?47h", + ExitCA: "\x1b[2J\x1b[?47l\x1b8", + ShowCursor: "\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b[0m\x0f", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + PadChar: "\x00", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x0e", + ExitAcs: "\x0f", + EnableAcs: "\x1b)0", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/direct.go b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/direct.go new file mode 100644 index 0000000000..358ebae91c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/direct.go @@ -0,0 +1,92 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This terminal definition is derived from the xterm-256color definition, but +// makes use of the RGB property these terminals have to support direct color. +// The terminfo entry for this uses a new format for the color handling introduced +// by ncurses 6.1 (and used by nobody else), so this override ensures we get +// good handling even in the face of this.
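Following on from the note above, the xterm-direct entry sidesteps the ncurses 6.1 format entirely by hard-coding the ISO 8613-6 direct-color sequences in SetFgRGB, SetBgRGB, and SetFgBgRGB. A minimal sketch of what they expand to:

package main

import (
	"fmt"

	"github.com/gdamore/tcell/v2/terminfo"
	_ "github.com/gdamore/tcell/v2/terminfo/x/xterm" // registers "xterm-direct" via init()
)

func main() {
	ti, err := terminfo.LookupTerminfo("xterm-direct")
	if err != nil {
		panic(err)
	}
	// The RGB capabilities take the three channels as positional parameters:
	fmt.Printf("%q\n", ti.TParm(ti.SetFgRGB, 255, 128, 0)) // "\x1b[38;2;255;128;0m"
}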
+ +package xterm + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // derived from xterm-256color, but adds full RGB support + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "xterm-direct", + Aliases: []string{"xterm-truecolor"}, + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h\x1b[22;0;0t", + ExitCA: "\x1b[?1049l\x1b[23;0;0t", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + SetFgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%dm", + SetBgRGB: "\x1b[48;2;%p1%d;%p2%d;%p3%dm", + SetFgBgRGB: "\x1b[38;2;%p1%d;%p2%d;%p3%d;48;2;%p4%d;%p5%d;%p6%dm", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + TrueColor: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/term.go new file mode 100644 index 0000000000..daa19c3f17 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm/term.go @@ -0,0 +1,192 @@ +// Generated automatically. DO NOT HAND-EDIT. 
+ +package xterm + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // X11 terminal emulator + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "xterm", + Aliases: []string{"xterm-debian"}, + Columns: 80, + Lines: 24, + Colors: 8, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h\x1b[22;0;0t", + ExitCA: "\x1b[?1049l\x1b[23;0;0t", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[3%p1%dm", + SetBg: "\x1b[4%p1%dm", + SetFgBg: "\x1b[3%p1%d;4%p2%dm", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) + + // xterm with 88 colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "xterm-88color", + Columns: 80, + Lines: 24, + Colors: 88, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h\x1b[22;0;0t", + ExitCA: "\x1b[?1049l\x1b[23;0;0t", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) + + // xterm with 256 colors + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "xterm-256color", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h\x1b[22;0;0t", + ExitCA: "\x1b[?1049l\x1b[23;0;0t", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: 
"\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Blink: "\x1b[5m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty/term.go new file mode 100644 index 0000000000..ab50003e05 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_kitty/term.go @@ -0,0 +1,69 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package xterm_kitty + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // KovIdTTY + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "xterm-kitty", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h", + ExitKeypad: "\x1b[?1l", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + StrikeThrough: "\x1b[9m", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + TrueColor: true, + AutoMargin: true, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_termite/term.go b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_termite/term.go new file mode 100644 index 0000000000..f2d0221014 --- 
/dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terminfo/x/xterm_termite/term.go @@ -0,0 +1,66 @@ +// Generated automatically. DO NOT HAND-EDIT. + +package xterm_termite + +import "github.com/gdamore/tcell/v2/terminfo" + +func init() { + + // VTE-based terminal + terminfo.AddTerminfo(&terminfo.Terminfo{ + Name: "xterm-termite", + Columns: 80, + Lines: 24, + Colors: 256, + Bell: "\a", + Clear: "\x1b[H\x1b[2J", + EnterCA: "\x1b[?1049h", + ExitCA: "\x1b[?1049l", + ShowCursor: "\x1b[?12l\x1b[?25h", + HideCursor: "\x1b[?25l", + AttrOff: "\x1b(B\x1b[m", + Underline: "\x1b[4m", + Bold: "\x1b[1m", + Dim: "\x1b[2m", + Italic: "\x1b[3m", + Reverse: "\x1b[7m", + EnterKeypad: "\x1b[?1h\x1b=", + ExitKeypad: "\x1b[?1l\x1b>", + SetFg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m", + SetBg: "\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m", + SetFgBg: "\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;;%?%p2%{8}%<%t4%p2%d%e%p2%{16}%<%t10%p2%{8}%-%d%e48;5;%p2%d%;m", + ResetFgBg: "\x1b[39;49m", + AltChars: "++,,--..00``aaffgghhiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~", + EnterAcs: "\x1b(0", + ExitAcs: "\x1b(B", + Mouse: "\x1b[M", + SetCursor: "\x1b[%i%p1%d;%p2%dH", + CursorBack1: "\b", + CursorUp1: "\x1b[A", + KeyUp: "\x1bOA", + KeyDown: "\x1bOB", + KeyRight: "\x1bOC", + KeyLeft: "\x1bOD", + KeyInsert: "\x1b[2~", + KeyDelete: "\x1b[3~", + KeyBackspace: "\u007f", + KeyHome: "\x1bOH", + KeyEnd: "\x1bOF", + KeyPgUp: "\x1b[5~", + KeyPgDn: "\x1b[6~", + KeyF1: "\x1bOP", + KeyF2: "\x1bOQ", + KeyF3: "\x1bOR", + KeyF4: "\x1bOS", + KeyF5: "\x1b[15~", + KeyF6: "\x1b[17~", + KeyF7: "\x1b[18~", + KeyF8: "\x1b[19~", + KeyF9: "\x1b[20~", + KeyF10: "\x1b[21~", + KeyF11: "\x1b[23~", + KeyF12: "\x1b[24~", + KeyBacktab: "\x1b[Z", + Modifiers: 1, + }) +} diff --git a/vendor/github.com/gdamore/tcell/v2/terms_default.go b/vendor/github.com/gdamore/tcell/v2/terms_default.go new file mode 100644 index 0000000000..4ce763702c --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terms_default.go @@ -0,0 +1,23 @@ +// +build !tcell_minimal + +// Copyright 2019 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + // This imports the default terminal entries. To disable, use the + // tcell_minimal build tag. + _ "github.com/gdamore/tcell/v2/terminfo/extended" +) diff --git a/vendor/github.com/gdamore/tcell/v2/terms_dynamic.go b/vendor/github.com/gdamore/tcell/v2/terms_dynamic.go new file mode 100644 index 0000000000..8f0994a86d --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terms_dynamic.go @@ -0,0 +1,37 @@ +// +build !tcell_minimal,!nacl,!js,!zos,!plan9,!windows,!android + +// Copyright 2019 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. 
+// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + // This imports a dynamic version of the terminal database, which + // is built using infocmp. This relies on a working installation + // of infocmp (typically supplied with ncurses). We only do this + // for systems likely to have that -- i.e. UNIX based hosts. We + // also don't support Android here, because you really don't want + // to run external programs there. Generally the android terminals + // will be automatically included anyway. + "github.com/gdamore/tcell/v2/terminfo" + "github.com/gdamore/tcell/v2/terminfo/dynamic" +) + +func loadDynamicTerminfo(term string) (*terminfo.Terminfo, error) { + ti, _, e := dynamic.LoadTerminfo(term) + if e != nil { + return nil, e + } + return ti, nil +} diff --git a/vendor/github.com/gdamore/tcell/v2/terms_static.go b/vendor/github.com/gdamore/tcell/v2/terms_static.go new file mode 100644 index 0000000000..8b3fe12959 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/terms_static.go @@ -0,0 +1,27 @@ +// +build tcell_minimal nacl js zos plan9 windows android + +// Copyright 2019 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tcell + +import ( + "errors" + + "github.com/gdamore/tcell/v2/terminfo" +) + +func loadDynamicTerminfo(_ string) (*terminfo.Terminfo, error) { + return nil, errors.New("terminal type unsupported") +} diff --git a/vendor/github.com/gdamore/tcell/v2/tscreen.go b/vendor/github.com/gdamore/tcell/v2/tscreen.go new file mode 100644 index 0000000000..ebd61a2588 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/tscreen.go @@ -0,0 +1,1727 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tcell + +import ( + "bytes" + "errors" + "io" + "os" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/term" + "golang.org/x/text/transform" + + "github.com/gdamore/tcell/v2/terminfo" + + // import the stock terminals + _ "github.com/gdamore/tcell/v2/terminfo/base" +) + +// NewTerminfoScreen returns a Screen that uses the stock TTY interface +// and POSIX terminal control, combined with a terminfo description taken from +// the $TERM environment variable. It returns an error if the terminal +// is not supported for any reason. +// +// For terminals that do not support dynamic resize events, the $LINES +// $COLUMNS environment variables can be set to the actual window size, +// otherwise defaults taken from the terminal database are used. +func NewTerminfoScreen() (Screen, error) { + return NewTerminfoScreenFromTty(nil) +} + +// NewTerminfoScreenFromTty returns a Screen using a custom Tty implementation. +// If the passed in tty is nil, then a reasonable default (typically /dev/tty) +// is presumed, at least on UNIX hosts. (Windows hosts will typically fail this +// call altogether.) +func NewTerminfoScreenFromTty(tty Tty) (Screen, error) { + ti, e := terminfo.LookupTerminfo(os.Getenv("TERM")) + if e != nil { + ti, e = loadDynamicTerminfo(os.Getenv("TERM")) + if e != nil { + return nil, e + } + terminfo.AddTerminfo(ti) + } + t := &tScreen{ti: ti, tty: tty} + + t.keyexist = make(map[Key]bool) + t.keycodes = make(map[string]*tKeyCode) + if len(ti.Mouse) > 0 { + t.mouse = []byte(ti.Mouse) + } + t.prepareKeys() + t.buildAcsMap() + t.resizeQ = make(chan bool, 1) + t.fallback = make(map[rune]string) + for k, v := range RuneFallbacks { + t.fallback[k] = v + } + + return t, nil +} + +// tKeyCode represents a combination of a key code and modifiers. +type tKeyCode struct { + key Key + mod ModMask +} + +// tScreen represents a screen backed by a terminfo implementation. 
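+// Clients do not normally use tScreen directly; they obtain one from
+// NewTerminfoScreen (or NewTerminfoScreenFromTty above) and drive it through
+// the Screen interface. A minimal session, sketched with error handling
+// elided:
+//
+//	s, err := NewTerminfoScreen()
+//	if err != nil {
+//		panic(err)
+//	}
+//	if err = s.Init(); err != nil {
+//		panic(err)
+//	}
+//	defer s.Fini()
+//	s.SetContent(0, 0, 'x', nil, StyleDefault)
+//	s.Show()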
+type tScreen struct { + ti *terminfo.Terminfo + tty Tty + h int + w int + fini bool + cells CellBuffer + buffering bool // true if we are collecting writes to buf instead of sending directly to out + buf bytes.Buffer + curstyle Style + style Style + evch chan Event + resizeQ chan bool + quit chan struct{} + keyexist map[Key]bool + keycodes map[string]*tKeyCode + keychan chan []byte + keytimer *time.Timer + keyexpire time.Time + cx int + cy int + mouse []byte + clear bool + cursorx int + cursory int + wasbtn bool + acs map[rune]string + charset string + encoder transform.Transformer + decoder transform.Transformer + fallback map[rune]string + colors map[Color]Color + palette []Color + truecolor bool + escaped bool + buttondn bool + finiOnce sync.Once + enablePaste string + disablePaste string + saved *term.State + stopQ chan struct{} + running bool + wg sync.WaitGroup + mouseFlags MouseFlags + pasteEnabled bool + + sync.Mutex +} + +func (t *tScreen) Init() error { + if e := t.initialize(); e != nil { + return e + } + + t.evch = make(chan Event, 10) + t.keychan = make(chan []byte, 10) + t.keytimer = time.NewTimer(time.Millisecond * 50) + t.charset = "UTF-8" + + t.charset = getCharset() + if enc := GetEncoding(t.charset); enc != nil { + t.encoder = enc.NewEncoder() + t.decoder = enc.NewDecoder() + } else { + return ErrNoCharset + } + ti := t.ti + + // environment overrides + w := ti.Columns + h := ti.Lines + if i, _ := strconv.Atoi(os.Getenv("LINES")); i != 0 { + h = i + } + if i, _ := strconv.Atoi(os.Getenv("COLUMNS")); i != 0 { + w = i + } + if t.ti.SetFgBgRGB != "" || t.ti.SetFgRGB != "" || t.ti.SetBgRGB != "" { + t.truecolor = true + } + // A user who wants to have his themes honored can + // set this environment variable. + if os.Getenv("TCELL_TRUECOLOR") == "disable" { + t.truecolor = false + } + t.colors = make(map[Color]Color) + t.palette = make([]Color, t.nColors()) + for i := 0; i < t.nColors(); i++ { + t.palette[i] = Color(i) | ColorValid + // identity map for our builtin colors + t.colors[Color(i)|ColorValid] = Color(i) | ColorValid + } + + t.quit = make(chan struct{}) + + t.Lock() + t.cx = -1 + t.cy = -1 + t.style = StyleDefault + t.cells.Resize(w, h) + t.cursorx = -1 + t.cursory = -1 + t.resize() + t.Unlock() + + if err := t.engage(); err != nil { + return err + } + + return nil +} + +func (t *tScreen) prepareKeyMod(key Key, mod ModMask, val string) { + if val != "" { + // Do not override codes that already exist + if _, exist := t.keycodes[val]; !exist { + t.keyexist[key] = true + t.keycodes[val] = &tKeyCode{key: key, mod: mod} + } + } +} + +func (t *tScreen) prepareKeyModReplace(key Key, replace Key, mod ModMask, val string) { + if val != "" { + // Do not override codes that already exist + if old, exist := t.keycodes[val]; !exist || old.key == replace { + t.keyexist[key] = true + t.keycodes[val] = &tKeyCode{key: key, mod: mod} + } + } +} + +func (t *tScreen) prepareKeyModXTerm(key Key, val string) { + + if strings.HasPrefix(val, "\x1b[") && strings.HasSuffix(val, "~") { + + // Drop the trailing ~ + val = val[:len(val)-1] + + // These suffixes are calculated assuming Xterm style modifier suffixes. + // Please see https://invisible-island.net/xterm/ctlseqs/ctlseqs.pdf for + // more information (specifically "PC-Style Function Keys"). 
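+	// In that scheme the modifier parameter is 1 plus a bitmask
+	// (Shift=1, Alt=2, Ctrl=4, Meta=8); for example, Ctrl+F5 on a
+	// typical xterm arrives as \x1b[15;5~ (5 = 1 + Ctrl's 4).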
+ t.prepareKeyModReplace(key, key+12, ModShift, val+";2~") + t.prepareKeyModReplace(key, key+48, ModAlt, val+";3~") + t.prepareKeyModReplace(key, key+60, ModAlt|ModShift, val+";4~") + t.prepareKeyModReplace(key, key+24, ModCtrl, val+";5~") + t.prepareKeyModReplace(key, key+36, ModCtrl|ModShift, val+";6~") + t.prepareKeyMod(key, ModAlt|ModCtrl, val+";7~") + t.prepareKeyMod(key, ModShift|ModAlt|ModCtrl, val+";8~") + t.prepareKeyMod(key, ModMeta, val+";9~") + t.prepareKeyMod(key, ModMeta|ModShift, val+";10~") + t.prepareKeyMod(key, ModMeta|ModAlt, val+";11~") + t.prepareKeyMod(key, ModMeta|ModAlt|ModShift, val+";12~") + t.prepareKeyMod(key, ModMeta|ModCtrl, val+";13~") + t.prepareKeyMod(key, ModMeta|ModCtrl|ModShift, val+";14~") + t.prepareKeyMod(key, ModMeta|ModCtrl|ModAlt, val+";15~") + t.prepareKeyMod(key, ModMeta|ModCtrl|ModAlt|ModShift, val+";16~") + } else if strings.HasPrefix(val, "\x1bO") && len(val) == 3 { + val = val[2:] + t.prepareKeyModReplace(key, key+12, ModShift, "\x1b[1;2"+val) + t.prepareKeyModReplace(key, key+48, ModAlt, "\x1b[1;3"+val) + t.prepareKeyModReplace(key, key+24, ModCtrl, "\x1b[1;5"+val) + t.prepareKeyModReplace(key, key+36, ModCtrl|ModShift, "\x1b[1;6"+val) + t.prepareKeyModReplace(key, key+60, ModAlt|ModShift, "\x1b[1;4"+val) + t.prepareKeyMod(key, ModAlt|ModCtrl, "\x1b[1;7"+val) + t.prepareKeyMod(key, ModShift|ModAlt|ModCtrl, "\x1b[1;8"+val) + t.prepareKeyMod(key, ModMeta, "\x1b[1;9"+val) + t.prepareKeyMod(key, ModMeta|ModShift, "\x1b[1;10"+val) + t.prepareKeyMod(key, ModMeta|ModAlt, "\x1b[1;11"+val) + t.prepareKeyMod(key, ModMeta|ModAlt|ModShift, "\x1b[1;12"+val) + t.prepareKeyMod(key, ModMeta|ModCtrl, "\x1b[1;13"+val) + t.prepareKeyMod(key, ModMeta|ModCtrl|ModShift, "\x1b[1;14"+val) + t.prepareKeyMod(key, ModMeta|ModCtrl|ModAlt, "\x1b[1;15"+val) + t.prepareKeyMod(key, ModMeta|ModCtrl|ModAlt|ModShift, "\x1b[1;16"+val) + } +} + +func (t *tScreen) prepareXtermModifiers() { + if t.ti.Modifiers != terminfo.ModifiersXTerm { + return + } + t.prepareKeyModXTerm(KeyRight, t.ti.KeyRight) + t.prepareKeyModXTerm(KeyLeft, t.ti.KeyLeft) + t.prepareKeyModXTerm(KeyUp, t.ti.KeyUp) + t.prepareKeyModXTerm(KeyDown, t.ti.KeyDown) + t.prepareKeyModXTerm(KeyInsert, t.ti.KeyInsert) + t.prepareKeyModXTerm(KeyDelete, t.ti.KeyDelete) + t.prepareKeyModXTerm(KeyPgUp, t.ti.KeyPgUp) + t.prepareKeyModXTerm(KeyPgDn, t.ti.KeyPgDn) + t.prepareKeyModXTerm(KeyHome, t.ti.KeyHome) + t.prepareKeyModXTerm(KeyEnd, t.ti.KeyEnd) + t.prepareKeyModXTerm(KeyF1, t.ti.KeyF1) + t.prepareKeyModXTerm(KeyF2, t.ti.KeyF2) + t.prepareKeyModXTerm(KeyF3, t.ti.KeyF3) + t.prepareKeyModXTerm(KeyF4, t.ti.KeyF4) + t.prepareKeyModXTerm(KeyF5, t.ti.KeyF5) + t.prepareKeyModXTerm(KeyF6, t.ti.KeyF6) + t.prepareKeyModXTerm(KeyF7, t.ti.KeyF7) + t.prepareKeyModXTerm(KeyF8, t.ti.KeyF8) + t.prepareKeyModXTerm(KeyF9, t.ti.KeyF9) + t.prepareKeyModXTerm(KeyF10, t.ti.KeyF10) + t.prepareKeyModXTerm(KeyF11, t.ti.KeyF11) + t.prepareKeyModXTerm(KeyF12, t.ti.KeyF12) +} + +func (t *tScreen) prepareBracketedPaste() { + // Another workaround for lack of reporting in terminfo. + // We assume if the terminal has a mouse entry, that it + // offers bracketed paste. But we allow specific overrides + // via our terminal database. 
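+	// The fallback sequences below are the de facto xterm bracketed
+	// paste protocol: CSI ? 2004 h/l toggles the mode, and pasted text
+	// arrives wrapped between the \x1b[200~ and \x1b[201~ markers.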
+ if t.ti.EnablePaste != "" { + t.enablePaste = t.ti.EnablePaste + t.disablePaste = t.ti.DisablePaste + t.prepareKey(keyPasteStart, t.ti.PasteStart) + t.prepareKey(keyPasteEnd, t.ti.PasteEnd) + } else if t.ti.Mouse != "" { + t.enablePaste = "\x1b[?2004h" + t.disablePaste = "\x1b[?2004l" + t.prepareKey(keyPasteStart, "\x1b[200~") + t.prepareKey(keyPasteEnd, "\x1b[201~") + } +} + +func (t *tScreen) prepareKey(key Key, val string) { + t.prepareKeyMod(key, ModNone, val) +} + +func (t *tScreen) prepareKeys() { + ti := t.ti + t.prepareKey(KeyBackspace, ti.KeyBackspace) + t.prepareKey(KeyF1, ti.KeyF1) + t.prepareKey(KeyF2, ti.KeyF2) + t.prepareKey(KeyF3, ti.KeyF3) + t.prepareKey(KeyF4, ti.KeyF4) + t.prepareKey(KeyF5, ti.KeyF5) + t.prepareKey(KeyF6, ti.KeyF6) + t.prepareKey(KeyF7, ti.KeyF7) + t.prepareKey(KeyF8, ti.KeyF8) + t.prepareKey(KeyF9, ti.KeyF9) + t.prepareKey(KeyF10, ti.KeyF10) + t.prepareKey(KeyF11, ti.KeyF11) + t.prepareKey(KeyF12, ti.KeyF12) + t.prepareKey(KeyF13, ti.KeyF13) + t.prepareKey(KeyF14, ti.KeyF14) + t.prepareKey(KeyF15, ti.KeyF15) + t.prepareKey(KeyF16, ti.KeyF16) + t.prepareKey(KeyF17, ti.KeyF17) + t.prepareKey(KeyF18, ti.KeyF18) + t.prepareKey(KeyF19, ti.KeyF19) + t.prepareKey(KeyF20, ti.KeyF20) + t.prepareKey(KeyF21, ti.KeyF21) + t.prepareKey(KeyF22, ti.KeyF22) + t.prepareKey(KeyF23, ti.KeyF23) + t.prepareKey(KeyF24, ti.KeyF24) + t.prepareKey(KeyF25, ti.KeyF25) + t.prepareKey(KeyF26, ti.KeyF26) + t.prepareKey(KeyF27, ti.KeyF27) + t.prepareKey(KeyF28, ti.KeyF28) + t.prepareKey(KeyF29, ti.KeyF29) + t.prepareKey(KeyF30, ti.KeyF30) + t.prepareKey(KeyF31, ti.KeyF31) + t.prepareKey(KeyF32, ti.KeyF32) + t.prepareKey(KeyF33, ti.KeyF33) + t.prepareKey(KeyF34, ti.KeyF34) + t.prepareKey(KeyF35, ti.KeyF35) + t.prepareKey(KeyF36, ti.KeyF36) + t.prepareKey(KeyF37, ti.KeyF37) + t.prepareKey(KeyF38, ti.KeyF38) + t.prepareKey(KeyF39, ti.KeyF39) + t.prepareKey(KeyF40, ti.KeyF40) + t.prepareKey(KeyF41, ti.KeyF41) + t.prepareKey(KeyF42, ti.KeyF42) + t.prepareKey(KeyF43, ti.KeyF43) + t.prepareKey(KeyF44, ti.KeyF44) + t.prepareKey(KeyF45, ti.KeyF45) + t.prepareKey(KeyF46, ti.KeyF46) + t.prepareKey(KeyF47, ti.KeyF47) + t.prepareKey(KeyF48, ti.KeyF48) + t.prepareKey(KeyF49, ti.KeyF49) + t.prepareKey(KeyF50, ti.KeyF50) + t.prepareKey(KeyF51, ti.KeyF51) + t.prepareKey(KeyF52, ti.KeyF52) + t.prepareKey(KeyF53, ti.KeyF53) + t.prepareKey(KeyF54, ti.KeyF54) + t.prepareKey(KeyF55, ti.KeyF55) + t.prepareKey(KeyF56, ti.KeyF56) + t.prepareKey(KeyF57, ti.KeyF57) + t.prepareKey(KeyF58, ti.KeyF58) + t.prepareKey(KeyF59, ti.KeyF59) + t.prepareKey(KeyF60, ti.KeyF60) + t.prepareKey(KeyF61, ti.KeyF61) + t.prepareKey(KeyF62, ti.KeyF62) + t.prepareKey(KeyF63, ti.KeyF63) + t.prepareKey(KeyF64, ti.KeyF64) + t.prepareKey(KeyInsert, ti.KeyInsert) + t.prepareKey(KeyDelete, ti.KeyDelete) + t.prepareKey(KeyHome, ti.KeyHome) + t.prepareKey(KeyEnd, ti.KeyEnd) + t.prepareKey(KeyUp, ti.KeyUp) + t.prepareKey(KeyDown, ti.KeyDown) + t.prepareKey(KeyLeft, ti.KeyLeft) + t.prepareKey(KeyRight, ti.KeyRight) + t.prepareKey(KeyPgUp, ti.KeyPgUp) + t.prepareKey(KeyPgDn, ti.KeyPgDn) + t.prepareKey(KeyHelp, ti.KeyHelp) + t.prepareKey(KeyPrint, ti.KeyPrint) + t.prepareKey(KeyCancel, ti.KeyCancel) + t.prepareKey(KeyExit, ti.KeyExit) + t.prepareKey(KeyBacktab, ti.KeyBacktab) + + t.prepareKeyMod(KeyRight, ModShift, ti.KeyShfRight) + t.prepareKeyMod(KeyLeft, ModShift, ti.KeyShfLeft) + t.prepareKeyMod(KeyUp, ModShift, ti.KeyShfUp) + t.prepareKeyMod(KeyDown, ModShift, ti.KeyShfDown) + t.prepareKeyMod(KeyHome, ModShift, ti.KeyShfHome) + 
t.prepareKeyMod(KeyEnd, ModShift, ti.KeyShfEnd)
+	t.prepareKeyMod(KeyPgUp, ModShift, ti.KeyShfPgUp)
+	t.prepareKeyMod(KeyPgDn, ModShift, ti.KeyShfPgDn)
+
+	t.prepareKeyMod(KeyRight, ModCtrl, ti.KeyCtrlRight)
+	t.prepareKeyMod(KeyLeft, ModCtrl, ti.KeyCtrlLeft)
+	t.prepareKeyMod(KeyUp, ModCtrl, ti.KeyCtrlUp)
+	t.prepareKeyMod(KeyDown, ModCtrl, ti.KeyCtrlDown)
+	t.prepareKeyMod(KeyHome, ModCtrl, ti.KeyCtrlHome)
+	t.prepareKeyMod(KeyEnd, ModCtrl, ti.KeyCtrlEnd)
+
+	// Sadly, xterm handling of keycodes is somewhat erratic. In
+	// particular, different codes are sent depending on whether
+	// application mode is in use or not, and the entries for many of
+	// these are simply absent from terminfo on many systems. So we
+	// insert a number of escape sequences if they are not already
+	// used, in order to have the widest correct usage. Note that
+	// prepareKey will not inject codes if the escape sequence is
+	// already known. We also only do this for terminals that have
+	// the application mode present.
+
+	// Cursor mode
+	if ti.EnterKeypad != "" {
+		t.prepareKey(KeyUp, "\x1b[A")
+		t.prepareKey(KeyDown, "\x1b[B")
+		t.prepareKey(KeyRight, "\x1b[C")
+		t.prepareKey(KeyLeft, "\x1b[D")
+		t.prepareKey(KeyEnd, "\x1b[F")
+		t.prepareKey(KeyHome, "\x1b[H")
+		t.prepareKey(KeyDelete, "\x1b[3~")
+		t.prepareKey(KeyHome, "\x1b[1~")
+		t.prepareKey(KeyEnd, "\x1b[4~")
+		t.prepareKey(KeyPgUp, "\x1b[5~")
+		t.prepareKey(KeyPgDn, "\x1b[6~")
+
+		// Application mode
+		t.prepareKey(KeyUp, "\x1bOA")
+		t.prepareKey(KeyDown, "\x1bOB")
+		t.prepareKey(KeyRight, "\x1bOC")
+		t.prepareKey(KeyLeft, "\x1bOD")
+		t.prepareKey(KeyHome, "\x1bOH")
+	}
+
+	t.prepareKey(keyPasteStart, ti.PasteStart)
+	t.prepareKey(keyPasteEnd, ti.PasteEnd)
+	t.prepareXtermModifiers()
+	t.prepareBracketedPaste()
+
+outer:
+	// Add key mappings for control keys.
+	for i := 0; i < ' '; i++ {
+		// Do not insert direct key codes for ambiguous keys.
+		// For example, ESC is used for lots of other keys, so
+		// when parsing this we don't want to fast path handling
+		// of it, but instead wait a bit before parsing it in
+		// isolation.
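+		// (A lone 0x1b could be the Esc key itself or the first byte
+		// of a longer sequence such as \x1b[A for Up, so it must not
+		// be matched eagerly here.)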
+ for esc := range t.keycodes { + if []byte(esc)[0] == byte(i) { + continue outer + } + } + + t.keyexist[Key(i)] = true + + mod := ModCtrl + switch Key(i) { + case KeyBS, KeyTAB, KeyESC, KeyCR: + // directly type-able- no control sequence + mod = ModNone + } + t.keycodes[string(rune(i))] = &tKeyCode{key: Key(i), mod: mod} + } +} + +func (t *tScreen) Fini() { + t.finiOnce.Do(t.finish) +} + +func (t *tScreen) finish() { + close(t.quit) + t.finalize() +} + +func (t *tScreen) SetStyle(style Style) { + t.Lock() + if !t.fini { + t.style = style + } + t.Unlock() +} + +func (t *tScreen) Clear() { + t.Fill(' ', t.style) +} + +func (t *tScreen) Fill(r rune, style Style) { + t.Lock() + if !t.fini { + t.cells.Fill(r, style) + } + t.Unlock() +} + +func (t *tScreen) SetContent(x, y int, mainc rune, combc []rune, style Style) { + t.Lock() + if !t.fini { + t.cells.SetContent(x, y, mainc, combc, style) + } + t.Unlock() +} + +func (t *tScreen) GetContent(x, y int) (rune, []rune, Style, int) { + t.Lock() + mainc, combc, style, width := t.cells.GetContent(x, y) + t.Unlock() + return mainc, combc, style, width +} + +func (t *tScreen) SetCell(x, y int, style Style, ch ...rune) { + if len(ch) > 0 { + t.SetContent(x, y, ch[0], ch[1:], style) + } else { + t.SetContent(x, y, ' ', nil, style) + } +} + +func (t *tScreen) encodeRune(r rune, buf []byte) []byte { + + nb := make([]byte, 6) + ob := make([]byte, 6) + num := utf8.EncodeRune(ob, r) + ob = ob[:num] + dst := 0 + var err error + if enc := t.encoder; enc != nil { + enc.Reset() + dst, _, err = enc.Transform(nb, ob, true) + } + if err != nil || dst == 0 || nb[0] == '\x1a' { + // Combining characters are elided + if len(buf) == 0 { + if acs, ok := t.acs[r]; ok { + buf = append(buf, []byte(acs)...) + } else if fb, ok := t.fallback[r]; ok { + buf = append(buf, []byte(fb)...) + } else { + buf = append(buf, '?') + } + } + } else { + buf = append(buf, nb[:dst]...) + } + + return buf +} + +func (t *tScreen) sendFgBg(fg Color, bg Color) { + ti := t.ti + if ti.Colors == 0 { + return + } + if fg == ColorReset || bg == ColorReset { + t.TPuts(ti.ResetFgBg) + } + if t.truecolor { + if ti.SetFgBgRGB != "" && fg.IsRGB() && bg.IsRGB() { + r1, g1, b1 := fg.RGB() + r2, g2, b2 := bg.RGB() + t.TPuts(ti.TParm(ti.SetFgBgRGB, + int(r1), int(g1), int(b1), + int(r2), int(g2), int(b2))) + return + } + + if fg.IsRGB() && ti.SetFgRGB != "" { + r, g, b := fg.RGB() + t.TPuts(ti.TParm(ti.SetFgRGB, int(r), int(g), int(b))) + fg = ColorDefault + } + + if bg.IsRGB() && ti.SetBgRGB != "" { + r, g, b := bg.RGB() + t.TPuts(ti.TParm(ti.SetBgRGB, + int(r), int(g), int(b))) + bg = ColorDefault + } + } + + if fg.Valid() { + if v, ok := t.colors[fg]; ok { + fg = v + } else { + v = FindColor(fg, t.palette) + t.colors[fg] = v + fg = v + } + } + + if bg.Valid() { + if v, ok := t.colors[bg]; ok { + bg = v + } else { + v = FindColor(bg, t.palette) + t.colors[bg] = v + bg = v + } + } + + if fg.Valid() && bg.Valid() && ti.SetFgBg != "" { + t.TPuts(ti.TParm(ti.SetFgBg, int(fg&0xff), int(bg&0xff))) + } else { + if fg.Valid() && ti.SetFg != "" { + t.TPuts(ti.TParm(ti.SetFg, int(fg&0xff))) + } + if bg.Valid() && ti.SetBg != "" { + t.TPuts(ti.TParm(ti.SetBg, int(bg&0xff))) + } + } +} + +func (t *tScreen) drawCell(x, y int) int { + + ti := t.ti + + mainc, combc, style, width := t.cells.GetContent(x, y) + if !t.cells.Dirty(x, y) { + return width + } + + if y == t.h-1 && x == t.w-1 && t.ti.AutoMargin && ti.InsertChar != "" { + // our solution is somewhat goofy. 
+	// we write to the second to the last cell what we want in the last cell, then we
+	// insert a character at that 2nd to last position to shift the last column into
+	// place, then we rewrite that 2nd to last cell. Old terminals suck.
+	t.TPuts(ti.TGoto(x-1, y))
+	defer func() {
+		t.TPuts(ti.TGoto(x-1, y))
+		t.TPuts(ti.InsertChar)
+		t.cy = y
+		t.cx = x - 1
+		t.cells.SetDirty(x-1, y, true)
+		_ = t.drawCell(x-1, y)
+		t.TPuts(t.ti.TGoto(0, 0))
+		t.cy = 0
+		t.cx = 0
+	}()
+	} else if t.cy != y || t.cx != x {
+		t.TPuts(ti.TGoto(x, y))
+		t.cx = x
+		t.cy = y
+	}
+
+	if style == StyleDefault {
+		style = t.style
+	}
+	if style != t.curstyle {
+		fg, bg, attrs := style.Decompose()
+
+		t.TPuts(ti.AttrOff)
+
+		t.sendFgBg(fg, bg)
+		if attrs&AttrBold != 0 {
+			t.TPuts(ti.Bold)
+		}
+		if attrs&AttrUnderline != 0 {
+			t.TPuts(ti.Underline)
+		}
+		if attrs&AttrReverse != 0 {
+			t.TPuts(ti.Reverse)
+		}
+		if attrs&AttrBlink != 0 {
+			t.TPuts(ti.Blink)
+		}
+		if attrs&AttrDim != 0 {
+			t.TPuts(ti.Dim)
+		}
+		if attrs&AttrItalic != 0 {
+			t.TPuts(ti.Italic)
+		}
+		if attrs&AttrStrikeThrough != 0 {
+			t.TPuts(ti.StrikeThrough)
+		}
+		t.curstyle = style
+	}
+	// now emit runes - taking care to not overrun width with a
+	// wide character, and to ensure that we emit exactly one regular
+	// character followed by any residual combining characters
+
+	if width < 1 {
+		width = 1
+	}
+
+	var str string
+
+	buf := make([]byte, 0, 6)
+
+	buf = t.encodeRune(mainc, buf)
+	for _, r := range combc {
+		buf = t.encodeRune(r, buf)
+	}
+
+	str = string(buf)
+	if width > 1 && str == "?" {
+		// No FullWidth character support
+		str = "? "
+		t.cx = -1
+	}
+
+	if x > t.w-width {
+		// too wide to fit; emit a single space instead
+		width = 1
+		str = " "
+	}
+	t.writeString(str)
+	t.cx += width
+	t.cells.SetDirty(x, y, false)
+	if width > 1 {
+		t.cx = -1
+	}
+
+	return width
+}
+
+func (t *tScreen) ShowCursor(x, y int) {
+	t.Lock()
+	t.cursorx = x
+	t.cursory = y
+	t.Unlock()
+}
+
+func (t *tScreen) HideCursor() {
+	t.ShowCursor(-1, -1)
+}
+
+func (t *tScreen) showCursor() {
+
+	x, y := t.cursorx, t.cursory
+	w, h := t.cells.Size()
+	if x < 0 || y < 0 || x >= w || y >= h {
+		t.hideCursor()
+		return
+	}
+	t.TPuts(t.ti.TGoto(x, y))
+	t.TPuts(t.ti.ShowCursor)
+	t.cx = x
+	t.cy = y
+}
+
+// writeString sends a string to the terminal. The string is sent as-is and
+// this function does not expand inline padding indications (of the form
+// $<[delay]> where [delay] is msec). In order to have these expanded, use
+// TPuts. If the screen is "buffering", the string is collected in a buffer,
+// with the intention that the entire buffer be sent to the terminal in one
+// write operation at some point later.
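+// For reference, a padding indication such as $<50> in a terminfo string
+// requests a 50 msec pause; TPuts interprets these, while writeString sends
+// the bytes through untouched.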
+func (t *tScreen) writeString(s string) { + if t.buffering { + _, _ = io.WriteString(&t.buf, s) + } else { + _, _ = io.WriteString(t.tty, s) + } +} + +func (t *tScreen) TPuts(s string) { + if t.buffering { + t.ti.TPuts(&t.buf, s) + } else { + t.ti.TPuts(t.tty, s) + } +} + +func (t *tScreen) Show() { + t.Lock() + if !t.fini { + t.resize() + t.draw() + } + t.Unlock() +} + +func (t *tScreen) clearScreen() { + fg, bg, _ := t.style.Decompose() + t.sendFgBg(fg, bg) + t.TPuts(t.ti.Clear) + t.clear = false +} + +func (t *tScreen) hideCursor() { + // does not update cursor position + if t.ti.HideCursor != "" { + t.TPuts(t.ti.HideCursor) + } else { + // No way to hide cursor, stick it + // at bottom right of screen + t.cx, t.cy = t.cells.Size() + t.TPuts(t.ti.TGoto(t.cx, t.cy)) + } +} + +func (t *tScreen) draw() { + // clobber cursor position, because we're gonna change it all + t.cx = -1 + t.cy = -1 + + t.buf.Reset() + t.buffering = true + defer func() { + t.buffering = false + }() + + // hide the cursor while we move stuff around + t.hideCursor() + + if t.clear { + t.clearScreen() + } + + for y := 0; y < t.h; y++ { + for x := 0; x < t.w; x++ { + width := t.drawCell(x, y) + if width > 1 { + if x+1 < t.w { + // this is necessary so that if we ever + // go back to drawing that cell, we + // actually will *draw* it. + t.cells.SetDirty(x+1, y, true) + } + } + x += width - 1 + } + } + + // restore the cursor + t.showCursor() + + _, _ = t.buf.WriteTo(t.tty) +} + +func (t *tScreen) EnableMouse(flags ...MouseFlags) { + var f MouseFlags + flagsPresent := false + for _, flag := range flags { + f |= flag + flagsPresent = true + } + if !flagsPresent { + f = MouseMotionEvents + } + + t.Lock() + t.mouseFlags = f + t.enableMouse(f) + t.Unlock() +} + +func (t *tScreen) enableMouse(f MouseFlags) { + // Rather than using terminfo to find mouse escape sequences, we rely on the fact that + // pretty much *every* terminal that supports mouse tracking follows the + // XTerm standards (the modern ones). + if len(t.mouse) != 0 { + // start by disabling all tracking. + t.TPuts("\x1b[?1000l\x1b[?1002l\x1b[?1003l\x1b[?1006l") + if f&MouseMotionEvents != 0 { + t.TPuts("\x1b[?1003h\x1b[?1006h") + } else if f&MouseDragEvents != 0 { + t.TPuts("\x1b[?1002h\x1b[?1006h") + } else if f&MouseButtonEvents != 0 { + t.TPuts("\x1b[?1000h\x1b[?1006h") + } + } +} + +func (t *tScreen) DisableMouse() { + t.Lock() + t.mouseFlags = 0 + t.enableMouse(0) + t.Unlock() +} + +func (t *tScreen) EnablePaste() { + t.Lock() + t.pasteEnabled = true + t.enablePasting(true) + t.Unlock() +} + +func (t *tScreen) DisablePaste() { + t.Lock() + t.pasteEnabled = false + t.enablePasting(false) + t.Unlock() +} + +func (t *tScreen) enablePasting(on bool) { + var s string + if on { + s = t.enablePaste + } else { + s = t.disablePaste + } + if s != "" { + t.TPuts(s) + } +} + +func (t *tScreen) Size() (int, int) { + t.Lock() + w, h := t.w, t.h + t.Unlock() + return w, h +} + +func (t *tScreen) resize() { + if w, h, e := t.tty.WindowSize(); e == nil { + if w != t.w || h != t.h { + t.cx = -1 + t.cy = -1 + + t.cells.Resize(w, h) + t.cells.Invalidate() + t.h = h + t.w = w + ev := NewEventResize(w, h) + _ = t.PostEvent(ev) + } + } +} + +func (t *tScreen) Colors() int { + // this doesn't change, no need for lock + if t.truecolor { + return 1 << 24 + } + return t.ti.Colors +} + +// nColors returns the size of the built-in palette. +// This is distinct from Colors(), as it will generally +// always be a small number. 
(<= 256)
+func (t *tScreen) nColors() int {
+	return t.ti.Colors
+}
+
+func (t *tScreen) ChannelEvents(ch chan<- Event, quit <-chan struct{}) {
+	defer close(ch)
+	for {
+		select {
+		case <-quit:
+			return
+		case <-t.quit:
+			return
+		case ev := <-t.evch:
+			select {
+			case <-quit:
+				return
+			case <-t.quit:
+				return
+			case ch <- ev:
+			}
+		}
+	}
+}
+
+func (t *tScreen) PollEvent() Event {
+	select {
+	case <-t.quit:
+		return nil
+	case ev := <-t.evch:
+		return ev
+	}
+}
+
+func (t *tScreen) HasPendingEvent() bool {
+	return len(t.evch) > 0
+}
+
+// vtACSNames is a map of bytes defined by terminfo that are used in
+// the terminal's Alternate Character Set to represent other glyphs.
+// For example, the upper left corner of the box drawing set can be
+// displayed by printing "l" while in the alternate character set.
+// It's not quite that simple, since the "l" is the terminfo name,
+// and it may be necessary to use a different character based on
+// the terminal implementation (or the terminal may lack support for
+// this altogether). See buildAcsMap below for detail.
+var vtACSNames = map[byte]rune{
+	'+': RuneRArrow,
+	',': RuneLArrow,
+	'-': RuneUArrow,
+	'.': RuneDArrow,
+	'0': RuneBlock,
+	'`': RuneDiamond,
+	'a': RuneCkBoard,
+	'b': '␉', // VT100, Not defined by terminfo
+	'c': '␌', // VT100, Not defined by terminfo
+	'd': '␋', // VT100, Not defined by terminfo
+	'e': '␊', // VT100, Not defined by terminfo
+	'f': RuneDegree,
+	'g': RunePlMinus,
+	'h': RuneBoard,
+	'i': RuneLantern,
+	'j': RuneLRCorner,
+	'k': RuneURCorner,
+	'l': RuneULCorner,
+	'm': RuneLLCorner,
+	'n': RunePlus,
+	'o': RuneS1,
+	'p': RuneS3,
+	'q': RuneHLine,
+	'r': RuneS7,
+	's': RuneS9,
+	't': RuneLTee,
+	'u': RuneRTee,
+	'v': RuneBTee,
+	'w': RuneTTee,
+	'x': RuneVLine,
+	'y': RuneLEqual,
+	'z': RuneGEqual,
+	'{': RunePi,
+	'|': RuneNEqual,
+	'}': RuneSterling,
+	'~': RuneBullet,
+}
+
+// buildAcsMap builds a map of characters that we translate from Unicode to
+// alternate character encodings. To do this, we use the standard VT100 ACS
+// maps. This is only done if the terminal lacks support for Unicode; we
+// always prefer to emit Unicode glyphs when we are able.
+func (t *tScreen) buildAcsMap() {
+	acsstr := t.ti.AltChars
+	t.acs = make(map[rune]string)
+	for len(acsstr) > 2 {
+		srcv := acsstr[0]
+		dstv := string(acsstr[1])
+		if r, ok := vtACSNames[srcv]; ok {
+			t.acs[r] = t.ti.EnterAcs + dstv + t.ti.ExitAcs
+		}
+		acsstr = acsstr[2:]
+	}
+}
+
+func (t *tScreen) PostEventWait(ev Event) {
+	t.evch <- ev
+}
+
+func (t *tScreen) PostEvent(ev Event) error {
+	select {
+	case t.evch <- ev:
+		return nil
+	default:
+		return ErrEventQFull
+	}
+}
+
+func (t *tScreen) clip(x, y int) (int, int) {
+	w, h := t.cells.Size()
+	if x < 0 {
+		x = 0
+	}
+	if y < 0 {
+		y = 0
+	}
+	if x > w-1 {
+		x = w - 1
+	}
+	if y > h-1 {
+		y = h - 1
+	}
+	return x, y
+}
+
+// buildMouseEvent returns an event based on the supplied coordinates and button
+// state. Note that the screen's mouse button state is updated based on the
+// input to this function (i.e. it mutates the receiver).
+func (t *tScreen) buildMouseEvent(x, y, btn int) *EventMouse {
+
+	// XTerm mouse events only report at most one button at a time,
+	// which may include a wheel button. Wheel motion events are
+	// reported as single impulses, while other button events are reported
+	// as separate press & release events.
+
+	button := ButtonNone
+	mod := ModNone
+
+	// Mouse wheel has bit 6 set, no release events. It should be noted
+	// that wheel events are sometimes misdelivered as mouse button events
+	// during a click-drag, so we debounce these, considering them to be
+	// button press events unless we see an intervening release event.
+	switch btn & 0x43 {
+	case 0:
+		button = Button1
+		t.wasbtn = true
+	case 1:
+		button = Button3 // xterm reports middle as 1; tcell exposes it as Button3
+		t.wasbtn = true
+	case 2:
+		button = Button2 // xterm reports right as 2; tcell exposes it as Button2
+		t.wasbtn = true
+	case 3:
+		button = ButtonNone
+		t.wasbtn = false
+	case 0x40:
+		if !t.wasbtn {
+			button = WheelUp
+		} else {
+			button = Button1
+		}
+	case 0x41:
+		if !t.wasbtn {
+			button = WheelDown
+		} else {
+			button = Button2
+		}
+	}
+
+	if btn&0x4 != 0 {
+		mod |= ModShift
+	}
+	if btn&0x8 != 0 {
+		mod |= ModAlt
+	}
+	if btn&0x10 != 0 {
+		mod |= ModCtrl
+	}
+
+	// Some terminals will report mouse coordinates outside the
+	// screen, especially with click-drag events. Clip the coordinates
+	// to the screen in that case.
+	x, y = t.clip(x, y)
+
+	return NewEventMouse(x, y, button, mod)
+}
+
+// parseSgrMouse attempts to locate an SGR mouse record at the start of the
+// buffer. It returns true, true if it found one, and the associated bytes
+// are removed from the buffer. It returns true, false if the buffer might
+// contain such an event, but more bytes are necessary (partial match), and
+// false, false if the content is definitely *not* an SGR mouse record.
+func (t *tScreen) parseSgrMouse(buf *bytes.Buffer, evs *[]Event) (bool, bool) {
+
+	b := buf.Bytes()
+
+	var x, y, btn, state int
+	dig := false
+	neg := false
+	motion := false
+	i := 0
+	val := 0
+
+	for i = range b {
+		switch b[i] {
+		case '\x1b':
+			if state != 0 {
+				return false, false
+			}
+			state = 1
+
+		case '\x9b':
+			if state != 0 {
+				return false, false
+			}
+			state = 2
+
+		case '[':
+			if state != 1 {
+				return false, false
+			}
+			state = 2
+
+		case '<':
+			if state != 2 {
+				return false, false
+			}
+			val = 0
+			dig = false
+			neg = false
+			state = 3
+
+		case '-':
+			if state != 3 && state != 4 && state != 5 {
+				return false, false
+			}
+			if dig || neg {
+				return false, false
+			}
+			neg = true // stay in state
+
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			if state != 3 && state != 4 && state != 5 {
+				return false, false
+			}
+			val *= 10
+			val += int(b[i] - '0')
+			dig = true // stay in state
+
+		case ';':
+			if neg {
+				val = -val
+			}
+			switch state {
+			case 3:
+				btn, val = val, 0
+				neg, dig, state = false, false, 4
+			case 4:
+				x, val = val-1, 0
+				neg, dig, state = false, false, 5
+			default:
+				return false, false
+			}
+
+		case 'm', 'M':
+			if state != 5 {
+				return false, false
+			}
+			if neg {
+				val = -val
+			}
+			y = val - 1
+
+			motion = (btn & 32) != 0
+			btn &^= 32
+			if b[i] == 'm' {
+				// mouse release, clear all buttons
+				btn |= 3
+				btn &^= 0x40
+				t.buttondn = false
+			} else if motion {
+				/*
+				 * Some broken terminals appear to send
+				 * mouse button one motion events, instead of
+				 * encoding 35 (no buttons) into these events.
+				 * We resolve these by looking for a non-motion
+				 * event first.
+				 */
+				if !t.buttondn {
+					btn |= 3
+					btn &^= 0x40
+				}
+			} else {
+				t.buttondn = true
+			}
+			// consume the event bytes
+			for i >= 0 {
+				_, _ = buf.ReadByte()
+				i--
+			}
+			*evs = append(*evs, t.buildMouseEvent(x, y, btn))
+			return true, true
+		}
+	}
+
+	// incomplete & inconclusive at this point
+	return true, false
+}
+
+// parseXtermMouse is like parseSgrMouse, but it parses a legacy
+// X11 mouse record.
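+// Such a record is the six bytes ESC [ M Cb Cx Cy, where Cb carries the
+// button bits offset by 32, and Cx, Cy are the 1-based coordinates, each
+// also offset by 32 -- hence the "- 32 - 1" adjustments below.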
+func (t *tScreen) parseXtermMouse(buf *bytes.Buffer, evs *[]Event) (bool, bool) { + + b := buf.Bytes() + + state := 0 + btn := 0 + x := 0 + y := 0 + + for i := range b { + switch state { + case 0: + switch b[i] { + case '\x1b': + state = 1 + case '\x9b': + state = 2 + default: + return false, false + } + case 1: + if b[i] != '[' { + return false, false + } + state = 2 + case 2: + if b[i] != 'M' { + return false, false + } + state++ + case 3: + btn = int(b[i]) + state++ + case 4: + x = int(b[i]) - 32 - 1 + state++ + case 5: + y = int(b[i]) - 32 - 1 + for i >= 0 { + _, _ = buf.ReadByte() + i-- + } + *evs = append(*evs, t.buildMouseEvent(x, y, btn)) + return true, true + } + } + return true, false +} + +func (t *tScreen) parseFunctionKey(buf *bytes.Buffer, evs *[]Event) (bool, bool) { + b := buf.Bytes() + partial := false + for e, k := range t.keycodes { + esc := []byte(e) + if (len(esc) == 1) && (esc[0] == '\x1b') { + continue + } + if bytes.HasPrefix(b, esc) { + // matched + var r rune + if len(esc) == 1 { + r = rune(b[0]) + } + mod := k.mod + if t.escaped { + mod |= ModAlt + t.escaped = false + } + switch k.key { + case keyPasteStart: + *evs = append(*evs, NewEventPaste(true)) + case keyPasteEnd: + *evs = append(*evs, NewEventPaste(false)) + default: + *evs = append(*evs, NewEventKey(k.key, r, mod)) + } + for i := 0; i < len(esc); i++ { + _, _ = buf.ReadByte() + } + return true, true + } + if bytes.HasPrefix(esc, b) { + partial = true + } + } + return partial, false +} + +func (t *tScreen) parseRune(buf *bytes.Buffer, evs *[]Event) (bool, bool) { + b := buf.Bytes() + if b[0] >= ' ' && b[0] <= 0x7F { + // printable ASCII easy to deal with -- no encodings + mod := ModNone + if t.escaped { + mod = ModAlt + t.escaped = false + } + *evs = append(*evs, NewEventKey(KeyRune, rune(b[0]), mod)) + _, _ = buf.ReadByte() + return true, true + } + + if b[0] < 0x80 { + // Low numbered values are control keys, not runes. + return false, false + } + + utf := make([]byte, 12) + for l := 1; l <= len(b); l++ { + t.decoder.Reset() + nOut, nIn, e := t.decoder.Transform(utf, b[:l], true) + if e == transform.ErrShortSrc { + continue + } + if nOut != 0 { + r, _ := utf8.DecodeRune(utf[:nOut]) + if r != utf8.RuneError { + mod := ModNone + if t.escaped { + mod = ModAlt + t.escaped = false + } + *evs = append(*evs, NewEventKey(KeyRune, r, mod)) + } + for nIn > 0 { + _, _ = buf.ReadByte() + nIn-- + } + return true, true + } + } + // Looks like potential escape + return true, false +} + +func (t *tScreen) scanInput(buf *bytes.Buffer, expire bool) { + evs := t.collectEventsFromInput(buf, expire) + + for _, ev := range evs { + t.PostEventWait(ev) + } +} + +// Return an array of Events extracted from the supplied buffer. This is done +// while holding the screen's lock - the events can then be queued for +// application processing with the lock released. 
+func (t *tScreen) collectEventsFromInput(buf *bytes.Buffer, expire bool) []Event {
+
+	res := make([]Event, 0, 20)
+
+	t.Lock()
+	defer t.Unlock()
+
+	for {
+		b := buf.Bytes()
+		if len(b) == 0 {
+			buf.Reset()
+			return res
+		}
+
+		partials := 0
+
+		if part, comp := t.parseRune(buf, &res); comp {
+			continue
+		} else if part {
+			partials++
+		}
+
+		if part, comp := t.parseFunctionKey(buf, &res); comp {
+			continue
+		} else if part {
+			partials++
+		}
+
+		// Only parse mouse records if this term claims to have
+		// mouse support
+
+		if t.ti.Mouse != "" {
+			if part, comp := t.parseXtermMouse(buf, &res); comp {
+				continue
+			} else if part {
+				partials++
+			}
+
+			if part, comp := t.parseSgrMouse(buf, &res); comp {
+				continue
+			} else if part {
+				partials++
+			}
+		}
+
+		if partials == 0 || expire {
+			if b[0] == '\x1b' {
+				if len(b) == 1 {
+					res = append(res, NewEventKey(KeyEsc, 0, ModNone))
+					t.escaped = false
+				} else {
+					t.escaped = true
+				}
+				_, _ = buf.ReadByte()
+				continue
+			}
+			// Nothing was going to match, or we timed out
+			// waiting for more data -- just deliver the characters
+			// to the app & let them sort it out. Possibly we
+			// should only do this for control characters like ESC.
+			by, _ := buf.ReadByte()
+			mod := ModNone
+			if t.escaped {
+				t.escaped = false
+				mod = ModAlt
+			}
+			res = append(res, NewEventKey(KeyRune, rune(by), mod))
+			continue
+		}
+
+		// well we have some partial data, wait until we get
+		// some more
+		break
+	}
+
+	return res
+}
+
+func (t *tScreen) mainLoop(stopQ chan struct{}) {
+	defer t.wg.Done()
+	buf := &bytes.Buffer{}
+	for {
+		select {
+		case <-stopQ:
+			return
+		case <-t.quit:
+			return
+		case <-t.resizeQ:
+			t.Lock()
+			t.cx = -1
+			t.cy = -1
+			t.resize()
+			t.cells.Invalidate()
+			t.draw()
+			t.Unlock()
+			continue
+		case <-t.keytimer.C:
+			// If the timer fired, and the current time
+			// is after the expiration of the escape sequence,
+			// then we assume the escape sequence reached its
+			// conclusion, and process the chunk independently.
+			// This lets us detect conflicts such as a lone ESC.
+			if buf.Len() > 0 {
+				if time.Now().After(t.keyexpire) {
+					t.scanInput(buf, true)
+				}
+			}
+			if buf.Len() > 0 {
+				if !t.keytimer.Stop() {
+					select {
+					case <-t.keytimer.C:
+					default:
+					}
+				}
+				t.keytimer.Reset(time.Millisecond * 50)
+			}
+		case chunk := <-t.keychan:
+			buf.Write(chunk)
+			t.keyexpire = time.Now().Add(time.Millisecond * 50)
+			t.scanInput(buf, false)
+			if !t.keytimer.Stop() {
+				select {
+				case <-t.keytimer.C:
+				default:
+				}
+			}
+			if buf.Len() > 0 {
+				t.keytimer.Reset(time.Millisecond * 50)
+			}
+		}
+	}
+}
+
+func (t *tScreen) inputLoop(stopQ chan struct{}) {
+
+	defer t.wg.Done()
+	for {
+		select {
+		case <-stopQ:
+			return
+		default:
+		}
+		chunk := make([]byte, 128)
+		n, e := t.tty.Read(chunk)
+		switch e {
+		case nil:
+		default:
+			_ = t.PostEvent(NewEventError(e))
+			return
+		}
+		if n > 0 {
+			t.keychan <- chunk[:n]
+		}
+	}
+}
+
+func (t *tScreen) Sync() {
+	t.Lock()
+	t.cx = -1
+	t.cy = -1
+	if !t.fini {
+		t.resize()
+		t.clear = true
+		t.cells.Invalidate()
+		t.draw()
+	}
+	t.Unlock()
+}
+
+func (t *tScreen) CharacterSet() string {
+	return t.charset
+}
+
+func (t *tScreen) RegisterRuneFallback(orig rune, fallback string) {
+	t.Lock()
+	t.fallback[orig] = fallback
+	t.Unlock()
+}
+
+func (t *tScreen) UnregisterRuneFallback(orig rune) {
+	t.Lock()
+	delete(t.fallback, orig)
+	t.Unlock()
+}
+
+func (t *tScreen) CanDisplay(r rune, checkFallbacks bool) bool {
+
+	if enc := t.encoder; enc != nil {
+		nb := make([]byte, 6)
+		ob := make([]byte, 6)
+		num := utf8.EncodeRune(ob, r)
+
+		enc.Reset()
+		dst, _, err := enc.Transform(nb, ob[:num], true)
+		if dst != 0 && err == nil && nb[0] != '\x1A' {
+			return true
+		}
+	}
+	// Terminal fallbacks always permitted, since we assume they are
+	// basically nearly perfect renditions.
+	if _, ok := t.acs[r]; ok {
+		return true
+	}
+	if !checkFallbacks {
+		return false
+	}
+	if _, ok := t.fallback[r]; ok {
+		return true
+	}
+	return false
+}
+
+func (t *tScreen) HasMouse() bool {
+	return len(t.mouse) != 0
+}
+
+func (t *tScreen) HasKey(k Key) bool {
+	if k == KeyRune {
+		return true
+	}
+	return t.keyexist[k]
+}
+
+func (t *tScreen) Resize(int, int, int, int) {}
+
+func (t *tScreen) Suspend() error {
+	t.disengage()
+	return nil
+}
+
+func (t *tScreen) Resume() error {
+	return t.engage()
+}
+
+// engage is used to place the terminal in raw mode and establish screen size, etc.
+// Think of this as tcell "engaging" the clutch, as it's going to be driving the
+// terminal interface.
+func (t *tScreen) engage() error {
+	t.Lock()
+	defer t.Unlock()
+	if t.tty == nil {
+		return ErrNoScreen
+	}
+	t.tty.NotifyResize(func() {
+		select {
+		case t.resizeQ <- true:
+		default:
+		}
+	})
+	if t.running {
+		return errors.New("already engaged")
+	}
+	if err := t.tty.Start(); err != nil {
+		return err
+	}
+	t.running = true
+	if w, h, err := t.tty.WindowSize(); err == nil && w != 0 && h != 0 {
+		t.cells.Resize(w, h)
+	}
+	stopQ := make(chan struct{})
+	t.stopQ = stopQ
+	t.enableMouse(t.mouseFlags)
+	t.enablePasting(t.pasteEnabled)
+
+	ti := t.ti
+	t.TPuts(ti.EnterCA)
+	t.TPuts(ti.EnterKeypad)
+	t.TPuts(ti.HideCursor)
+	t.TPuts(ti.EnableAcs)
+	t.TPuts(ti.Clear)
+
+	t.wg.Add(2)
+	go t.inputLoop(stopQ)
+	go t.mainLoop(stopQ)
+	return nil
+}
+
+// disengage is used to release the terminal back to support from the caller.
+// Think of this as tcell disengaging the clutch, so that another application
+// can take over the terminal interface. This restores the TTY mode that was
+// present when the application was first started.
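+// A typical use, sketched: an application calls Suspend before shelling out
+// to an external program (an editor, say) and Resume once it returns; as
+// shown above, both are thin wrappers over disengage and engage.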
+func (t *tScreen) disengage() {
+
+	t.Lock()
+	if !t.running {
+		t.Unlock()
+		return
+	}
+	t.running = false
+	stopQ := t.stopQ
+	close(stopQ)
+	_ = t.tty.Drain()
+	t.Unlock()
+
+	t.tty.NotifyResize(nil)
+	// wait for everything to shut down
+	t.wg.Wait()
+
+	// shutdown the screen and disable special modes (e.g. mouse and bracketed paste)
+	ti := t.ti
+	t.cells.Resize(0, 0)
+	t.TPuts(ti.ShowCursor)
+	t.TPuts(ti.ResetFgBg)
+	t.TPuts(ti.AttrOff)
+	t.TPuts(ti.Clear)
+	t.TPuts(ti.ExitCA)
+	t.TPuts(ti.ExitKeypad)
+	t.enableMouse(0)
+	t.enablePasting(false)
+
+	_ = t.tty.Stop()
+}
+
+// Beep emits a beep to the terminal.
+func (t *tScreen) Beep() error {
+	t.writeString(string(byte(7)))
+	return nil
+}
+
+// finalize is used at application shutdown, and restores the terminal
+// to its initial state. It should not be called more than once.
+func (t *tScreen) finalize() {
+	t.disengage()
+	_ = t.tty.Close()
+}
diff --git a/vendor/github.com/gdamore/tcell/v2/tscreen_stub.go b/vendor/github.com/gdamore/tcell/v2/tscreen_stub.go
new file mode 100644
index 0000000000..1611cb5531
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/tscreen_stub.go
@@ -0,0 +1,25 @@
+// +build js plan9 windows
+
+// Copyright 2021 The TCell Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use file except in compliance with the License.
+// You may obtain a copy of the license at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tcell
+
+// NB: We might someday wish to move Windows to this model. However,
+// that would probably mean sacrificing some of the richer key reporting
+// that we can obtain with the console API present on Windows.
+
+func (t *tScreen) initialize() error {
+	return ErrNoScreen
+}
diff --git a/vendor/github.com/gdamore/tcell/v2/tscreen_unix.go b/vendor/github.com/gdamore/tcell/v2/tscreen_unix.go
new file mode 100644
index 0000000000..1eb7504a7b
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/tscreen_unix.go
@@ -0,0 +1,31 @@
+// Copyright 2021 The TCell Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use file except in compliance with the License.
+// You may obtain a copy of the license at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package tcell
+
+// initialize is used at application startup, and sets up the initial values
+// including file descriptors used for terminals and saving the initial state
+// so that it can be restored when the application terminates.
+func (t *tScreen) initialize() error {
+	var err error
+	if t.tty == nil {
+		t.tty, err = NewDevTty()
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/gdamore/tcell/v2/tty.go b/vendor/github.com/gdamore/tcell/v2/tty.go
new file mode 100644
index 0000000000..567b041876
--- /dev/null
+++ b/vendor/github.com/gdamore/tcell/v2/tty.go
@@ -0,0 +1,56 @@
+// Copyright 2021 The TCell Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use file except in compliance with the License.
+// You may obtain a copy of the license at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tcell
+
+import "io"
+
+// Tty is an abstraction of a tty (traditionally "teletype"). This allows applications to
+// provide for alternate backends, as there are situations where the traditional /dev/tty
+// does not work, or where more flexible handling is required. This interface is for use
+// with the terminfo-style based API. It extends the io.ReadWriter API. It is reasonable
+// that the implementation might choose to use different underlying files for the Reader
+// and Writer sides of this API, as part of its internal implementation.
+type Tty interface {
+	// Start is used to activate the Tty for use. Upon return the terminal should be
+	// in raw mode, non-blocking, etc. The implementation should take care of saving
+	// any state that is required so that it may be restored when Stop is called.
+	Start() error
+
+	// Stop is used to stop using this Tty instance. This may be a suspend, so that other
+	// terminal based applications can run in the foreground. Implementations should
+	// restore any state collected at Start(), and return to ordinary blocking mode, etc.
+	// Drain is called first to drain the input. Once this is called, no more Read
+	// or Write calls will be made until Start is called again.
+	Stop() error
+
+	// Drain is called before Stop, and ensures that the reader will wake up appropriately
+	// if it was blocked. This workaround is required for /dev/tty on certain UNIX systems
+	// to ensure that Read() does not block forever. This typically arranges for the tty driver
+	// to send data immediately (e.g. VMIN and VTIME both set zero) and sets a deadline on input.
+	// Implementations may reasonably make this a no-op. There will still be control sequences
+	// emitted between the time this is called, and when Stop is called.
+	Drain() error
+
+	// NotifyResize is used to register a callback when the tty thinks the dimensions have
+	// changed. The standard UNIX implementation links this to a handler for SIGWINCH.
+	// If the supplied callback is nil, then any handler should be unregistered.
+	NotifyResize(cb func())
+
+	// WindowSize is called to determine the terminal dimensions. This might be determined
+	// by an ioctl or other means.
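+	// (On POSIX systems this is typically the TIOCGWINSZ ioctl, as
+	// wrapped by golang.org/x/term.GetSize.)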
+ WindowSize() (width int, height int, err error) + + io.ReadWriteCloser +} \ No newline at end of file diff --git a/vendor/github.com/gdamore/tcell/v2/tty_unix.go b/vendor/github.com/gdamore/tcell/v2/tty_unix.go new file mode 100644 index 0000000000..0c6ea78498 --- /dev/null +++ b/vendor/github.com/gdamore/tcell/v2/tty_unix.go @@ -0,0 +1,190 @@ +// Copyright 2021 The TCell Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use file except in compliance with the License. +// You may obtain a copy of the license at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +package tcell + +import ( + "errors" + "fmt" + "os" + "os/signal" + "strconv" + "sync" + "syscall" + "time" + + "golang.org/x/term" +) + +// devTty is an implementation of the Tty API based upon /dev/tty. +type devTty struct { + fd int + f *os.File + of *os.File // the first open of /dev/tty + saved *term.State + sig chan os.Signal + cb func() + stopQ chan struct{} + dev string + wg sync.WaitGroup + l sync.Mutex +} + +func (tty *devTty) Read(b []byte) (int, error) { + return tty.f.Read(b) +} + +func (tty *devTty) Write(b []byte) (int, error) { + return tty.f.Write(b) +} + +func (tty *devTty) Close() error { + return tty.f.Close() +} + +func (tty *devTty) Start() error { + tty.l.Lock() + defer tty.l.Unlock() + + // We open another copy of /dev/tty. This is a workaround for unusual behavior + // observed in macOS, apparently caused when a subshell (for example) closes our + // own tty device (when it exits for example). Getting a fresh new one seems to + // resolve the problem. (We believe this is a bug in the macOS tty driver that + // fails to account for dup() references to the same file before applying close() + // related behaviors to the tty.) We're also holding the original copy we opened + // since closing that might have deleterious effects as well. The upshot is that + // we will have up to two separate file handles open on /dev/tty. (Note that when + // using stdin/stdout instead of /dev/tty this problem is not observed.) 
+ var err error + if tty.f, err = os.OpenFile(tty.dev, os.O_RDWR, 0); err != nil { + return err + } + tty.fd = int(tty.of.Fd()) + + if !term.IsTerminal(tty.fd) { + return errors.New("device is not a terminal") + } + + _ = tty.f.SetReadDeadline(time.Time{}) + saved, err := term.MakeRaw(tty.fd) // also sets vMin and vTime + if err != nil { + return err + } + tty.saved = saved + + tty.stopQ = make(chan struct{}) + tty.wg.Add(1) + go func(stopQ chan struct{}) { + defer tty.wg.Done() + for { + select { + case <-tty.sig: + tty.l.Lock() + cb := tty.cb + tty.l.Unlock() + if cb != nil { + cb() + } + case <-stopQ: + return + } + } + }(tty.stopQ) + + signal.Notify(tty.sig, syscall.SIGWINCH) + return nil +} + +func (tty *devTty) Drain() error { + _ = tty.f.SetReadDeadline(time.Now()) + if err := tcSetBufParams(tty.fd, 0, 0); err != nil { + return err + } + return nil +} + +func (tty *devTty) Stop() error { + tty.l.Lock() + if err := term.Restore(tty.fd, tty.saved); err != nil { + tty.l.Unlock() + return err + } + _ = tty.f.SetReadDeadline(time.Now()) + + signal.Stop(tty.sig) + close(tty.stopQ) + tty.l.Unlock() + + tty.wg.Wait() + + // close our tty device -- we'll get another one if we Start again later. + _ = tty.f.Close() + + return nil +} + +func (tty *devTty) WindowSize() (int, int, error) { + w, h, err := term.GetSize(tty.fd) + if err != nil { + return 0, 0, err + } + if w == 0 { + w, _ = strconv.Atoi(os.Getenv("COLUMNS")) + } + if w == 0 { + w = 80 // default + } + if h == 0 { + h, _ = strconv.Atoi(os.Getenv("LINES")) + } + if h == 0 { + h = 25 // default + } + return w, h, nil +} + +func (tty *devTty) NotifyResize(cb func()) { + tty.l.Lock() + tty.cb = cb + tty.l.Unlock() +} + +// NewDevTty opens a /dev/tty based Tty. +func NewDevTty() (Tty, error) { + return NewDevTtyFromDev("/dev/tty") +} + +// NewDevTtyFromDev opens a tty device given a path. This can be useful to bind to other nodes. +func NewDevTtyFromDev(dev string) (Tty, error) { + tty := &devTty{ + dev: dev, + sig: make(chan os.Signal), + } + var err error + if tty.of, err = os.OpenFile(dev, os.O_RDWR, 0); err != nil { + return nil, err + } + tty.fd = int(tty.of.Fd()) + if !term.IsTerminal(tty.fd) { + _ = tty.f.Close() + return nil, errors.New("not a terminal") + } + if tty.saved, err = term.GetState(tty.fd); err != nil { + _ = tty.f.Close() + return nil, fmt.Errorf("failed to get state: %w", err) + } + return tty, nil +} diff --git a/vendor/github.com/go-git/gcfg/LICENSE b/vendor/github.com/go-git/gcfg/LICENSE new file mode 100644 index 0000000000..87a5cede33 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/go-git/gcfg/README b/vendor/github.com/go-git/gcfg/README new file mode 100644 index 0000000000..1ff233a529 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/README @@ -0,0 +1,4 @@ +Gcfg reads INI-style configuration files into Go structs; +supports user-defined types and subsections. + +Package docs: https://godoc.org/gopkg.in/gcfg.v1 diff --git a/vendor/github.com/go-git/gcfg/doc.go b/vendor/github.com/go-git/gcfg/doc.go new file mode 100644 index 0000000000..7bdefbf020 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/doc.go @@ -0,0 +1,145 @@ +// Package gcfg reads "INI-style" text-based configuration files with +// "name=value" pairs grouped into sections (gcfg files). +// +// This package is still a work in progress; see the sections below for planned +// changes. +// +// Syntax +// +// The syntax is based on that used by git config: +// http://git-scm.com/docs/git-config#_syntax . +// There are some (planned) differences compared to the git config format: +// - improve data portability: +// - must be encoded in UTF-8 (for now) and must not contain the 0 byte +// - include and "path" type is not supported +// (path type may be implementable as a user-defined type) +// - internationalization +// - section and variable names can contain unicode letters, unicode digits +// (as defined in http://golang.org/ref/spec#Characters ) and hyphens +// (U+002D), starting with a unicode letter +// - disallow potentially ambiguous or misleading definitions: +// - `[sec.sub]` format is not allowed (deprecated in gitconfig) +// - `[sec ""]` is not allowed +// - use `[sec]` for section name "sec" and empty subsection name +// - (planned) within a single file, definitions must be contiguous for each: +// - section: '[secA]' -> '[secB]' -> '[secA]' is an error +// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error +// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error +// +// Data structure +// +// The functions in this package read values into a user-defined struct. +// Each section corresponds to a struct field in the config struct, and each +// variable in a section corresponds to a data field in the section struct. +// The mapping of each section or variable name to fields is done either based +// on the "gcfg" struct tag or by matching the name of the section or variable, +// ignoring case. In the latter case, hyphens '-' in section and variable names +// correspond to underscores '_' in field names. +// Fields must be exported; to use a section or variable name starting with a +// letter that is neither upper- or lower-case, prefix the field name with 'X'. +// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .) 
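+//
+// For example, a gcfg file such as
+//
+//  [section]
+//  name=value
+//
+// can be read into a struct like (an illustrative sketch):
+//
+//  type Config struct {
+//      Section struct {
+//          Name string
+//      }
+//  }
+//
+// via gcfg.ReadFileInto(&cfg, "file.gcfg").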
+//
+// For sections with subsections, the corresponding field in config must be a
+// map, rather than a struct, with string keys and pointer-to-struct values.
+// Values for subsection variables are stored in the map with the subsection
+// name used as the map key.
+// (Note that unlike section and variable names, subsection names are case
+// sensitive.)
+// When using a map, and there is a section with the same section name but
+// without a subsection name, its values are stored with the empty string used
+// as the key.
+// It is possible to provide default values for subsections in the section
+// "default-<sectionname>" (or by setting values in the corresponding struct
+// field "Default_<sectionname>").
+//
+// The functions in this package panic if config is not a pointer to a struct,
+// or when a field is not of a suitable type (either a struct or a map with
+// string keys and pointer-to-struct values).
+//
+// Parsing of values
+//
+// The section structs in the config struct may contain single-valued or
+// multi-valued variables. Variables of unnamed slice type (that is, a type
+// starting with `[]`) are treated as multi-value; all others (including named
+// slice types) are treated as single-valued variables.
+//
+// Single-valued variables are handled based on the type as follows.
+// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
+// and if necessary, a new instance is allocated.
+//
+// For types implementing the encoding.TextUnmarshaler interface, the
+// UnmarshalText method is used to set the value. Implementing this method is
+// the recommended way for parsing user-defined types.
+//
+// For fields of string kind, the value string is assigned to the field, after
+// unquoting and unescaping as needed.
+// For fields of bool kind, the field is set to true if the value is "true",
+// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
+// "0", ignoring case. In addition, single-valued bool fields can be specified
+// with a "blank" value (variable name without equals sign and value); in such
+// case the value is set to true.
+//
+// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
+// decimal or hexadecimal (if they have a '0x' prefix). (This is to prevent
+// unintuitively handling zero-padded numbers as octal.) Other types having
+// [u]int* as the underlying type, such as os.FileMode and uintptr, allow
+// decimal, hexadecimal, or octal values.
+// Parsing mode for integer types can be overridden using the struct tag option
+// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
+// (each standing for decimal, hexadecimal, and octal, respectively.)
+//
+// All other types are parsed using fmt.Sscanf with the "%v" verb.
+//
+// For multi-valued variables, each individual value is parsed as above and
+// appended to the slice. If the first value is specified as a "blank" value
+// (variable name without equals sign and value), a new slice is allocated;
+// that is, any values previously set in the slice will be ignored.
+//
+// The types subpackage provides helpers for parsing "enum-like" and integer
+// types.
+//
+// Error handling
+//
+// There are 3 types of errors:
+//
+//  - programmer errors / panics:
+//    - invalid configuration structure
+//  - data errors:
+//    - fatal errors:
+//      - invalid configuration syntax
+//    - warnings:
+//      - data that doesn't belong to any part of the config structure
+//
+// Programmer errors trigger panics.
These should be fixed by the programmer
+// before releasing code that uses gcfg.
+//
+// Data errors cause gcfg to return a non-nil error value. This includes the
+// case when there are extra unknown key-value definitions in the configuration
+// data (extra data).
+// However, on some occasions it is desirable to be able to proceed in
+// situations where the only data error is that of extra data.
+// These errors are handled at a different (warning) priority and can be
+// filtered out programmatically. To ignore extra data warnings, wrap the
+// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
+//
+// TODO
+//
+// The following is a list of changes under consideration:
+//  - documentation
+//    - self-contained syntax documentation
+//    - more practical examples
+//    - move TODOs to issue tracker (eventually)
+//  - syntax
+//    - reconsider valid escape sequences
+//      (gitconfig doesn't support \r in value, \t in subsection name, etc.)
+//  - reading / parsing gcfg files
+//    - define internal representation structure
+//    - support multiple inputs (readers, strings, files)
+//    - support declaring encoding (?)
+//    - support varying field sets for subsections (?)
+//  - writing gcfg files
+//  - error handling
+//    - make error context accessible programmatically?
+//    - limit input size?
+//
+package gcfg // import "github.com/go-git/gcfg"
diff --git a/vendor/github.com/go-git/gcfg/errors.go b/vendor/github.com/go-git/gcfg/errors.go
new file mode 100644
index 0000000000..853c76021d
--- /dev/null
+++ b/vendor/github.com/go-git/gcfg/errors.go
@@ -0,0 +1,41 @@
+package gcfg
+
+import (
+	"gopkg.in/warnings.v0"
+)
+
+// FatalOnly filters the results of a Read*Into invocation and returns only
+// fatal errors. That is, errors (warnings) indicating data for unknown
+// sections / variables are ignored. Example invocation:
+//
+//  err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
+//  if err != nil {
+//      ...
+// +func FatalOnly(err error) error { + return warnings.FatalOnly(err) +} + +func isFatal(err error) bool { + _, ok := err.(extraData) + return !ok +} + +type extraData struct { + section string + subsection *string + variable *string +} + +func (e extraData) Error() string { + s := "can't store data at section \"" + e.section + "\"" + if e.subsection != nil { + s += ", subsection \"" + *e.subsection + "\"" + } + if e.variable != nil { + s += ", variable \"" + *e.variable + "\"" + } + return s +} + +var _ error = extraData{} diff --git a/vendor/github.com/go-git/gcfg/go1_0.go b/vendor/github.com/go-git/gcfg/go1_0.go new file mode 100644 index 0000000000..6670210791 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/go1_0.go @@ -0,0 +1,7 @@ +// +build !go1.2 + +package gcfg + +type textUnmarshaler interface { + UnmarshalText(text []byte) error +} diff --git a/vendor/github.com/go-git/gcfg/go1_2.go b/vendor/github.com/go-git/gcfg/go1_2.go new file mode 100644 index 0000000000..6f5843bc7c --- /dev/null +++ b/vendor/github.com/go-git/gcfg/go1_2.go @@ -0,0 +1,9 @@ +// +build go1.2 + +package gcfg + +import ( + "encoding" +) + +type textUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/github.com/go-git/gcfg/read.go b/vendor/github.com/go-git/gcfg/read.go new file mode 100644 index 0000000000..4dfdc5cf30 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/read.go @@ -0,0 +1,273 @@ +package gcfg + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "strings" + + "github.com/go-git/gcfg/scanner" + "github.com/go-git/gcfg/token" + "gopkg.in/warnings.v0" +) + +var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t', 'b': '\b'} + +// no error: invalid literals should be caught by scanner +func unquote(s string) string { + u, q, esc := make([]rune, 0, len(s)), false, false + for _, c := range s { + if esc { + uc, ok := unescape[c] + switch { + case ok: + u = append(u, uc) + fallthrough + case !q && c == '\n': + esc = false + continue + } + panic("invalid escape sequence") + } + switch c { + case '"': + q = !q + case '\\': + esc = true + default: + u = append(u, c) + } + } + if q { + panic("missing end quote") + } + if esc { + panic("invalid escape sequence") + } + return string(u) +} + +func read(c *warnings.Collector, callback func(string, string, string, string, bool) error, + fset *token.FileSet, file *token.File, src []byte) error { + // + var s scanner.Scanner + var errs scanner.ErrorList + s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0) + sect, sectsub := "", "" + pos, tok, lit := s.Scan() + errfn := func(msg string) error { + return fmt.Errorf("%s: %s", fset.Position(pos), msg) + } + for { + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + switch tok { + case token.EOF: + return nil + case token.EOL, token.COMMENT: + pos, tok, lit = s.Scan() + case token.LBRACK: + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok != token.IDENT { + if err := c.Collect(errfn("expected section name")); err != nil { + return err + } + } + sect, sectsub = lit, "" + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + if tok == token.STRING { + sectsub = unquote(lit) + if sectsub == "" { + if err := c.Collect(errfn("empty subsection name")); err != nil { + return err + } + } + pos, tok, lit = s.Scan() + if errs.Len() > 0 { + if err := c.Collect(errs.Err()); err != nil { + return err + } + } + } 
+			if tok != token.RBRACK {
+				if sectsub == "" {
+					if err := c.Collect(errfn("expected subsection name or right bracket")); err != nil {
+						return err
+					}
+				}
+				if err := c.Collect(errfn("expected right bracket")); err != nil {
+					return err
+				}
+			}
+			pos, tok, lit = s.Scan()
+			if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
+				if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
+					return err
+				}
+			}
+			// If a section/subsection header was found, ensure a
+			// container object is created, even if there are no
+			// variables further down.
+			err := c.Collect(callback(sect, sectsub, "", "", true))
+			if err != nil {
+				return err
+			}
+		case token.IDENT:
+			if sect == "" {
+				if err := c.Collect(errfn("expected section header")); err != nil {
+					return err
+				}
+			}
+			n := lit
+			pos, tok, lit = s.Scan()
+			if errs.Len() > 0 {
+				return errs.Err()
+			}
+			blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
+			if !blank {
+				if tok != token.ASSIGN {
+					if err := c.Collect(errfn("expected '='")); err != nil {
+						return err
+					}
+				}
+				pos, tok, lit = s.Scan()
+				if errs.Len() > 0 {
+					if err := c.Collect(errs.Err()); err != nil {
+						return err
+					}
+				}
+				if tok != token.STRING {
+					if err := c.Collect(errfn("expected value")); err != nil {
+						return err
+					}
+				}
+				v = unquote(lit)
+				pos, tok, lit = s.Scan()
+				if errs.Len() > 0 {
+					if err := c.Collect(errs.Err()); err != nil {
+						return err
+					}
+				}
+				if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
+					if err := c.Collect(errfn("expected EOL, EOF, or comment")); err != nil {
+						return err
+					}
+				}
+			}
+			err := c.Collect(callback(sect, sectsub, n, v, blank))
+			if err != nil {
+				return err
+			}
+		default:
+			if sect == "" {
+				if err := c.Collect(errfn("expected section header")); err != nil {
+					return err
+				}
+			}
+			if err := c.Collect(errfn("expected section header or variable declaration")); err != nil {
+				return err
+			}
+		}
+	}
+	panic("never reached")
+}
+
+func readInto(config interface{}, fset *token.FileSet, file *token.File,
+	src []byte) error {
+	//
+	c := warnings.NewCollector(isFatal)
+	firstPassCallback := func(s string, ss string, k string, v string, bv bool) error {
+		return set(c, config, s, ss, k, v, bv, false)
+	}
+	err := read(c, firstPassCallback, fset, file, src)
+	if err != nil {
+		return err
+	}
+	secondPassCallback := func(s string, ss string, k string, v string, bv bool) error {
+		return set(c, config, s, ss, k, v, bv, true)
+	}
+	err = read(c, secondPassCallback, fset, file, src)
+	if err != nil {
+		return err
+	}
+	return c.Done()
+}
+
+// ReadWithCallback reads gcfg formatted data from reader and calls
+// callback with each section and option found.
+//
+// Callback is called with section, subsection, option key, option value
+// and blank value flag as arguments.
+//
+// When a section is found, callback is called with empty subsection, option key
+// and option value.
+//
+// When a subsection is found, callback is called with empty option key and
+// option value.
+//
+// If the blank value flag is true, it means that the value was not set for an option
+// (as opposed to being set to the empty string).
+//
+// If callback returns an error, ReadWithCallback terminates with an error too.
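+//
+// A minimal invocation might look like this (an illustrative sketch):
+//
+//  err := gcfg.ReadWithCallback(reader, func(sect, subsect, key, value string, blank bool) error {
+//      fmt.Println(sect, subsect, key, value, blank)
+//      return nil
+//  })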
+func ReadWithCallback(reader io.Reader, callback func(string, string, string, string, bool) error) error { + src, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + c := warnings.NewCollector(isFatal) + + return read(c, callback, fset, file, src) +} + +// ReadInto reads gcfg formatted data from reader and sets the values into the +// corresponding fields in config. +func ReadInto(config interface{}, reader io.Reader) error { + src, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + fset := token.NewFileSet() + file := fset.AddFile("", fset.Base(), len(src)) + return readInto(config, fset, file, src) +} + +// ReadStringInto reads gcfg formatted data from str and sets the values into +// the corresponding fields in config. +func ReadStringInto(config interface{}, str string) error { + r := strings.NewReader(str) + return ReadInto(config, r) +} + +// ReadFileInto reads gcfg formatted data from the file filename and sets the +// values into the corresponding fields in config. +func ReadFileInto(config interface{}, filename string) error { + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + src, err := ioutil.ReadAll(f) + if err != nil { + return err + } + fset := token.NewFileSet() + file := fset.AddFile(filename, fset.Base(), len(src)) + return readInto(config, fset, file, src) +} diff --git a/vendor/github.com/go-git/gcfg/scanner/errors.go b/vendor/github.com/go-git/gcfg/scanner/errors.go new file mode 100644 index 0000000000..a6e00f5c64 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/scanner/errors.go @@ -0,0 +1,121 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package scanner + +import ( + "fmt" + "io" + "sort" +) + +import ( + "github.com/go-git/gcfg/token" +) + +// In an ErrorList, an error is represented by an *Error. +// The position Pos, if valid, points to the beginning of +// the offending token, and the error condition is described +// by Msg. +// +type Error struct { + Pos token.Position + Msg string +} + +// Error implements the error interface. +func (e Error) Error() string { + if e.Pos.Filename != "" || e.Pos.IsValid() { + // don't print "" + // TODO(gri) reconsider the semantics of Position.IsValid + return e.Pos.String() + ": " + e.Msg + } + return e.Msg +} + +// ErrorList is a list of *Errors. +// The zero value for an ErrorList is an empty ErrorList ready to use. +// +type ErrorList []*Error + +// Add adds an Error with given position and error message to an ErrorList. +func (p *ErrorList) Add(pos token.Position, msg string) { + *p = append(*p, &Error{pos, msg}) +} + +// Reset resets an ErrorList to no errors. +func (p *ErrorList) Reset() { *p = (*p)[0:0] } + +// ErrorList implements the sort Interface. +func (p ErrorList) Len() int { return len(p) } +func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func (p ErrorList) Less(i, j int) bool { + e := &p[i].Pos + f := &p[j].Pos + if e.Filename < f.Filename { + return true + } + if e.Filename == f.Filename { + return e.Offset < f.Offset + } + return false +} + +// Sort sorts an ErrorList. *Error entries are sorted by position, +// other errors are sorted by error message, and before any *Error +// entry. +// +func (p ErrorList) Sort() { + sort.Sort(p) +} + +// RemoveMultiples sorts an ErrorList and removes all but the first error per line. 
+func (p *ErrorList) RemoveMultiples() { + sort.Sort(p) + var last token.Position // initial last.Line is != any legal error line + i := 0 + for _, e := range *p { + if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line { + last = e.Pos + (*p)[i] = e + i++ + } + } + (*p) = (*p)[0:i] +} + +// An ErrorList implements the error interface. +func (p ErrorList) Error() string { + switch len(p) { + case 0: + return "no errors" + case 1: + return p[0].Error() + } + return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1) +} + +// Err returns an error equivalent to this error list. +// If the list is empty, Err returns nil. +func (p ErrorList) Err() error { + if len(p) == 0 { + return nil + } + return p +} + +// PrintError is a utility function that prints a list of errors to w, +// one error per line, if the err parameter is an ErrorList. Otherwise +// it prints the err string. +// +func PrintError(w io.Writer, err error) { + if list, ok := err.(ErrorList); ok { + for _, e := range list { + fmt.Fprintf(w, "%s\n", e) + } + } else if err != nil { + fmt.Fprintf(w, "%s\n", err) + } +} diff --git a/vendor/github.com/go-git/gcfg/scanner/scanner.go b/vendor/github.com/go-git/gcfg/scanner/scanner.go new file mode 100644 index 0000000000..41aafec758 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/scanner/scanner.go @@ -0,0 +1,342 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package scanner implements a scanner for gcfg configuration text. +// It takes a []byte as source which can then be tokenized +// through repeated calls to the Scan method. +// +// Note that the API for the scanner package may change to accommodate new +// features or implementation changes in gcfg. +// +package scanner + +import ( + "fmt" + "path/filepath" + "unicode" + "unicode/utf8" +) + +import ( + "github.com/go-git/gcfg/token" +) + +// An ErrorHandler may be provided to Scanner.Init. If a syntax error is +// encountered and a handler was installed, the handler is called with a +// position and an error message. The position points to the beginning of +// the offending token. +// +type ErrorHandler func(pos token.Position, msg string) + +// A Scanner holds the scanner's internal state while processing +// a given text. It can be allocated as part of another data +// structure but must be initialized via Init before use. +// +type Scanner struct { + // immutable state + file *token.File // source file handle + dir string // directory portion of file.Name() + src []byte // source + err ErrorHandler // error reporting; or nil + mode Mode // scanning mode + + // scanning state + ch rune // current character + offset int // character offset + rdOffset int // reading offset (position after current character) + lineOffset int // current line offset + nextVal bool // next token is expected to be a value + + // public state - ok to modify + ErrorCount int // number of errors encountered +} + +// Read the next Unicode char into s.ch. +// s.ch < 0 means end-of-file. 
+// +func (s *Scanner) next() { + if s.rdOffset < len(s.src) { + s.offset = s.rdOffset + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + r, w := rune(s.src[s.rdOffset]), 1 + switch { + case r == 0: + s.error(s.offset, "illegal character NUL") + case r >= 0x80: + // not ASCII + r, w = utf8.DecodeRune(s.src[s.rdOffset:]) + if r == utf8.RuneError && w == 1 { + s.error(s.offset, "illegal UTF-8 encoding") + } + } + s.rdOffset += w + s.ch = r + } else { + s.offset = len(s.src) + if s.ch == '\n' { + s.lineOffset = s.offset + s.file.AddLine(s.offset) + } + s.ch = -1 // eof + } +} + +// A mode value is a set of flags (or 0). +// They control scanner behavior. +// +type Mode uint + +const ( + ScanComments Mode = 1 << iota // return comments as COMMENT tokens +) + +// Init prepares the scanner s to tokenize the text src by setting the +// scanner at the beginning of src. The scanner uses the file set file +// for position information and it adds line information for each line. +// It is ok to re-use the same file when re-scanning the same file as +// line information which is already present is ignored. Init causes a +// panic if the file size does not match the src size. +// +// Calls to Scan will invoke the error handler err if they encounter a +// syntax error and err is not nil. Also, for each error encountered, +// the Scanner field ErrorCount is incremented by one. The mode parameter +// determines how comments are handled. +// +// Note that Init may call err if there is an error in the first character +// of the file. +// +func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) { + // Explicitly initialize all fields since a scanner may be reused. + if file.Size() != len(src) { + panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src))) + } + s.file = file + s.dir, _ = filepath.Split(file.Name()) + s.src = src + s.err = err + s.mode = mode + + s.ch = ' ' + s.offset = 0 + s.rdOffset = 0 + s.lineOffset = 0 + s.ErrorCount = 0 + s.nextVal = false + + s.next() +} + +func (s *Scanner) error(offs int, msg string) { + if s.err != nil { + s.err(s.file.Position(s.file.Pos(offs)), msg) + } + s.ErrorCount++ +} + +func (s *Scanner) scanComment() string { + // initial [;#] already consumed + offs := s.offset - 1 // position of initial [;#] + + for s.ch != '\n' && s.ch >= 0 { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func isLetter(ch rune) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch) +} + +func isDigit(ch rune) bool { + return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch) +} + +func (s *Scanner) scanIdentifier() string { + offs := s.offset + for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' { + s.next() + } + return string(s.src[offs:s.offset]) +} + +func (s *Scanner) scanEscape(val bool) { + offs := s.offset + ch := s.ch + s.next() // always make progress + switch ch { + case '\\', '"': + // ok + case 'n', 't', 'b': + if val { + break // ok + } + fallthrough + default: + s.error(offs, "unknown escape sequence") + } +} + +func (s *Scanner) scanString() string { + // '"' opening already consumed + offs := s.offset - 1 + + for s.ch != '"' { + ch := s.ch + s.next() + if ch == '\n' || ch < 0 { + s.error(offs, "string not terminated") + break + } + if ch == '\\' { + s.scanEscape(false) + } + } + + s.next() + + return string(s.src[offs:s.offset]) +} + +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b 
{ + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] +} + +func (s *Scanner) scanValString() string { + offs := s.offset + + hasCR := false + end := offs + inQuote := false +loop: + for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' { + ch := s.ch + s.next() + switch { + case inQuote && ch == '\\': + s.scanEscape(true) + case !inQuote && ch == '\\': + if s.ch == '\r' { + hasCR = true + s.next() + } + if s.ch != '\n' { + s.scanEscape(true) + } else { + s.next() + } + case ch == '"': + inQuote = !inQuote + case ch == '\r': + hasCR = true + case ch < 0 || inQuote && ch == '\n': + s.error(offs, "string not terminated") + break loop + } + if inQuote || !isWhiteSpace(ch) { + end = s.offset + } + } + + lit := s.src[offs:end] + if hasCR { + lit = stripCR(lit) + } + + return string(lit) +} + +func isWhiteSpace(ch rune) bool { + return ch == ' ' || ch == '\t' || ch == '\r' +} + +func (s *Scanner) skipWhitespace() { + for isWhiteSpace(s.ch) { + s.next() + } +} + +// Scan scans the next token and returns the token position, the token, +// and its literal string if applicable. The source end is indicated by +// token.EOF. +// +// If the returned token is a literal (token.IDENT, token.STRING) or +// token.COMMENT, the literal string has the corresponding value. +// +// If the returned token is token.ILLEGAL, the literal string is the +// offending character. +// +// In all other cases, Scan returns an empty literal string. +// +// For more tolerant parsing, Scan will return a valid token if +// possible even if a syntax error was encountered. Thus, even +// if the resulting token sequence contains no illegal tokens, +// a client may not assume that no error occurred. Instead it +// must check the scanner's ErrorCount or the number of calls +// of the error handler, if there was one installed. +// +// Scan adds line information to the file added to the file +// set with Init. Token positions are relative to that file +// and thus relative to the file set. 
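+//
+// A typical scanning loop might look like this (an illustrative sketch):
+//
+//  var s scanner.Scanner
+//  s.Init(file, src, nil, 0)
+//  for {
+//      _, tok, lit := s.Scan()
+//      if tok == token.EOF {
+//          break
+//      }
+//      fmt.Println(tok, lit)
+//  }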
+// +func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) { +scanAgain: + s.skipWhitespace() + + // current token start + pos = s.file.Pos(s.offset) + + // determine token value + switch ch := s.ch; { + case s.nextVal: + lit = s.scanValString() + tok = token.STRING + s.nextVal = false + case isLetter(ch): + lit = s.scanIdentifier() + tok = token.IDENT + default: + s.next() // always make progress + switch ch { + case -1: + tok = token.EOF + case '\n': + tok = token.EOL + case '"': + tok = token.STRING + lit = s.scanString() + case '[': + tok = token.LBRACK + case ']': + tok = token.RBRACK + case ';', '#': + // comment + lit = s.scanComment() + if s.mode&ScanComments == 0 { + // skip comment + goto scanAgain + } + tok = token.COMMENT + case '=': + tok = token.ASSIGN + s.nextVal = true + default: + s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch)) + tok = token.ILLEGAL + lit = string(ch) + } + } + + return +} diff --git a/vendor/github.com/go-git/gcfg/set.go b/vendor/github.com/go-git/gcfg/set.go new file mode 100644 index 0000000000..e2d9278025 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/set.go @@ -0,0 +1,332 @@ +package gcfg + +import ( + "bytes" + "encoding/gob" + "fmt" + "math/big" + "reflect" + "strings" + "unicode" + "unicode/utf8" + + "github.com/go-git/gcfg/types" + "gopkg.in/warnings.v0" +) + +type tag struct { + ident string + intMode string +} + +func newTag(ts string) tag { + t := tag{} + s := strings.Split(ts, ",") + t.ident = s[0] + for _, tse := range s[1:] { + if strings.HasPrefix(tse, "int=") { + t.intMode = tse[len("int="):] + } + } + return t +} + +func fieldFold(v reflect.Value, name string) (reflect.Value, tag) { + var n string + r0, _ := utf8.DecodeRuneInString(name) + if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) { + n = "X" + } + n += strings.Replace(name, "-", "_", -1) + f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool { + if !v.FieldByName(fieldName).CanSet() { + return false + } + f, _ := v.Type().FieldByName(fieldName) + t := newTag(f.Tag.Get("gcfg")) + if t.ident != "" { + return strings.EqualFold(t.ident, name) + } + return strings.EqualFold(n, fieldName) + }) + if !ok { + return reflect.Value{}, tag{} + } + return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg")) +} + +type setter func(destp interface{}, blank bool, val string, t tag) error + +var errUnsupportedType = fmt.Errorf("unsupported type") +var errBlankUnsupported = fmt.Errorf("blank value not supported for type") + +var setters = []setter{ + typeSetter, textUnmarshalerSetter, kindSetter, scanSetter, +} + +func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error { + dtu, ok := d.(textUnmarshaler) + if !ok { + return errUnsupportedType + } + if blank { + return errBlankUnsupported + } + return dtu.UnmarshalText([]byte(val)) +} + +func boolSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true)) + return nil + } + b, err := types.ParseBool(val) + if err == nil { + reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b)) + } + return err +} + +func intMode(mode string) types.IntMode { + var m types.IntMode + if strings.ContainsAny(mode, "dD") { + m |= types.Dec + } + if strings.ContainsAny(mode, "hH") { + m |= types.Hex + } + if strings.ContainsAny(mode, "oO") { + m |= types.Oct + } + return m +} + +var typeModes = map[reflect.Type]types.IntMode{ + reflect.TypeOf(int(0)): types.Dec | types.Hex, + reflect.TypeOf(int8(0)): types.Dec | 
types.Hex, + reflect.TypeOf(int16(0)): types.Dec | types.Hex, + reflect.TypeOf(int32(0)): types.Dec | types.Hex, + reflect.TypeOf(int64(0)): types.Dec | types.Hex, + reflect.TypeOf(uint(0)): types.Dec | types.Hex, + reflect.TypeOf(uint8(0)): types.Dec | types.Hex, + reflect.TypeOf(uint16(0)): types.Dec | types.Hex, + reflect.TypeOf(uint32(0)): types.Dec | types.Hex, + reflect.TypeOf(uint64(0)): types.Dec | types.Hex, + // use default mode (allow dec/hex/oct) for uintptr type + reflect.TypeOf(big.Int{}): types.Dec | types.Hex, +} + +func intModeDefault(t reflect.Type) types.IntMode { + m, ok := typeModes[t] + if !ok { + m = types.Dec | types.Hex | types.Oct + } + return m +} + +func intSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + mode := intMode(t.intMode) + if mode == 0 { + mode = intModeDefault(reflect.TypeOf(d).Elem()) + } + return types.ParseInt(d, val, mode) +} + +func stringSetter(d interface{}, blank bool, val string, t tag) error { + if blank { + return errBlankUnsupported + } + dsp, ok := d.(*string) + if !ok { + return errUnsupportedType + } + *dsp = val + return nil +} + +var kindSetters = map[reflect.Kind]setter{ + reflect.String: stringSetter, + reflect.Bool: boolSetter, + reflect.Int: intSetter, + reflect.Int8: intSetter, + reflect.Int16: intSetter, + reflect.Int32: intSetter, + reflect.Int64: intSetter, + reflect.Uint: intSetter, + reflect.Uint8: intSetter, + reflect.Uint16: intSetter, + reflect.Uint32: intSetter, + reflect.Uint64: intSetter, + reflect.Uintptr: intSetter, +} + +var typeSetters = map[reflect.Type]setter{ + reflect.TypeOf(big.Int{}): intSetter, +} + +func typeSetter(d interface{}, blank bool, val string, tt tag) error { + t := reflect.ValueOf(d).Type().Elem() + setter, ok := typeSetters[t] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func kindSetter(d interface{}, blank bool, val string, tt tag) error { + k := reflect.ValueOf(d).Type().Elem().Kind() + setter, ok := kindSetters[k] + if !ok { + return errUnsupportedType + } + return setter(d, blank, val, tt) +} + +func scanSetter(d interface{}, blank bool, val string, tt tag) error { + if blank { + return errBlankUnsupported + } + return types.ScanFully(d, val, 'v') +} + +func newValue(c *warnings.Collector, sect string, vCfg reflect.Value, + vType reflect.Type) (reflect.Value, error) { + // + pv := reflect.New(vType) + dfltName := "default-" + sect + dfltField, _ := fieldFold(vCfg, dfltName) + var err error + if dfltField.IsValid() { + b := bytes.NewBuffer(nil) + ge := gob.NewEncoder(b) + if err = c.Collect(ge.EncodeValue(dfltField)); err != nil { + return pv, err + } + gd := gob.NewDecoder(bytes.NewReader(b.Bytes())) + if err = c.Collect(gd.DecodeValue(pv.Elem())); err != nil { + return pv, err + } + } + return pv, nil +} + +func set(c *warnings.Collector, cfg interface{}, sect, sub, name string, + value string, blankValue bool, subsectPass bool) error { + // + vPCfg := reflect.ValueOf(cfg) + if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct { + panic(fmt.Errorf("config must be a pointer to a struct")) + } + vCfg := vPCfg.Elem() + vSect, _ := fieldFold(vCfg, sect) + if !vSect.IsValid() { + err := extraData{section: sect} + return c.Collect(err) + } + isSubsect := vSect.Kind() == reflect.Map + if subsectPass != isSubsect { + return nil + } + if isSubsect { + vst := vSect.Type() + if vst.Key().Kind() != reflect.String || + vst.Elem().Kind() != reflect.Ptr || + vst.Elem().Elem().Kind() != 
reflect.Struct { + panic(fmt.Errorf("map field for section must have string keys and "+ + " pointer-to-struct values: section %q", sect)) + } + if vSect.IsNil() { + vSect.Set(reflect.MakeMap(vst)) + } + k := reflect.ValueOf(sub) + pv := vSect.MapIndex(k) + if !pv.IsValid() { + vType := vSect.Type().Elem().Elem() + var err error + if pv, err = newValue(c, sect, vCfg, vType); err != nil { + return err + } + vSect.SetMapIndex(k, pv) + } + vSect = pv.Elem() + } else if vSect.Kind() != reflect.Struct { + panic(fmt.Errorf("field for section must be a map or a struct: "+ + "section %q", sect)) + } else if sub != "" { + err := extraData{section: sect, subsection: &sub} + return c.Collect(err) + } + // Empty name is a special value, meaning that only the + // section/subsection object is to be created, with no values set. + if name == "" { + return nil + } + vVar, t := fieldFold(vSect, name) + if !vVar.IsValid() { + var err error + if isSubsect { + err = extraData{section: sect, subsection: &sub, variable: &name} + } else { + err = extraData{section: sect, variable: &name} + } + return c.Collect(err) + } + // vVal is either single-valued var, or newly allocated value within multi-valued var + var vVal reflect.Value + // multi-value if unnamed slice type + isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice || + vVar.Type().Name() == "" && vVar.Kind() == reflect.Ptr && vVar.Type().Elem().Name() == "" && vVar.Type().Elem().Kind() == reflect.Slice + if isMulti && vVar.Kind() == reflect.Ptr { + if vVar.IsNil() { + vVar.Set(reflect.New(vVar.Type().Elem())) + } + vVar = vVar.Elem() + } + if isMulti && blankValue { + vVar.Set(reflect.Zero(vVar.Type())) + return nil + } + if isMulti { + vVal = reflect.New(vVar.Type().Elem()).Elem() + } else { + vVal = vVar + } + isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr + isNew := isDeref && vVal.IsNil() + // vAddr is address of value to set (dereferenced & allocated as needed) + var vAddr reflect.Value + switch { + case isNew: + vAddr = reflect.New(vVal.Type().Elem()) + case isDeref && !isNew: + vAddr = vVal + default: + vAddr = vVal.Addr() + } + vAddrI := vAddr.Interface() + err, ok := error(nil), false + for _, s := range setters { + err = s(vAddrI, blankValue, value, t) + if err == nil { + ok = true + break + } + if err != errUnsupportedType { + return err + } + } + if !ok { + // in case all setters returned errUnsupportedType + return err + } + if isNew { // set reference if it was dereferenced and newly allocated + vVal.Set(vAddr) + } + if isMulti { // append if multi-valued + vVar.Set(reflect.Append(vVar, vVal)) + } + return nil +} diff --git a/vendor/github.com/go-git/gcfg/token/position.go b/vendor/github.com/go-git/gcfg/token/position.go new file mode 100644 index 0000000000..fc45c1e769 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/token/position.go @@ -0,0 +1,435 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// TODO(gri) consider making this a separate package outside the go directory. + +package token + +import ( + "fmt" + "sort" + "sync" +) + +// ----------------------------------------------------------------------------- +// Positions + +// Position describes an arbitrary source position +// including the file, line, and column location. +// A Position is valid if the line number is > 0. 
+// +type Position struct { + Filename string // filename, if any + Offset int // offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (character count) +} + +// IsValid returns true if the position is valid. +func (pos *Position) IsValid() bool { return pos.Line > 0 } + +// String returns a string in one of several forms: +// +// file:line:column valid position with file name +// line:column valid position without file name +// file invalid position with file name +// - invalid position without file name +// +func (pos Position) String() string { + s := pos.Filename + if pos.IsValid() { + if s != "" { + s += ":" + } + s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) + } + if s == "" { + s = "-" + } + return s +} + +// Pos is a compact encoding of a source position within a file set. +// It can be converted into a Position for a more convenient, but much +// larger, representation. +// +// The Pos value for a given file is a number in the range [base, base+size], +// where base and size are specified when adding the file to the file set via +// AddFile. +// +// To create the Pos value for a specific source offset, first add +// the respective file to the current file set (via FileSet.AddFile) +// and then call File.Pos(offset) for that file. Given a Pos value p +// for a specific file set fset, the corresponding Position value is +// obtained by calling fset.Position(p). +// +// Pos values can be compared directly with the usual comparison operators: +// If two Pos values p and q are in the same file, comparing p and q is +// equivalent to comparing the respective source file offsets. If p and q +// are in different files, p < q is true if the file implied by p was added +// to the respective file set before the file implied by q. +// +type Pos int + +// The zero value for Pos is NoPos; there is no file and line information +// associated with it, and NoPos().IsValid() is false. NoPos is always +// smaller than any other Pos value. The corresponding Position value +// for NoPos is the zero value for Position. +// +const NoPos Pos = 0 + +// IsValid returns true if the position is valid. +func (p Pos) IsValid() bool { + return p != NoPos +} + +// ----------------------------------------------------------------------------- +// File + +// A File is a handle for a file belonging to a FileSet. +// A File has a name, size, and line offset table. +// +type File struct { + set *FileSet + name string // file name as provided to AddFile + base int // Pos value range for this file is [base...base+size] + size int // file size as provided to AddFile + + // lines and infos are protected by set.mutex + lines []int + infos []lineInfo +} + +// Name returns the file name of file f as registered with AddFile. +func (f *File) Name() string { + return f.name +} + +// Base returns the base offset of file f as registered with AddFile. +func (f *File) Base() int { + return f.base +} + +// Size returns the size of file f as registered with AddFile. +func (f *File) Size() int { + return f.size +} + +// LineCount returns the number of lines in file f. +func (f *File) LineCount() int { + f.set.mutex.RLock() + n := len(f.lines) + f.set.mutex.RUnlock() + return n +} + +// AddLine adds the line offset for a new line. +// The line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise the line offset is ignored. 
+// +func (f *File) AddLine(offset int) { + f.set.mutex.Lock() + if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size { + f.lines = append(f.lines, offset) + } + f.set.mutex.Unlock() +} + +// SetLines sets the line offsets for a file and returns true if successful. +// The line offsets are the offsets of the first character of each line; +// for instance for the content "ab\nc\n" the line offsets are {0, 3}. +// An empty file has an empty line offset table. +// Each line offset must be larger than the offset for the previous line +// and smaller than the file size; otherwise SetLines fails and returns +// false. +// +func (f *File) SetLines(lines []int) bool { + // verify validity of lines table + size := f.size + for i, offset := range lines { + if i > 0 && offset <= lines[i-1] || size <= offset { + return false + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() + return true +} + +// SetLinesForContent sets the line offsets for the given file content. +func (f *File) SetLinesForContent(content []byte) { + var lines []int + line := 0 + for offset, b := range content { + if line >= 0 { + lines = append(lines, line) + } + line = -1 + if b == '\n' { + line = offset + 1 + } + } + + // set lines table + f.set.mutex.Lock() + f.lines = lines + f.set.mutex.Unlock() +} + +// A lineInfo object describes alternative file and line number +// information (such as provided via a //line comment in a .go +// file) for a given file offset. +type lineInfo struct { + // fields are exported to make them accessible to gob + Offset int + Filename string + Line int +} + +// AddLineInfo adds alternative file and line number information for +// a given file offset. The offset must be larger than the offset for +// the previously added alternative line info and smaller than the +// file size; otherwise the information is ignored. +// +// AddLineInfo is typically used to register alternative position +// information for //line filename:line comments in source files. +// +func (f *File) AddLineInfo(offset int, filename string, line int) { + f.set.mutex.Lock() + if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size { + f.infos = append(f.infos, lineInfo{offset, filename, line}) + } + f.set.mutex.Unlock() +} + +// Pos returns the Pos value for the given file offset; +// the offset must be <= f.Size(). +// f.Pos(f.Offset(p)) == p. +// +func (f *File) Pos(offset int) Pos { + if offset > f.size { + panic("illegal file offset") + } + return Pos(f.base + offset) +} + +// Offset returns the offset for the given file position p; +// p must be a valid Pos value in that file. +// f.Offset(f.Pos(offset)) == offset. +// +func (f *File) Offset(p Pos) int { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + return int(p) - f.base +} + +// Line returns the line number for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Line(p Pos) int { + // TODO(gri) this can be implemented much more efficiently + return f.Position(p).Line +} + +func searchLineInfos(a []lineInfo, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1 +} + +// info returns the file name, line, and column number for a file offset. 
+func (f *File) info(offset int) (filename string, line, column int) { + filename = f.name + if i := searchInts(f.lines, offset); i >= 0 { + line, column = i+1, offset-f.lines[i]+1 + } + if len(f.infos) > 0 { + // almost no files have extra line infos + if i := searchLineInfos(f.infos, offset); i >= 0 { + alt := &f.infos[i] + filename = alt.Filename + if i := searchInts(f.lines, alt.Offset); i >= 0 { + line += alt.Line - i - 1 + } + } + } + return +} + +func (f *File) position(p Pos) (pos Position) { + offset := int(p) - f.base + pos.Offset = offset + pos.Filename, pos.Line, pos.Column = f.info(offset) + return +} + +// Position returns the Position value for the given file position p; +// p must be a Pos value in that file or NoPos. +// +func (f *File) Position(p Pos) (pos Position) { + if p != NoPos { + if int(p) < f.base || int(p) > f.base+f.size { + panic("illegal Pos value") + } + pos = f.position(p) + } + return +} + +// ----------------------------------------------------------------------------- +// FileSet + +// A FileSet represents a set of source files. +// Methods of file sets are synchronized; multiple goroutines +// may invoke them concurrently. +// +type FileSet struct { + mutex sync.RWMutex // protects the file set + base int // base offset for the next file + files []*File // list of files in the order added to the set + last *File // cache of last file looked up +} + +// NewFileSet creates a new file set. +func NewFileSet() *FileSet { + s := new(FileSet) + s.base = 1 // 0 == NoPos + return s +} + +// Base returns the minimum base offset that must be provided to +// AddFile when adding the next file. +// +func (s *FileSet) Base() int { + s.mutex.RLock() + b := s.base + s.mutex.RUnlock() + return b + +} + +// AddFile adds a new file with a given filename, base offset, and file size +// to the file set s and returns the file. Multiple files may have the same +// name. The base offset must not be smaller than the FileSet's Base(), and +// size must not be negative. +// +// Adding the file will set the file set's Base() value to base + size + 1 +// as the minimum base value for the next file. The following relationship +// exists between a Pos value p for a given file offset offs: +// +// int(p) = base + offs +// +// with offs in the range [0, size] and thus p in the range [base, base+size]. +// For convenience, File.Pos may be used to create file-specific position +// values from a file offset. +// +func (s *FileSet) AddFile(filename string, base, size int) *File { + s.mutex.Lock() + defer s.mutex.Unlock() + if base < s.base || size < 0 { + panic("illegal base or size") + } + // base >= s.base && size >= 0 + f := &File{s, filename, base, size, []int{0}, nil} + base += size + 1 // +1 because EOF also has a position + if base < 0 { + panic("token.Pos offset overflow (> 2G of source code in file set)") + } + // add the file to the file set + s.base = base + s.files = append(s.files, f) + s.last = f + return f +} + +// Iterate calls f for the files in the file set in the order they were added +// until f returns false. 
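+//
+// For example (an illustrative sketch):
+//
+//  fset.Iterate(func(f *File) bool {
+//      fmt.Println(f.Name())
+//      return true // continue iterating
+//  })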
+// +func (s *FileSet) Iterate(f func(*File) bool) { + for i := 0; ; i++ { + var file *File + s.mutex.RLock() + if i < len(s.files) { + file = s.files[i] + } + s.mutex.RUnlock() + if file == nil || !f(file) { + break + } + } +} + +func searchFiles(a []*File, x int) int { + return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1 +} + +func (s *FileSet) file(p Pos) *File { + // common case: p is in last file + if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size { + return f + } + // p is not in last file - search all files + if i := searchFiles(s.files, int(p)); i >= 0 { + f := s.files[i] + // f.base <= int(p) by definition of searchFiles + if int(p) <= f.base+f.size { + s.last = f + return f + } + } + return nil +} + +// File returns the file that contains the position p. +// If no such file is found (for instance for p == NoPos), +// the result is nil. +// +func (s *FileSet) File(p Pos) (f *File) { + if p != NoPos { + s.mutex.RLock() + f = s.file(p) + s.mutex.RUnlock() + } + return +} + +// Position converts a Pos in the fileset into a general Position. +func (s *FileSet) Position(p Pos) (pos Position) { + if p != NoPos { + s.mutex.RLock() + if f := s.file(p); f != nil { + pos = f.position(p) + } + s.mutex.RUnlock() + } + return +} + +// ----------------------------------------------------------------------------- +// Helper functions + +func searchInts(a []int, x int) int { + // This function body is a manually inlined version of: + // + // return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 + // + // With better compiler optimizations, this may not be needed in the + // future, but at the moment this change improves the go/printer + // benchmark performance by ~30%. This has a direct impact on the + // speed of gofmt and thus seems worthwhile (2011-04-29). + // TODO(gri): Remove this when compilers have caught up. + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 // avoid overflow when computing h + // i ≤ h < j + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} diff --git a/vendor/github.com/go-git/gcfg/token/serialize.go b/vendor/github.com/go-git/gcfg/token/serialize.go new file mode 100644 index 0000000000..4adc8f9e33 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/token/serialize.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package token + +type serializedFile struct { + // fields correspond 1:1 to fields with same (lower-case) name in File + Name string + Base int + Size int + Lines []int + Infos []lineInfo +} + +type serializedFileSet struct { + Base int + Files []serializedFile +} + +// Read calls decode to deserialize a file set into s; s must not be nil. +func (s *FileSet) Read(decode func(interface{}) error) error { + var ss serializedFileSet + if err := decode(&ss); err != nil { + return err + } + + s.mutex.Lock() + s.base = ss.Base + files := make([]*File, len(ss.Files)) + for i := 0; i < len(ss.Files); i++ { + f := &ss.Files[i] + files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos} + } + s.files = files + s.last = nil + s.mutex.Unlock() + + return nil +} + +// Write calls encode to serialize the file set s. 
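+//
+// For example, using encoding/gob (an illustrative sketch):
+//
+//  var buf bytes.Buffer
+//  err := fset.Write(gob.NewEncoder(&buf).Encode)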
+func (s *FileSet) Write(encode func(interface{}) error) error { + var ss serializedFileSet + + s.mutex.Lock() + ss.Base = s.base + files := make([]serializedFile, len(s.files)) + for i, f := range s.files { + files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos} + } + ss.Files = files + s.mutex.Unlock() + + return encode(ss) +} diff --git a/vendor/github.com/go-git/gcfg/token/token.go b/vendor/github.com/go-git/gcfg/token/token.go new file mode 100644 index 0000000000..b3c7c83fa9 --- /dev/null +++ b/vendor/github.com/go-git/gcfg/token/token.go @@ -0,0 +1,83 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package token defines constants representing the lexical tokens of the gcfg +// configuration syntax and basic operations on tokens (printing, predicates). +// +// Note that the API for the token package may change to accommodate new +// features or implementation changes in gcfg. +// +package token + +import "strconv" + +// Token is the set of lexical tokens of the gcfg configuration syntax. +type Token int + +// The list of tokens. +const ( + // Special tokens + ILLEGAL Token = iota + EOF + COMMENT + + literal_beg + // Identifiers and basic type literals + // (these tokens stand for classes of literals) + IDENT // section-name, variable-name + STRING // "subsection-name", variable value + literal_end + + operator_beg + // Operators and delimiters + ASSIGN // = + LBRACK // [ + RBRACK // ] + EOL // \n + operator_end +) + +var tokens = [...]string{ + ILLEGAL: "ILLEGAL", + + EOF: "EOF", + COMMENT: "COMMENT", + + IDENT: "IDENT", + STRING: "STRING", + + ASSIGN: "=", + LBRACK: "[", + RBRACK: "]", + EOL: "\n", +} + +// String returns the string corresponding to the token tok. +// For operators and delimiters, the string is the actual token character +// sequence (e.g., for the token ASSIGN, the string is "="). For all other +// tokens the string corresponds to the token constant name (e.g. for the +// token IDENT, the string is "IDENT"). +// +func (tok Token) String() string { + s := "" + if 0 <= tok && tok < Token(len(tokens)) { + s = tokens[tok] + } + if s == "" { + s = "token(" + strconv.Itoa(int(tok)) + ")" + } + return s +} + +// Predicates + +// IsLiteral returns true for tokens corresponding to identifiers +// and basic type literals; it returns false otherwise. +// +func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end } + +// IsOperator returns true for tokens corresponding to operators and +// delimiters; it returns false otherwise. +// +func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end } diff --git a/vendor/github.com/go-git/gcfg/types/bool.go b/vendor/github.com/go-git/gcfg/types/bool.go new file mode 100644 index 0000000000..8dcae0d8cf --- /dev/null +++ b/vendor/github.com/go-git/gcfg/types/bool.go @@ -0,0 +1,23 @@ +package types + +// BoolValues defines the name and value mappings for ParseBool. +var BoolValues = map[string]interface{}{ + "true": true, "yes": true, "on": true, "1": true, + "false": false, "no": false, "off": false, "0": false, +} + +var boolParser = func() *EnumParser { + ep := &EnumParser{} + ep.AddVals(BoolValues) + return ep +}() + +// ParseBool parses bool values according to the definitions in BoolValues. +// Parsing is case-insensitive. 
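+//
+// For example:
+//
+//  v, err := types.ParseBool("YES") // v == true, err == nil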
+func ParseBool(s string) (bool, error) {
+	v, err := boolParser.Parse(s)
+	if err != nil {
+		return false, err
+	}
+	return v.(bool), nil
+}
diff --git a/vendor/github.com/go-git/gcfg/types/doc.go b/vendor/github.com/go-git/gcfg/types/doc.go
new file mode 100644
index 0000000000..9f9c345f6e
--- /dev/null
+++ b/vendor/github.com/go-git/gcfg/types/doc.go
@@ -0,0 +1,4 @@
+// Package types defines helpers for type conversions.
+//
+// The API for this package is not finalized yet.
+package types
diff --git a/vendor/github.com/go-git/gcfg/types/enum.go b/vendor/github.com/go-git/gcfg/types/enum.go
new file mode 100644
index 0000000000..1a0c7ef453
--- /dev/null
+++ b/vendor/github.com/go-git/gcfg/types/enum.go
@@ -0,0 +1,44 @@
+package types
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+)
+
+// EnumParser parses "enum" values, i.e. it maps a predefined set of strings
+// to predefined values.
+type EnumParser struct {
+	Type      string // type name; if not set, use type of first value added
+	CaseMatch bool   // if true, matching of strings is case-sensitive
+	// PrefixMatch bool
+	vals map[string]interface{}
+}
+
+// AddVals adds strings and values to an EnumParser.
+func (ep *EnumParser) AddVals(vals map[string]interface{}) {
+	if ep.vals == nil {
+		ep.vals = make(map[string]interface{})
+	}
+	for k, v := range vals {
+		if ep.Type == "" {
+			ep.Type = reflect.TypeOf(v).Name()
+		}
+		if !ep.CaseMatch {
+			k = strings.ToLower(k)
+		}
+		ep.vals[k] = v
+	}
+}
+
+// Parse parses the string and returns the value or an error.
+func (ep EnumParser) Parse(s string) (interface{}, error) {
+	if !ep.CaseMatch {
+		s = strings.ToLower(s)
+	}
+	v, ok := ep.vals[s]
+	if !ok {
+		return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
+	}
+	return v, nil
+}
diff --git a/vendor/github.com/go-git/gcfg/types/int.go b/vendor/github.com/go-git/gcfg/types/int.go
new file mode 100644
index 0000000000..af7e75c125
--- /dev/null
+++ b/vendor/github.com/go-git/gcfg/types/int.go
@@ -0,0 +1,93 @@
+package types
+
+import (
+	"fmt"
+	"strings"
+)
+
+// An IntMode is a mode for parsing integer values, representing a set of
+// accepted bases.
+type IntMode uint8
+
+// IntMode values for ParseInt; can be combined using bitwise OR.
+const (
+	Dec IntMode = 1 << iota
+	Hex
+	Oct
+)
+
+// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
+func (m IntMode) String() string {
+	var modes []string
+	if m&Dec != 0 {
+		modes = append(modes, "Dec")
+	}
+	if m&Hex != 0 {
+		modes = append(modes, "Hex")
+	}
+	if m&Oct != 0 {
+		modes = append(modes, "Oct")
+	}
+	return "IntMode(" + strings.Join(modes, "|") + ")"
+}
+
+var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
+
+func prefix0(val string) bool {
+	return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
+}
+
+func prefix0x(val string) bool {
+	return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
+}
+
+// ParseInt parses val using mode into intptr, which must be a pointer to an
+// integer kind type. Non-decimal values require a `0` or `0x` prefix when
+// mode permits more than one base; otherwise the prefix can be omitted.
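+//
+// Illustrative calls, assuming n is declared as an int:
+//
+//	ParseInt(&n, "10", Dec)       // n = 10
+//	ParseInt(&n, "0x10", Dec|Hex) // n = 16; the 0x prefix selects hex
+//	ParseInt(&n, "10", Hex)       // n = 16; mode is unambiguous, no prefix needed
+//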
+func ParseInt(intptr interface{}, val string, mode IntMode) error { + val = strings.TrimSpace(val) + verb := byte(0) + switch mode { + case Dec: + verb = 'd' + case Dec + Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Oct: + if prefix0(val) && !prefix0x(val) { + verb = 'v' + } else { + verb = 'd' + } + case Dec + Hex + Oct: + verb = 'v' + case Hex: + if prefix0x(val) { + verb = 'v' + } else { + verb = 'x' + } + case Oct: + verb = 'o' + case Hex + Oct: + if prefix0(val) { + verb = 'v' + } else { + return errIntAmbig + } + } + if verb == 0 { + panic("unsupported mode") + } + return ScanFully(intptr, val, verb) +} diff --git a/vendor/github.com/go-git/gcfg/types/scan.go b/vendor/github.com/go-git/gcfg/types/scan.go new file mode 100644 index 0000000000..db2f6ed3ca --- /dev/null +++ b/vendor/github.com/go-git/gcfg/types/scan.go @@ -0,0 +1,23 @@ +package types + +import ( + "fmt" + "io" + "reflect" +) + +// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr. +func ScanFully(ptr interface{}, val string, verb byte) error { + t := reflect.ValueOf(ptr).Elem().Type() + // attempt to read extra bytes to make sure the value is consumed + var b []byte + n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b) + switch { + case n < 1 || n == 1 && err != io.EOF: + return fmt.Errorf("failed to parse %q as %v: %v", val, t, err) + case n > 1: + return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b)) + } + // n == 1 && err == io.EOF + return nil +} diff --git a/vendor/github.com/go-git/go-billy/v5/.gitignore b/vendor/github.com/go-git/go-billy/v5/.gitignore new file mode 100644 index 0000000000..7aeb46699c --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/.gitignore @@ -0,0 +1,4 @@ +/coverage.txt +/vendor +Gopkg.lock +Gopkg.toml diff --git a/vendor/github.com/go-git/go-billy/v5/LICENSE b/vendor/github.com/go-git/go-billy/v5/LICENSE new file mode 100644 index 0000000000..9d60756894 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2017 Sourced Technologies S.L.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/go-git/go-billy/v5/README.md b/vendor/github.com/go-git/go-billy/v5/README.md
new file mode 100644
index 0000000000..da5c074782
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/README.md
@@ -0,0 +1,93 @@
+# go-billy [![GoDoc](https://godoc.org/gopkg.in/go-git/go-billy.v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-billy/v5) [![Test](https://github.com/go-git/go-billy/workflows/Test/badge.svg)](https://github.com/go-git/go-billy/actions?query=workflow%3ATest)
+
+The missing interface filesystem abstraction for Go.
+Billy implements an interface based on the `os` standard library, allowing you to develop applications without depending on the underlying storage. This makes it virtually free to implement mocks and testing over filesystem operations.
+
+Billy was born as part of the [go-git/go-git](https://github.com/go-git/go-git) project.
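+
+For example, the same code can write to the real disk in production and to an
+in-memory filesystem in tests. A minimal sketch (the helper name and paths are
+illustrative):
+
+```go
+func WriteGreeting(fs billy.Filesystem) error {
+	f, err := fs.Create("greeting.txt")
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	_, err = f.Write([]byte("hello"))
+	return err
+}
+
+// production: WriteGreeting(osfs.New("/tmp/data"))
+// tests:      WriteGreeting(memfs.New())
+```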
+
+## Installation
+
+```go
+import "github.com/go-git/go-billy/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH)
+import "github.com/go-git/go-billy" // with go modules disabled
+```
+
+## Usage
+
+Billy exposes filesystems using the
+[`Filesystem` interface](https://pkg.go.dev/github.com/go-git/go-billy/v5?tab=doc#Filesystem).
+Each filesystem implementation provides a `New` function, whose arguments depend on
+the implementation itself, and which returns a new `Filesystem`.
+
+The following example caches in memory all readable files in a directory from any
+billy filesystem implementation.
+
+```go
+func LoadToMemory(origin billy.Filesystem, path string) (billy.Filesystem, error) {
+	memory := memfs.New()
+
+	files, err := origin.ReadDir(path)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, file := range files {
+		if file.IsDir() {
+			continue
+		}
+
+		src, err := origin.Open(origin.Join(path, file.Name()))
+		if err != nil {
+			return nil, err
+		}
+
+		dst, err := memory.Create(file.Name())
+		if err != nil {
+			return nil, err
+		}
+
+		if _, err = io.Copy(dst, src); err != nil {
+			return nil, err
+		}
+
+		if err := dst.Close(); err != nil {
+			return nil, err
+		}
+
+		if err := src.Close(); err != nil {
+			return nil, err
+		}
+	}
+
+	return memory, nil
+}
+```
+
+## Why billy?
+
+The library billy deals with storage systems and Billy is the name of a well-known IKEA
+bookcase. That's it.
+
+## License
+
+Apache License Version 2.0, see [LICENSE](LICENSE)
diff --git a/vendor/github.com/go-git/go-billy/v5/fs.go b/vendor/github.com/go-git/go-billy/v5/fs.go
new file mode 100644
index 0000000000..a9efccdeb2
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/fs.go
@@ -0,0 +1,217 @@
+package billy
+
+import (
+	"errors"
+	"io"
+	"os"
+	"time"
+)
+
+var (
+	ErrReadOnly        = errors.New("read-only filesystem")
+	ErrNotSupported    = errors.New("feature not supported")
+	ErrCrossedBoundary = errors.New("chroot boundary crossed")
+)
+
+// Capability holds the supported features of a billy filesystem. This does
+// not mean that the capability has to be supported by the underlying storage.
+// For example, a billy filesystem may support WriteCapability but the
+// storage may be mounted in read-only mode.
+type Capability uint64
+
+const (
+	// WriteCapability means that the fs is writable.
+	WriteCapability Capability = 1 << iota
+	// ReadCapability means that the fs is readable.
+	ReadCapability
+	// ReadAndWriteCapability is the ability to open a file in read and write mode.
+	ReadAndWriteCapability
+	// SeekCapability means it is able to move position inside the file.
+	SeekCapability
+	// TruncateCapability means that a file can be truncated.
+	TruncateCapability
+	// LockCapability is the ability to lock a file.
+	LockCapability
+
+	// DefaultCapabilities lists all capable features supported by filesystems
+	// without the Capability interface. This list should not be changed until a
+	// major version is released.
+	DefaultCapabilities Capability = WriteCapability | ReadCapability |
+		ReadAndWriteCapability | SeekCapability | TruncateCapability |
+		LockCapability
+
+	// AllCapabilities lists all capable features.
+	AllCapabilities Capability = WriteCapability | ReadCapability |
+		ReadAndWriteCapability | SeekCapability | TruncateCapability |
+		LockCapability
+)
+
+// Filesystem abstracts the operations in a storage-agnostic interface.
+// Each method implementation mimics the behavior of the equivalent functions
+// of the os package from the standard library.
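+//
+// Callers typically obtain a concrete Filesystem from an implementation
+// package (e.g. osfs or memfs) and then program only against this
+// interface. An illustrative sketch:
+//
+//	fs := memfs.New()                // or osfs.New("/some/dir")
+//	f, err := fs.Create("notes.txt") // behaves like os.Create
+//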
+type Filesystem interface {
+	Basic
+	TempFile
+	Dir
+	Symlink
+	Chroot
+}
+
+// Basic abstracts the basic filesystem operations in a storage-agnostic
+// interface.
+type Basic interface {
+	// Create creates the named file with mode 0666 (before umask), truncating
+	// it if it already exists. If successful, methods on the returned File can
+	// be used for I/O; the associated file descriptor has mode O_RDWR.
+	Create(filename string) (File, error)
+	// Open opens the named file for reading. If successful, methods on the
+	// returned file can be used for reading; the associated file descriptor has
+	// mode O_RDONLY.
+	Open(filename string) (File, error)
+	// OpenFile is the generalized open call; most users will use Open or Create
+	// instead. It opens the named file with the specified flag (O_RDONLY etc.)
+	// and perm (0666 etc.), if applicable. If successful, methods on the returned
+	// File can be used for I/O.
+	OpenFile(filename string, flag int, perm os.FileMode) (File, error)
+	// Stat returns a FileInfo describing the named file.
+	Stat(filename string) (os.FileInfo, error)
+	// Rename renames (moves) oldpath to newpath. If newpath already exists and
+	// is not a directory, Rename replaces it. OS-specific restrictions may
+	// apply when oldpath and newpath are in different directories.
+	Rename(oldpath, newpath string) error
+	// Remove removes the named file or directory.
+	Remove(filename string) error
+	// Join joins any number of path elements into a single path, adding a
+	// Separator if necessary. Join calls filepath.Clean on the result; in
+	// particular, all empty strings are ignored. On Windows, the result is a
+	// UNC path if and only if the first path element is a UNC path.
+	Join(elem ...string) string
+}
+
+type TempFile interface {
+	// TempFile creates a new temporary file in the directory dir with a name
+	// beginning with prefix, opens the file for reading and writing, and
+	// returns the resulting *os.File. If dir is the empty string, TempFile
+	// uses the default directory for temporary files (see os.TempDir).
+	// Multiple programs calling TempFile simultaneously will not choose the
+	// same file. The caller can use f.Name() to find the pathname of the file.
+	// It is the caller's responsibility to remove the file when no longer
+	// needed.
+	TempFile(dir, prefix string) (File, error)
+}
+
+// Dir abstracts the directory-related operations in a storage-agnostic
+// interface as an extension to the Basic interface.
+type Dir interface {
+	// ReadDir reads the directory named by dirname and returns a list of
+	// directory entries sorted by filename.
+	ReadDir(path string) ([]os.FileInfo, error)
+	// MkdirAll creates a directory named path, along with any necessary
+	// parents, and returns nil, or else returns an error. The permission bits
+	// perm are used for all directories that MkdirAll creates. If path is
+	// already a directory, MkdirAll does nothing and returns nil.
+	MkdirAll(filename string, perm os.FileMode) error
+}
+
+// Symlink abstracts the symlink-related operations in a storage-agnostic
+// interface as an extension to the Basic interface.
+type Symlink interface {
+	// Lstat returns a FileInfo describing the named file. If the file is a
+	// symbolic link, the returned FileInfo describes the symbolic link. Lstat
+	// makes no attempt to follow the link.
+	Lstat(filename string) (os.FileInfo, error)
+	// Symlink creates a symbolic link from link to target. target may be an
+	// absolute or relative path, and need not refer to an existing node.
+	// Parent directories of link are created as necessary.
+	Symlink(target, link string) error
+	// Readlink returns the target path of link.
+	Readlink(link string) (string, error)
+}
+
+// Change abstracts the FileInfo-change-related operations in a storage-agnostic
+// interface as an extension to the Basic interface.
+type Change interface {
+	// Chmod changes the mode of the named file to mode. If the file is a
+	// symbolic link, it changes the mode of the link's target.
+	Chmod(name string, mode os.FileMode) error
+	// Lchown changes the numeric uid and gid of the named file. If the file is
+	// a symbolic link, it changes the uid and gid of the link itself.
+	Lchown(name string, uid, gid int) error
+	// Chown changes the numeric uid and gid of the named file. If the file is a
+	// symbolic link, it changes the uid and gid of the link's target.
+	Chown(name string, uid, gid int) error
+	// Chtimes changes the access and modification times of the named file,
+	// similar to the Unix utime() or utimes() functions.
+	//
+	// The underlying filesystem may truncate or round the values to a less
+	// precise time unit.
+	Chtimes(name string, atime time.Time, mtime time.Time) error
+}
+
+// Chroot abstracts the chroot-related operations in a storage-agnostic interface
+// as an extension to the Basic interface.
+type Chroot interface {
+	// Chroot returns a new filesystem of the same type whose root is
+	// the given path. Files outside of the designated directory tree cannot be
+	// accessed.
+	Chroot(path string) (Filesystem, error)
+	// Root returns the root path of the filesystem.
+	Root() string
+}
+
+// File represents a file; it is a subset of os.File.
+type File interface {
+	// Name returns the name of the file as presented to Open.
+	Name() string
+	io.Writer
+	io.Reader
+	io.ReaderAt
+	io.Seeker
+	io.Closer
+	// Lock locks the file like e.g. flock. It protects against access from
+	// other processes.
+	Lock() error
+	// Unlock unlocks the file.
+	Unlock() error
+	// Truncate truncates the file to the given size.
+	Truncate(size int64) error
+}
+
+// Capable is implemented by filesystems that can report their supported features.
+type Capable interface {
+	// Capabilities returns the capabilities of a filesystem in bit flags.
+	Capabilities() Capability
+}
+
+// Capabilities returns the features supported by a filesystem. If the FS
+// does not implement the Capable interface, it returns DefaultCapabilities.
+func Capabilities(fs Basic) Capability {
+	capable, ok := fs.(Capable)
+	if !ok {
+		return DefaultCapabilities
+	}
+
+	return capable.Capabilities()
+}
+
+// CapabilityCheck tests the filesystem for the provided capabilities and
+// returns true if it supports all of them.
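+//
+// For example (illustrative):
+//
+//	if !CapabilityCheck(fs, WriteCapability|TruncateCapability) {
+//		return ErrNotSupported
+//	}
+//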
+func CapabilityCheck(fs Basic, capabilities Capability) bool {
+	fsCaps := Capabilities(fs)
+	return fsCaps&capabilities == capabilities
+}
diff --git a/vendor/github.com/go-git/go-billy/v5/go.mod b/vendor/github.com/go-git/go-billy/v5/go.mod
new file mode 100644
index 0000000000..78ce0af2a5
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/go.mod
@@ -0,0 +1,10 @@
+module github.com/go-git/go-billy/v5
+
+require (
+	github.com/kr/text v0.2.0 // indirect
+	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
+	golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527
+	gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f
+)
+
+go 1.13
diff --git a/vendor/github.com/go-git/go-billy/v5/go.sum b/vendor/github.com/go-git/go-billy/v5/go.sum
new file mode 100644
index 0000000000..cdc052bc7e
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/go.sum
@@ -0,0 +1,14 @@
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/go-git/go-billy v1.0.0 h1:bXR6Zu3opPSg0R4dDxqaLglY4rxw7ja7wS16qSpOKL4=
+github.com/go-git/go-billy v3.1.0+incompatible h1:dwrJ8G2Jt1srYgIJs+lRjA36qBY68O2Lg5idKG8ef5M=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So=
+golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
+gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go b/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go
new file mode 100644
index 0000000000..8b44e784bd
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/helper/chroot/chroot.go
@@ -0,0 +1,249 @@
+package chroot

+import (
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-billy/v5/helper/polyfill"
+)
+
+// ChrootHelper is a helper to implement billy.Chroot.
+type ChrootHelper struct {
+	underlying billy.Filesystem
+	base       string
+}
+
+// New creates a new filesystem wrapping up the given 'fs'.
+// The created filesystem has its base in the given directory of the
+// underlying filesystem.
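+//
+// Illustrative use: scope an OS filesystem to a directory so that relative
+// paths cannot escape it:
+//
+//	scoped := New(osfs.New("/"), "/var/data")
+//	_, err := scoped.Open("../etc/passwd") // returns billy.ErrCrossedBoundary
+//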
+func New(fs billy.Basic, base string) billy.Filesystem { + return &ChrootHelper{ + underlying: polyfill.New(fs), + base: base, + } +} + +func (fs *ChrootHelper) underlyingPath(filename string) (string, error) { + if isCrossBoundaries(filename) { + return "", billy.ErrCrossedBoundary + } + + return fs.Join(fs.Root(), filename), nil +} + +func isCrossBoundaries(path string) bool { + path = filepath.ToSlash(path) + path = filepath.Clean(path) + + return strings.HasPrefix(path, ".."+string(filepath.Separator)) +} + +func (fs *ChrootHelper) Create(filename string) (billy.File, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + f, err := fs.underlying.Create(fullpath) + if err != nil { + return nil, err + } + + return newFile(fs, f, filename), nil +} + +func (fs *ChrootHelper) Open(filename string) (billy.File, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + f, err := fs.underlying.Open(fullpath) + if err != nil { + return nil, err + } + + return newFile(fs, f, filename), nil +} + +func (fs *ChrootHelper) OpenFile(filename string, flag int, mode os.FileMode) (billy.File, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + f, err := fs.underlying.OpenFile(fullpath, flag, mode) + if err != nil { + return nil, err + } + + return newFile(fs, f, filename), nil +} + +func (fs *ChrootHelper) Stat(filename string) (os.FileInfo, error) { + fullpath, err := fs.underlyingPath(filename) + if err != nil { + return nil, err + } + + return fs.underlying.Stat(fullpath) +} + +func (fs *ChrootHelper) Rename(from, to string) error { + var err error + from, err = fs.underlyingPath(from) + if err != nil { + return err + } + + to, err = fs.underlyingPath(to) + if err != nil { + return err + } + + return fs.underlying.Rename(from, to) +} + +func (fs *ChrootHelper) Remove(path string) error { + fullpath, err := fs.underlyingPath(path) + if err != nil { + return err + } + + return fs.underlying.Remove(fullpath) +} + +func (fs *ChrootHelper) Join(elem ...string) string { + return fs.underlying.Join(elem...) 
+}
+
+func (fs *ChrootHelper) TempFile(dir, prefix string) (billy.File, error) {
+	fullpath, err := fs.underlyingPath(dir)
+	if err != nil {
+		return nil, err
+	}
+
+	f, err := fs.underlying.(billy.TempFile).TempFile(fullpath, prefix)
+	if err != nil {
+		return nil, err
+	}
+
+	return newFile(fs, f, fs.Join(dir, filepath.Base(f.Name()))), nil
+}
+
+func (fs *ChrootHelper) ReadDir(path string) ([]os.FileInfo, error) {
+	fullpath, err := fs.underlyingPath(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.underlying.(billy.Dir).ReadDir(fullpath)
+}
+
+func (fs *ChrootHelper) MkdirAll(filename string, perm os.FileMode) error {
+	fullpath, err := fs.underlyingPath(filename)
+	if err != nil {
+		return err
+	}
+
+	return fs.underlying.(billy.Dir).MkdirAll(fullpath, perm)
+}
+
+func (fs *ChrootHelper) Lstat(filename string) (os.FileInfo, error) {
+	fullpath, err := fs.underlyingPath(filename)
+	if err != nil {
+		return nil, err
+	}
+
+	return fs.underlying.(billy.Symlink).Lstat(fullpath)
+}
+
+func (fs *ChrootHelper) Symlink(target, link string) error {
+	target = filepath.FromSlash(target)
+
+	// only rewrite target if it's already absolute
+	if filepath.IsAbs(target) || strings.HasPrefix(target, string(filepath.Separator)) {
+		target = fs.Join(fs.Root(), target)
+		target = filepath.Clean(filepath.FromSlash(target))
+	}
+
+	link, err := fs.underlyingPath(link)
+	if err != nil {
+		return err
+	}
+
+	return fs.underlying.(billy.Symlink).Symlink(target, link)
+}
+
+func (fs *ChrootHelper) Readlink(link string) (string, error) {
+	fullpath, err := fs.underlyingPath(link)
+	if err != nil {
+		return "", err
+	}
+
+	target, err := fs.underlying.(billy.Symlink).Readlink(fullpath)
+	if err != nil {
+		return "", err
+	}
+
+	if !filepath.IsAbs(target) && !strings.HasPrefix(target, string(filepath.Separator)) {
+		return target, nil
+	}
+
+	target, err = filepath.Rel(fs.base, target)
+	if err != nil {
+		return "", err
+	}
+
+	return string(os.PathSeparator) + target, nil
+}
+
+func (fs *ChrootHelper) Chroot(path string) (billy.Filesystem, error) {
+	fullpath, err := fs.underlyingPath(path)
+	if err != nil {
+		return nil, err
+	}
+
+	return New(fs.underlying, fullpath), nil
+}
+
+func (fs *ChrootHelper) Root() string {
+	return fs.base
+}
+
+func (fs *ChrootHelper) Underlying() billy.Basic {
+	return fs.underlying
+}
+
+// Capabilities implements the Capable interface.
+func (fs *ChrootHelper) Capabilities() billy.Capability {
+	return billy.Capabilities(fs.underlying)
+}
+
+type file struct {
+	billy.File
+	name string
+}
+
+func newFile(fs billy.Filesystem, f billy.File, filename string) billy.File {
+	filename = fs.Join(fs.Root(), filename)
+	filename, _ = filepath.Rel(fs.Root(), filename)
+
+	return &file{
+		File: f,
+		name: filename,
+	}
+}
+
+func (f *file) Name() string {
+	return f.name
+}
diff --git a/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go b/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go
new file mode 100644
index 0000000000..1efce0e7b8
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/helper/polyfill/polyfill.go
@@ -0,0 +1,112 @@
+package polyfill
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/go-git/go-billy/v5"
+)
+
+// Polyfill is a helper that implements all missing methods of billy.Filesystem.
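+//
+// For example (illustrative; basicOnlyFS stands for any value implementing
+// only billy.Basic):
+//
+//	full := New(basicOnlyFS)    // full is a complete billy.Filesystem
+//	_, err := full.ReadDir("/") // returns billy.ErrNotSupported
+//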
+type Polyfill struct {
+	billy.Basic
+	c capabilities
+}
+
+type capabilities struct{ tempfile, dir, symlink, chroot bool }
+
+// New creates a new filesystem wrapping 'fs'. The returned filesystem intercepts
+// all calls and errors if fs doesn't implement the interface a call requires.
+func New(fs billy.Basic) billy.Filesystem {
+	if original, ok := fs.(billy.Filesystem); ok {
+		return original
+	}
+
+	h := &Polyfill{Basic: fs}
+
+	_, h.c.tempfile = h.Basic.(billy.TempFile)
+	_, h.c.dir = h.Basic.(billy.Dir)
+	_, h.c.symlink = h.Basic.(billy.Symlink)
+	_, h.c.chroot = h.Basic.(billy.Chroot)
+	return h
+}
+
+func (h *Polyfill) TempFile(dir, prefix string) (billy.File, error) {
+	if !h.c.tempfile {
+		return nil, billy.ErrNotSupported
+	}
+
+	return h.Basic.(billy.TempFile).TempFile(dir, prefix)
+}
+
+func (h *Polyfill) ReadDir(path string) ([]os.FileInfo, error) {
+	if !h.c.dir {
+		return nil, billy.ErrNotSupported
+	}
+
+	return h.Basic.(billy.Dir).ReadDir(path)
+}
+
+func (h *Polyfill) MkdirAll(filename string, perm os.FileMode) error {
+	if !h.c.dir {
+		return billy.ErrNotSupported
+	}
+
+	return h.Basic.(billy.Dir).MkdirAll(filename, perm)
+}
+
+func (h *Polyfill) Symlink(target, link string) error {
+	if !h.c.symlink {
+		return billy.ErrNotSupported
+	}
+
+	return h.Basic.(billy.Symlink).Symlink(target, link)
+}
+
+func (h *Polyfill) Readlink(link string) (string, error) {
+	if !h.c.symlink {
+		return "", billy.ErrNotSupported
+	}
+
+	return h.Basic.(billy.Symlink).Readlink(link)
+}
+
+func (h *Polyfill) Lstat(path string) (os.FileInfo, error) {
+	if !h.c.symlink {
+		return nil, billy.ErrNotSupported
+	}
+
+	return h.Basic.(billy.Symlink).Lstat(path)
+}
+
+func (h *Polyfill) Chroot(path string) (billy.Filesystem, error) {
+	if !h.c.chroot {
+		return nil, billy.ErrNotSupported
+	}
+
+	return h.Basic.(billy.Chroot).Chroot(path)
+}
+
+func (h *Polyfill) Root() string {
+	if !h.c.chroot {
+		return string(filepath.Separator)
+	}
+
+	return h.Basic.(billy.Chroot).Root()
+}
+
+func (h *Polyfill) Underlying() billy.Basic {
+	return h.Basic
+}
+
+// Capabilities implements the Capable interface.
+func (h *Polyfill) Capabilities() billy.Capability {
+	return billy.Capabilities(h.Basic)
+}
diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/memory.go b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go
new file mode 100644
index 0000000000..f217693e6f
--- /dev/null
+++ b/vendor/github.com/go-git/go-billy/v5/memfs/memory.go
@@ -0,0 +1,418 @@
+// Package memfs provides a billy filesystem based on memory.
+package memfs // import "github.com/go-git/go-billy/v5/memfs"
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"sort"
+	"strings"
+	"time"
+
+	"github.com/go-git/go-billy/v5"
+	"github.com/go-git/go-billy/v5/helper/chroot"
+	"github.com/go-git/go-billy/v5/util"
+)
+
+const separator = filepath.Separator
+
+// Memory is a very convenient filesystem based on in-memory files.
+type Memory struct {
+	s *storage
+
+	tempCount int
+}
+
+// New returns a new Memory filesystem.
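+//
+// A typical use is an isolated scratch filesystem for tests (illustrative):
+//
+//	fs := New()
+//	err := util.WriteFile(fs, "/a/b.txt", []byte("hi"), 0644) // parents are created
+//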
+func New() billy.Filesystem {
+	fs := &Memory{s: newStorage()}
+	return chroot.New(fs, string(separator))
+}
+
+func (fs *Memory) Create(filename string) (billy.File, error) {
+	return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666)
+}
+
+func (fs *Memory) Open(filename string) (billy.File, error) {
+	return fs.OpenFile(filename, os.O_RDONLY, 0)
+}
+
+func (fs *Memory) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) {
+	f, has := fs.s.Get(filename)
+	if !has {
+		if !isCreate(flag) {
+			return nil, os.ErrNotExist
+		}
+
+		var err error
+		f, err = fs.s.New(filename, perm, flag)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		if isExclusive(flag) {
+			return nil, os.ErrExist
+		}
+
+		if target, isLink := fs.resolveLink(filename, f); isLink {
+			return fs.OpenFile(target, flag, perm)
+		}
+	}
+
+	if f.mode.IsDir() {
+		return nil, fmt.Errorf("cannot open directory: %s", filename)
+	}
+
+	return f.Duplicate(filename, perm, flag), nil
+}
+
+var errNotLink = errors.New("not a link")
+
+func (fs *Memory) resolveLink(fullpath string, f *file) (target string, isLink bool) {
+	if !isSymlink(f.mode) {
+		return fullpath, false
+	}
+
+	target = string(f.content.bytes)
+	if !isAbs(target) {
+		target = fs.Join(filepath.Dir(fullpath), target)
+	}
+
+	return target, true
+}
+
+// On Windows, filepath.IsAbs considers a path absolute only if it starts with
+// a drive letter (e.g. `C:\`), but in this in-memory implementation any path
+// starting with `separator` is also considered absolute.
+func isAbs(path string) bool {
+	return filepath.IsAbs(path) || strings.HasPrefix(path, string(separator))
+}
+
+func (fs *Memory) Stat(filename string) (os.FileInfo, error) {
+	f, has := fs.s.Get(filename)
+	if !has {
+		return nil, os.ErrNotExist
+	}
+
+	fi, _ := f.Stat()
+
+	var err error
+	if target, isLink := fs.resolveLink(filename, f); isLink {
+		fi, err = fs.Stat(target)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// the name of the file should always be the name of the stated file, so we
+	// overwrite the Stat returned from the storage with it, since the
+	// filename may belong to a link.
+ fi.(*fileInfo).name = filepath.Base(filename) + return fi, nil +} + +func (fs *Memory) Lstat(filename string) (os.FileInfo, error) { + f, has := fs.s.Get(filename) + if !has { + return nil, os.ErrNotExist + } + + return f.Stat() +} + +type ByName []os.FileInfo + +func (a ByName) Len() int { return len(a) } +func (a ByName) Less(i, j int) bool { return a[i].Name() < a[j].Name() } +func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (fs *Memory) ReadDir(path string) ([]os.FileInfo, error) { + if f, has := fs.s.Get(path); has { + if target, isLink := fs.resolveLink(path, f); isLink { + return fs.ReadDir(target) + } + } + + var entries []os.FileInfo + for _, f := range fs.s.Children(path) { + fi, _ := f.Stat() + entries = append(entries, fi) + } + + sort.Sort(ByName(entries)) + + return entries, nil +} + +func (fs *Memory) MkdirAll(path string, perm os.FileMode) error { + _, err := fs.s.New(path, perm|os.ModeDir, 0) + return err +} + +func (fs *Memory) TempFile(dir, prefix string) (billy.File, error) { + return util.TempFile(fs, dir, prefix) +} + +func (fs *Memory) getTempFilename(dir, prefix string) string { + fs.tempCount++ + filename := fmt.Sprintf("%s_%d_%d", prefix, fs.tempCount, time.Now().UnixNano()) + return fs.Join(dir, filename) +} + +func (fs *Memory) Rename(from, to string) error { + return fs.s.Rename(from, to) +} + +func (fs *Memory) Remove(filename string) error { + return fs.s.Remove(filename) +} + +func (fs *Memory) Join(elem ...string) string { + return filepath.Join(elem...) +} + +func (fs *Memory) Symlink(target, link string) error { + _, err := fs.Stat(link) + if err == nil { + return os.ErrExist + } + + if !os.IsNotExist(err) { + return err + } + + return util.WriteFile(fs, link, []byte(target), 0777|os.ModeSymlink) +} + +func (fs *Memory) Readlink(link string) (string, error) { + f, has := fs.s.Get(link) + if !has { + return "", os.ErrNotExist + } + + if !isSymlink(f.mode) { + return "", &os.PathError{ + Op: "readlink", + Path: link, + Err: fmt.Errorf("not a symlink"), + } + } + + return string(f.content.bytes), nil +} + +// Capabilities implements the Capable interface. 
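+// Note that LockCapability is not part of the returned set: Lock and Unlock
+// (defined below on *file) are no-ops in memfs.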
+func (fs *Memory) Capabilities() billy.Capability { + return billy.WriteCapability | + billy.ReadCapability | + billy.ReadAndWriteCapability | + billy.SeekCapability | + billy.TruncateCapability +} + +type file struct { + name string + content *content + position int64 + flag int + mode os.FileMode + + isClosed bool +} + +func (f *file) Name() string { + return f.name +} + +func (f *file) Read(b []byte) (int, error) { + n, err := f.ReadAt(b, f.position) + f.position += int64(n) + + if err == io.EOF && n != 0 { + err = nil + } + + return n, err +} + +func (f *file) ReadAt(b []byte, off int64) (int, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + if !isReadAndWrite(f.flag) && !isReadOnly(f.flag) { + return 0, errors.New("read not supported") + } + + n, err := f.content.ReadAt(b, off) + + return n, err +} + +func (f *file) Seek(offset int64, whence int) (int64, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + switch whence { + case io.SeekCurrent: + f.position += offset + case io.SeekStart: + f.position = offset + case io.SeekEnd: + f.position = int64(f.content.Len()) + offset + } + + return f.position, nil +} + +func (f *file) Write(p []byte) (int, error) { + if f.isClosed { + return 0, os.ErrClosed + } + + if !isReadAndWrite(f.flag) && !isWriteOnly(f.flag) { + return 0, errors.New("write not supported") + } + + n, err := f.content.WriteAt(p, f.position) + f.position += int64(n) + + return n, err +} + +func (f *file) Close() error { + if f.isClosed { + return os.ErrClosed + } + + f.isClosed = true + return nil +} + +func (f *file) Truncate(size int64) error { + if size < int64(len(f.content.bytes)) { + f.content.bytes = f.content.bytes[:size] + } else if more := int(size) - len(f.content.bytes); more > 0 { + f.content.bytes = append(f.content.bytes, make([]byte, more)...) + } + + return nil +} + +func (f *file) Duplicate(filename string, mode os.FileMode, flag int) billy.File { + new := &file{ + name: filename, + content: f.content, + mode: mode, + flag: flag, + } + + if isAppend(flag) { + new.position = int64(new.content.Len()) + } + + if isTruncate(flag) { + new.content.Truncate() + } + + return new +} + +func (f *file) Stat() (os.FileInfo, error) { + return &fileInfo{ + name: f.Name(), + mode: f.mode, + size: f.content.Len(), + }, nil +} + +// Lock is a no-op in memfs. +func (f *file) Lock() error { + return nil +} + +// Unlock is a no-op in memfs. 
+func (f *file) Unlock() error { + return nil +} + +type fileInfo struct { + name string + size int + mode os.FileMode +} + +func (fi *fileInfo) Name() string { + return fi.name +} + +func (fi *fileInfo) Size() int64 { + return int64(fi.size) +} + +func (fi *fileInfo) Mode() os.FileMode { + return fi.mode +} + +func (*fileInfo) ModTime() time.Time { + return time.Now() +} + +func (fi *fileInfo) IsDir() bool { + return fi.mode.IsDir() +} + +func (*fileInfo) Sys() interface{} { + return nil +} + +func (c *content) Truncate() { + c.bytes = make([]byte, 0) +} + +func (c *content) Len() int { + return len(c.bytes) +} + +func isCreate(flag int) bool { + return flag&os.O_CREATE != 0 +} + +func isExclusive(flag int) bool { + return flag&os.O_EXCL != 0 +} + +func isAppend(flag int) bool { + return flag&os.O_APPEND != 0 +} + +func isTruncate(flag int) bool { + return flag&os.O_TRUNC != 0 +} + +func isReadAndWrite(flag int) bool { + return flag&os.O_RDWR != 0 +} + +func isReadOnly(flag int) bool { + return flag == os.O_RDONLY +} + +func isWriteOnly(flag int) bool { + return flag&os.O_WRONLY != 0 +} + +func isSymlink(m os.FileMode) bool { + return m&os.ModeSymlink != 0 +} diff --git a/vendor/github.com/go-git/go-billy/v5/memfs/storage.go b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go new file mode 100644 index 0000000000..d3ff5a25db --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/memfs/storage.go @@ -0,0 +1,229 @@ +package memfs + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" +) + +type storage struct { + files map[string]*file + children map[string]map[string]*file +} + +func newStorage() *storage { + return &storage{ + files: make(map[string]*file, 0), + children: make(map[string]map[string]*file, 0), + } +} + +func (s *storage) Has(path string) bool { + path = clean(path) + + _, ok := s.files[path] + return ok +} + +func (s *storage) New(path string, mode os.FileMode, flag int) (*file, error) { + path = clean(path) + if s.Has(path) { + if !s.MustGet(path).mode.IsDir() { + return nil, fmt.Errorf("file already exists %q", path) + } + + return nil, nil + } + + name := filepath.Base(path) + + f := &file{ + name: name, + content: &content{name: name}, + mode: mode, + flag: flag, + } + + s.files[path] = f + s.createParent(path, mode, f) + return f, nil +} + +func (s *storage) createParent(path string, mode os.FileMode, f *file) error { + base := filepath.Dir(path) + base = clean(base) + if f.Name() == string(separator) { + return nil + } + + if _, err := s.New(base, mode.Perm()|os.ModeDir, 0); err != nil { + return err + } + + if _, ok := s.children[base]; !ok { + s.children[base] = make(map[string]*file, 0) + } + + s.children[base][f.Name()] = f + return nil +} + +func (s *storage) Children(path string) []*file { + path = clean(path) + + l := make([]*file, 0) + for _, f := range s.children[path] { + l = append(l, f) + } + + return l +} + +func (s *storage) MustGet(path string) *file { + f, ok := s.Get(path) + if !ok { + panic(fmt.Errorf("couldn't find %q", path)) + } + + return f +} + +func (s *storage) Get(path string) (*file, bool) { + path = clean(path) + if !s.Has(path) { + return nil, false + } + + file, ok := s.files[path] + return file, ok +} + +func (s *storage) Rename(from, to string) error { + from = clean(from) + to = clean(to) + + if !s.Has(from) { + return os.ErrNotExist + } + + move := [][2]string{{from, to}} + + for pathFrom := range s.files { + if pathFrom == from || !filepath.HasPrefix(pathFrom, from) { + continue + } + + rel, _ := filepath.Rel(from, 
pathFrom) + pathTo := filepath.Join(to, rel) + + move = append(move, [2]string{pathFrom, pathTo}) + } + + for _, ops := range move { + from := ops[0] + to := ops[1] + + if err := s.move(from, to); err != nil { + return err + } + } + + return nil +} + +func (s *storage) move(from, to string) error { + s.files[to] = s.files[from] + s.files[to].name = filepath.Base(to) + s.children[to] = s.children[from] + + defer func() { + delete(s.children, from) + delete(s.files, from) + delete(s.children[filepath.Dir(from)], filepath.Base(from)) + }() + + return s.createParent(to, 0644, s.files[to]) +} + +func (s *storage) Remove(path string) error { + path = clean(path) + + f, has := s.Get(path) + if !has { + return os.ErrNotExist + } + + if f.mode.IsDir() && len(s.children[path]) != 0 { + return fmt.Errorf("dir: %s contains files", path) + } + + base, file := filepath.Split(path) + base = filepath.Clean(base) + + delete(s.children[base], file) + delete(s.files, path) + return nil +} + +func clean(path string) string { + return filepath.Clean(filepath.FromSlash(path)) +} + +type content struct { + name string + bytes []byte +} + +func (c *content) WriteAt(p []byte, off int64) (int, error) { + if off < 0 { + return 0, &os.PathError{ + Op: "writeat", + Path: c.name, + Err: errors.New("negative offset"), + } + } + + prev := len(c.bytes) + + diff := int(off) - prev + if diff > 0 { + c.bytes = append(c.bytes, make([]byte, diff)...) + } + + c.bytes = append(c.bytes[:off], p...) + if len(c.bytes) < prev { + c.bytes = c.bytes[:prev] + } + + return len(p), nil +} + +func (c *content) ReadAt(b []byte, off int64) (n int, err error) { + if off < 0 { + return 0, &os.PathError{ + Op: "readat", + Path: c.name, + Err: errors.New("negative offset"), + } + } + + size := int64(len(c.bytes)) + if off >= size { + return 0, io.EOF + } + + l := int64(len(b)) + if off+l > size { + l = size - off + } + + btr := c.bytes[off : off+l] + if len(btr) < len(b) { + err = io.EOF + } + n = copy(b, btr) + + return +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os.go b/vendor/github.com/go-git/go-billy/v5/osfs/os.go new file mode 100644 index 0000000000..9665d2755d --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os.go @@ -0,0 +1,144 @@ +// +build !js + +// Package osfs provides a billy filesystem for the OS. +package osfs // import "github.com/go-git/go-billy/v5/osfs" + +import ( + "io/ioutil" + "os" + "path/filepath" + "sync" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/chroot" +) + +const ( + defaultDirectoryMode = 0755 + defaultCreateMode = 0666 +) + +// Default Filesystem representing the root of the os filesystem. +var Default = &OS{} + +// OS is a filesystem based on the os filesystem. +type OS struct{} + +// New returns a new OS filesystem. +func New(baseDir string) billy.Filesystem { + return chroot.New(Default, baseDir) +} + +func (fs *OS) Create(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, defaultCreateMode) +} + +func (fs *OS) OpenFile(filename string, flag int, perm os.FileMode) (billy.File, error) { + if flag&os.O_CREATE != 0 { + if err := fs.createDir(filename); err != nil { + return nil, err + } + } + + f, err := os.OpenFile(filename, flag, perm) + if err != nil { + return nil, err + } + return &file{File: f}, err +} + +func (fs *OS) createDir(fullpath string) error { + dir := filepath.Dir(fullpath) + if dir != "." 
{ + if err := os.MkdirAll(dir, defaultDirectoryMode); err != nil { + return err + } + } + + return nil +} + +func (fs *OS) ReadDir(path string) ([]os.FileInfo, error) { + l, err := ioutil.ReadDir(path) + if err != nil { + return nil, err + } + + var s = make([]os.FileInfo, len(l)) + for i, f := range l { + s[i] = f + } + + return s, nil +} + +func (fs *OS) Rename(from, to string) error { + if err := fs.createDir(to); err != nil { + return err + } + + return rename(from, to) +} + +func (fs *OS) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, defaultDirectoryMode) +} + +func (fs *OS) Open(filename string) (billy.File, error) { + return fs.OpenFile(filename, os.O_RDONLY, 0) +} + +func (fs *OS) Stat(filename string) (os.FileInfo, error) { + return os.Stat(filename) +} + +func (fs *OS) Remove(filename string) error { + return os.Remove(filename) +} + +func (fs *OS) TempFile(dir, prefix string) (billy.File, error) { + if err := fs.createDir(dir + string(os.PathSeparator)); err != nil { + return nil, err + } + + f, err := ioutil.TempFile(dir, prefix) + if err != nil { + return nil, err + } + return &file{File: f}, nil +} + +func (fs *OS) Join(elem ...string) string { + return filepath.Join(elem...) +} + +func (fs *OS) RemoveAll(path string) error { + return os.RemoveAll(filepath.Clean(path)) +} + +func (fs *OS) Lstat(filename string) (os.FileInfo, error) { + return os.Lstat(filepath.Clean(filename)) +} + +func (fs *OS) Symlink(target, link string) error { + if err := fs.createDir(link); err != nil { + return err + } + + return os.Symlink(target, link) +} + +func (fs *OS) Readlink(link string) (string, error) { + return os.Readlink(link) +} + +// Capabilities implements the Capable interface. +func (fs *OS) Capabilities() billy.Capability { + return billy.DefaultCapabilities +} + +// file is a wrapper for an os.File which adds support for file locking. +type file struct { + *os.File + m sync.Mutex +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go new file mode 100644 index 0000000000..8ae68fed68 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_js.go @@ -0,0 +1,21 @@ +// +build js + +package osfs + +import ( + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-billy/v5/helper/chroot" + "github.com/go-git/go-billy/v5/memfs" +) + +// globalMemFs is the global memory fs +var globalMemFs = memfs.New() + +// Default Filesystem representing the root of in-memory filesystem for a +// js/wasm environment. +var Default = memfs.New() + +// New returns a new OS filesystem. +func New(baseDir string) billy.Filesystem { + return chroot.New(Default, Default.Join("/", baseDir)) +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go new file mode 100644 index 0000000000..e8f519ffe2 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_plan9.go @@ -0,0 +1,85 @@ +// +build plan9 + +package osfs + +import ( + "io" + "os" + "path/filepath" + "syscall" +) + +func (f *file) Lock() error { + // Plan 9 uses a mode bit instead of explicit lock/unlock syscalls. + // + // Per http://man.cat-v.org/plan_9/5/stat: “Exclusive use files may be open + // for I/O by only one fid at a time across all clients of the server. If a + // second open is attempted, it draws an error.” + // + // There is no obvious way to implement this function using the exclusive use bit. 
+ // See https://golang.org/src/cmd/go/internal/lockedfile/lockedfile_plan9.go + // for how file locking is done by the go tool on Plan 9. + return nil +} + +func (f *file) Unlock() error { + return nil +} + +func rename(from, to string) error { + // If from and to are in different directories, copy the file + // since Plan 9 does not support cross-directory rename. + if filepath.Dir(from) != filepath.Dir(to) { + fi, err := os.Stat(from) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + if fi.Mode().IsDir() { + return &os.LinkError{"rename", from, to, syscall.EISDIR} + } + fromFile, err := os.Open(from) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + toFile, err := os.OpenFile(to, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, fi.Mode()) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + _, err = io.Copy(toFile, fromFile) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + + // Copy mtime and mode from original file. + // We need only one syscall if we avoid os.Chmod and os.Chtimes. + dir := fi.Sys().(*syscall.Dir) + var d syscall.Dir + d.Null() + d.Mtime = dir.Mtime + d.Mode = dir.Mode + if err = dirwstat(to, &d); err != nil { + return &os.LinkError{"rename", from, to, err} + } + + // Remove original file. + err = os.Remove(from) + if err != nil { + return &os.LinkError{"rename", from, to, err} + } + return nil + } + return os.Rename(from, to) +} + +func dirwstat(name string, d *syscall.Dir) error { + var buf [syscall.STATFIXLEN]byte + + n, err := d.Marshal(buf[:]) + if err != nil { + return &os.PathError{"dirwstat", name, err} + } + if err = syscall.Wstat(name, buf[:n]); err != nil { + return &os.PathError{"dirwstat", name, err} + } + return nil +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go new file mode 100644 index 0000000000..c74d60ee6b --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_posix.go @@ -0,0 +1,27 @@ +// +build !plan9,!windows,!js + +package osfs + +import ( + "os" + + "golang.org/x/sys/unix" +) + +func (f *file) Lock() error { + f.m.Lock() + defer f.m.Unlock() + + return unix.Flock(int(f.File.Fd()), unix.LOCK_EX) +} + +func (f *file) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + return unix.Flock(int(f.File.Fd()), unix.LOCK_UN) +} + +func rename(from, to string) error { + return os.Rename(from, to) +} diff --git a/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go b/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go new file mode 100644 index 0000000000..8f5caeb0ed --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/osfs/os_windows.go @@ -0,0 +1,61 @@ +// +build windows + +package osfs + +import ( + "os" + "runtime" + "unsafe" + + "golang.org/x/sys/windows" +) + +type fileInfo struct { + os.FileInfo + name string +} + +func (fi *fileInfo) Name() string { + return fi.name +} + +var ( + kernel32DLL = windows.NewLazySystemDLL("kernel32.dll") + lockFileExProc = kernel32DLL.NewProc("LockFileEx") + unlockFileProc = kernel32DLL.NewProc("UnlockFile") +) + +const ( + lockfileExclusiveLock = 0x2 +) + +func (f *file) Lock() error { + f.m.Lock() + defer f.m.Unlock() + + var overlapped windows.Overlapped + // err is always non-nil as per sys/windows semantics. 
+ ret, _, err := lockFileExProc.Call(f.File.Fd(), lockfileExclusiveLock, 0, 0xFFFFFFFF, 0, + uintptr(unsafe.Pointer(&overlapped))) + runtime.KeepAlive(&overlapped) + if ret == 0 { + return err + } + return nil +} + +func (f *file) Unlock() error { + f.m.Lock() + defer f.m.Unlock() + + // err is always non-nil as per sys/windows semantics. + ret, _, err := unlockFileProc.Call(f.File.Fd(), 0, 0, 0xFFFFFFFF, 0) + if ret == 0 { + return err + } + return nil +} + +func rename(from, to string) error { + return os.Rename(from, to) +} diff --git a/vendor/github.com/go-git/go-billy/v5/util/glob.go b/vendor/github.com/go-git/go-billy/v5/util/glob.go new file mode 100644 index 0000000000..f7cb1de896 --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/util/glob.go @@ -0,0 +1,111 @@ +package util + +import ( + "path/filepath" + "sort" + "strings" + + "github.com/go-git/go-billy/v5" +) + +// Glob returns the names of all files matching pattern or nil +// if there is no matching file. The syntax of patterns is the same +// as in Match. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the Separator is '/'). +// +// Glob ignores file system errors such as I/O errors reading directories. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +// Function originally from https://golang.org/src/path/filepath/match_test.go +func Glob(fs billy.Filesystem, pattern string) (matches []string, err error) { + if !hasMeta(pattern) { + if _, err = fs.Lstat(pattern); err != nil { + return nil, nil + } + return []string{pattern}, nil + } + + dir, file := filepath.Split(pattern) + // Prevent infinite recursion. See issue 15879. + if dir == pattern { + return nil, filepath.ErrBadPattern + } + + var m []string + m, err = Glob(fs, cleanGlobPath(dir)) + if err != nil { + return + } + for _, d := range m { + matches, err = glob(fs, d, file, matches) + if err != nil { + return + } + } + return +} + +// cleanGlobPath prepares path for glob matching. +func cleanGlobPath(path string) string { + switch path { + case "": + return "." + case string(filepath.Separator): + // do nothing to the path + return path + default: + return path[0 : len(path)-1] // chop off trailing separator + } +} + +// glob searches for files matching pattern in the directory dir +// and appends them to matches. If the directory cannot be +// opened, it returns the existing matches. New matches are +// added in lexicographical order. +func glob(fs billy.Filesystem, dir, pattern string, matches []string) (m []string, e error) { + m = matches + fi, err := fs.Stat(dir) + if err != nil { + return + } + + if !fi.IsDir() { + return + } + + names, _ := readdirnames(fs, dir) + sort.Strings(names) + + for _, n := range names { + matched, err := filepath.Match(pattern, n) + if err != nil { + return m, err + } + if matched { + m = append(m, filepath.Join(dir, n)) + } + } + return +} + +// hasMeta reports whether path contains any of the magic characters +// recognized by Match. +func hasMeta(path string) bool { + // TODO(niemeyer): Should other magic characters be added here? 
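+	// '*' and '?' are wildcards, and '[' opens a character class such as
+	// "[a-z]"; these are the magic characters recognized by filepath.Match.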
+ return strings.ContainsAny(path, "*?[") +} + +func readdirnames(fs billy.Filesystem, dir string) ([]string, error) { + files, err := fs.ReadDir(dir) + if err != nil { + return nil, err + } + + var names []string + for _, file := range files { + names = append(names, file.Name()) + } + + return names, nil +} diff --git a/vendor/github.com/go-git/go-billy/v5/util/util.go b/vendor/github.com/go-git/go-billy/v5/util/util.go new file mode 100644 index 0000000000..5c77128c3c --- /dev/null +++ b/vendor/github.com/go-git/go-billy/v5/util/util.go @@ -0,0 +1,282 @@ +package util + +import ( + "io" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "github.com/go-git/go-billy/v5" +) + +// RemoveAll removes path and any children it contains. It removes everything it +// can but returns the first error it encounters. If the path does not exist, +// RemoveAll returns nil (no error). +func RemoveAll(fs billy.Basic, path string) error { + fs, path = getUnderlyingAndPath(fs, path) + + if r, ok := fs.(removerAll); ok { + return r.RemoveAll(path) + } + + return removeAll(fs, path) +} + +type removerAll interface { + RemoveAll(string) error +} + +func removeAll(fs billy.Basic, path string) error { + // This implementation is adapted from os.RemoveAll. + + // Simple case: if Remove works, we're done. + err := fs.Remove(path) + if err == nil || os.IsNotExist(err) { + return nil + } + + // Otherwise, is this a directory we need to recurse into? + dir, serr := fs.Stat(path) + if serr != nil { + if os.IsNotExist(serr) { + return nil + } + + return serr + } + + if !dir.IsDir() { + // Not a directory; return the error from Remove. + return err + } + + dirfs, ok := fs.(billy.Dir) + if !ok { + return billy.ErrNotSupported + } + + // Directory. + fis, err := dirfs.ReadDir(path) + if err != nil { + if os.IsNotExist(err) { + // Race. It was deleted between the Lstat and Open. + // Return nil per RemoveAll's docs. + return nil + } + + return err + } + + // Remove contents & return first error. + err = nil + for _, fi := range fis { + cpath := fs.Join(path, fi.Name()) + err1 := removeAll(fs, cpath) + if err == nil { + err = err1 + } + } + + // Remove directory. + err1 := fs.Remove(path) + if err1 == nil || os.IsNotExist(err1) { + return nil + } + + if err == nil { + err = err1 + } + + return err + +} + +// WriteFile writes data to a file named by filename in the given filesystem. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func WriteFile(fs billy.Basic, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + + if err1 := f.Close(); err == nil { + err = err1 + } + + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. 
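+//
+// The state below is a small linear congruential generator seeded from the
+// clock and PID; nextSuffix renders its next value as a fixed nine-digit
+// decimal string, so a generated name looks like, e.g., "prefix940285961"
+// (illustrative value only).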
+var rand uint32 +var randmu sync.Mutex + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextSuffix() string { + randmu.Lock() + r := rand + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + rand = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir with a name +// beginning with prefix, opens the file for reading and writing, and returns +// the resulting *os.File. If dir is the empty string, TempFile uses the default +// directory for temporary files (see os.TempDir). Multiple programs calling +// TempFile simultaneously will not choose the same file. The caller can use +// f.Name() to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func TempFile(fs billy.Basic, dir, prefix string) (f billy.File, err error) { + // This implementation is based on stdlib ioutil.TempFile. + if dir == "" { + dir = getTempDir(fs) + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextSuffix()) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). +// Multiple programs calling TempDir simultaneously +// will not choose the same directory. It is the caller's responsibility +// to remove the directory when no longer needed. +func TempDir(fs billy.Dir, dir, prefix string) (name string, err error) { + // This implementation is based on stdlib ioutil.TempDir + + if dir == "" { + dir = getTempDir(fs.(billy.Basic)) + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + try := filepath.Join(dir, prefix+nextSuffix()) + err = fs.MkdirAll(try, 0700) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + rand = reseed() + randmu.Unlock() + } + continue + } + if os.IsNotExist(err) { + if _, err := os.Stat(dir); os.IsNotExist(err) { + return "", err + } + } + if err == nil { + name = try + } + break + } + return +} + +func getTempDir(fs billy.Basic) string { + ch, ok := fs.(billy.Chroot) + if !ok || ch.Root() == "" || ch.Root() == "/" || ch.Root() == string(filepath.Separator) { + return os.TempDir() + } + + return ".tmp" +} + +type underlying interface { + Underlying() billy.Basic +} + +func getUnderlyingAndPath(fs billy.Basic, path string) (billy.Basic, string) { + u, ok := fs.(underlying) + if !ok { + return fs, path + } + if ch, ok := fs.(billy.Chroot); ok { + path = fs.Join(ch.Root(), path) + } + + return u.Underlying(), path +} + +// ReadFile reads the named file and returns the contents from the given filesystem. +// A successful call returns err == nil, not err == EOF. +// Because ReadFile reads the whole file, it does not treat an EOF from Read +// as an error to be reported. 
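+//
+// A minimal usage sketch (the filesystem and path are illustrative):
+//
+//	fs := memfs.New() // any billy.Basic implementation will do
+//	_ = WriteFile(fs, "notes.txt", []byte("hello"), 0644)
+//	data, err := ReadFile(fs, "notes.txt") // data == []byte("hello")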
+func ReadFile(fs billy.Basic, name string) ([]byte, error) { + f, err := fs.Open(name) + if err != nil { + return nil, err + } + + defer f.Close() + + var size int + if info, err := fs.Stat(name); err == nil { + size64 := info.Size() + if int64(int(size64)) == size64 { + size = int(size64) + } + } + + size++ // one byte for final read at EOF + // If a file claims a small size, read at least 512 bytes. + // In particular, files in Linux's /proc claim size 0 but + // then do not work right if read in small pieces, + // so an initial read of 1 byte would not work correctly. + + if size < 512 { + size = 512 + } + + data := make([]byte, 0, size) + for { + if len(data) >= cap(data) { + d := append(data[:cap(data)], 0) + data = d[:len(data)] + } + + n, err := f.Read(data[len(data):cap(data)]) + data = data[:len(data)+n] + + if err != nil { + if err == io.EOF { + err = nil + } + + return data, err + } + } +} diff --git a/vendor/github.com/go-git/go-git/v5/.gitignore b/vendor/github.com/go-git/go-git/v5/.gitignore new file mode 100644 index 0000000000..038dd9f1ed --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/.gitignore @@ -0,0 +1,4 @@ +coverage.out +*~ +coverage.txt +profile.out diff --git a/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md b/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000..a689fa3c34 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/CODE_OF_CONDUCT.md @@ -0,0 +1,74 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of experience, +education, socio-economic status, nationality, personal appearance, race, +religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. 
Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at conduct@sourced.tech. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + diff --git a/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md new file mode 100644 index 0000000000..2a72b501e2 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md @@ -0,0 +1,111 @@ +Supported Capabilities +====================== + +Here is a non-comprehensive table of git commands and features whose equivalent +is supported by go-git. + +| Feature | Status | Notes | +|---------------------------------------|--------|-------| +| **config** | +| config | ✔ | Reading and modifying per-repository configuration (`.git/config`) is supported. Global configuration (`$HOME/.gitconfig`) is not. | +| **getting and creating repositories** | +| init | ✔ | Plain init and `--bare` are supported. Flags `--template`, `--separate-git-dir` and `--shared` are not. | +| clone | ✔ | Plain clone and equivalents to `--progress`, `--single-branch`, `--depth`, `--origin`, `--recurse-submodules` are supported. Others are not. | +| **basic snapshotting** | +| add | ✔ | Plain add is supported. Any other flags aren't supported | +| status | ✔ | +| commit | ✔ | +| reset | ✔ | +| rm | ✔ | +| mv | ✔ | +| **branching and merging** | +| branch | ✔ | +| checkout | ✔ | Basic usages of checkout are supported. | +| merge | ✖ | +| mergetool | ✖ | +| stash | ✖ | +| tag | ✔ | +| **sharing and updating projects** | +| fetch | ✔ | +| pull | ✔ | Only supports merges where the merge can be resolved as a fast-forward. 
| +| push | ✔ | +| remote | ✔ | +| submodule | ✔ | +| **inspection and comparison** | +| show | ✔ | +| log | ✔ | +| shortlog | (see log) | +| describe | | +| **patching** | +| apply | ✖ | +| cherry-pick | ✖ | +| diff | ✔ | Patch object with UnifiedDiff output representation | +| rebase | ✖ | +| revert | ✖ | +| **debugging** | +| bisect | ✖ | +| blame | ✔ | +| grep | ✔ | +| **email** || +| am | ✖ | +| apply | ✖ | +| format-patch | ✖ | +| send-email | ✖ | +| request-pull | ✖ | +| **external systems** | +| svn | ✖ | +| fast-import | ✖ | +| **administration** | +| clean | ✔ | +| gc | ✖ | +| fsck | ✖ | +| reflog | ✖ | +| filter-branch | ✖ | +| instaweb | ✖ | +| archive | ✖ | +| bundle | ✖ | +| prune | ✖ | +| repack | ✖ | +| **server admin** | +| daemon | | +| update-server-info | | +| **advanced** | +| notes | ✖ | +| replace | ✖ | +| worktree | ✖ | +| annotate | (see blame) | +| **gpg** | +| git-verify-commit | ✔ | +| git-verify-tag | ✔ | +| **plumbing commands** | +| cat-file | ✔ | +| check-ignore | | +| commit-tree | | +| count-objects | | +| diff-index | | +| for-each-ref | ✔ | +| hash-object | ✔ | +| ls-files | ✔ | +| merge-base | ✔ | Calculates the merge-base only between two commits, and supports `--independent` and `--is-ancestor` modifiers; Does not support `--fork-point` nor `--octopus` modifiers. | +| read-tree | | +| rev-list | ✔ | +| rev-parse | | +| show-ref | ✔ | +| symbolic-ref | ✔ | +| update-index | | +| update-ref | | +| verify-pack | | +| write-tree | | +| **protocols** | +| http(s):// (dumb) | ✖ | +| http(s):// (smart) | ✔ | +| git:// | ✔ | +| ssh:// | ✔ | +| file:// | partial | Warning: this is not pure Golang. This shells out to the `git` binary. | +| custom | ✔ | +| **other features** | +| gitignore | ✔ | +| gitattributes | ✖ | +| index version | | +| packfile version | | +| push-certs | ✖ | diff --git a/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md b/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md new file mode 100644 index 0000000000..fce25328a7 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md @@ -0,0 +1,46 @@ +# Contributing Guidelines + +source{d} go-git project is [Apache 2.0 licensed](LICENSE) and accepts +contributions via GitHub pull requests. This document outlines some of the +conventions on development workflow, commit message formatting, contact points, +and other resources to make it easier to get your contribution accepted. + +## Support Channels + +The official support channels, for both users and contributors, are: + +- [StackOverflow go-git tag](https://stackoverflow.com/questions/tagged/go-git) for user questions. +- GitHub [Issues](https://github.com/src-d/go-git/issues)* for bug reports and feature requests. + +*Before opening a new issue or submitting a new pull request, it's helpful to +search the project - it's likely that another user has already reported the +issue you're facing, or it's a known issue that we're already aware of. + + +## How to Contribute + +Pull Requests (PRs) are the main and exclusive way to contribute to the official go-git project. +In order for a PR to be accepted it needs to pass a list of requirements: + +- You should be able to run the same query using `git`. We don't accept features that are not implemented in the official git implementation. +- The expected behavior must match the [official git implementation](https://github.com/git/git). +- The actual behavior must be correctly explained with natural language and providing a minimum working example in Go that reproduces it. 
+- All PRs must be written in idiomatic Go, formatted according to [gofmt](https://golang.org/cmd/gofmt/), and without any warnings from [go lint](https://github.com/golang/lint) nor [go vet](https://golang.org/cmd/vet/).
+- They should in general include tests, and those shall pass.
+- If the PR is a bug fix, it has to include a suite of unit tests for the new functionality.
+- If the PR is a new feature, it has to come with a suite of unit tests that test the new functionality.
+- In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git.
+
+### Format of the commit message
+
+Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:
+
+```
+plumbing: packp, Skip argument validations for unknown capabilities. Fixes #623
+```
+
+The format can be described more formally as follows:
+
+```
+<package>: <context>, <description>. [Fixes #<issue-number>]
+```
diff --git a/vendor/github.com/go-git/go-git/v5/LICENSE b/vendor/github.com/go-git/go-git/v5/LICENSE
new file mode 100644
index 0000000000..8aa3d854cf
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Sourced Technologies, S.L. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-git/go-git/v5/Makefile b/vendor/github.com/go-git/go-git/v5/Makefile new file mode 100644 index 0000000000..d10922fb10 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/Makefile @@ -0,0 +1,38 @@ +# General +WORKDIR = $(PWD) + +# Go parameters +GOCMD = go +GOTEST = $(GOCMD) test + +# Git config +GIT_VERSION ?= +GIT_DIST_PATH ?= $(PWD)/.git-dist +GIT_REPOSITORY = http://github.com/git/git.git + +# Coverage +COVERAGE_REPORT = coverage.out +COVERAGE_MODE = count + +build-git: + @if [ -f $(GIT_DIST_PATH)/git ]; then \ + echo "nothing to do, using cache $(GIT_DIST_PATH)"; \ + else \ + git clone $(GIT_REPOSITORY) -b $(GIT_VERSION) --depth 1 --single-branch $(GIT_DIST_PATH); \ + cd $(GIT_DIST_PATH); \ + make configure; \ + ./configure; \ + make all; \ + fi + +test: + @echo "running against `git version`"; \ + $(GOTEST) ./... 
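+
+# Illustrative invocation (GIT_VERSION must name a real tag of the git repo):
+#   make build-git GIT_VERSION=v2.30.0
+#   make test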
+ +test-coverage: + @echo "running against `git version`"; \ + echo "" > $(COVERAGE_REPORT); \ + $(GOTEST) -coverprofile=$(COVERAGE_REPORT) -coverpkg=./... -covermode=$(COVERAGE_MODE) ./... + +clean: + rm -rf $(GIT_DIST_PATH) \ No newline at end of file diff --git a/vendor/github.com/go-git/go-git/v5/README.md b/vendor/github.com/go-git/go-git/v5/README.md new file mode 100644 index 0000000000..ff0c9b72ba --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/README.md @@ -0,0 +1,131 @@ +![go-git logo](https://cdn.rawgit.com/src-d/artwork/02036484/go-git/files/go-git-github-readme-header.png) +[![GoDoc](https://godoc.org/github.com/go-git/go-git/v5?status.svg)](https://pkg.go.dev/github.com/go-git/go-git/v5) [![Build Status](https://github.com/go-git/go-git/workflows/Test/badge.svg)](https://github.com/go-git/go-git/actions) [![Go Report Card](https://goreportcard.com/badge/github.com/go-git/go-git)](https://goreportcard.com/report/github.com/go-git/go-git) + +*go-git* is a highly extensible git implementation library written in **pure Go**. + +It can be used to manipulate git repositories at low level *(plumbing)* or high level *(porcelain)*, through an idiomatic Go API. It also supports several types of storage, such as in-memory filesystems, or custom implementations, thanks to the [`Storer`](https://pkg.go.dev/github.com/go-git/go-git/v5/plumbing/storer) interface. + +It's being actively developed since 2015 and is being used extensively by [Keybase](https://keybase.io/blog/encrypted-git-for-everyone), [Gitea](https://gitea.io/en-us/) or [Pulumi](https://github.com/search?q=org%3Apulumi+go-git&type=Code), and by many other libraries and tools. + +Project Status +-------------- + +After the legal issues with the [`src-d`](https://github.com/src-d) organization, the lack of update for four months and the requirement to make a hard fork, the project is **now back to normality**. + +The project is currently actively maintained by individual contributors, including several of the original authors, but also backed by a new company, [gitsight](https://github.com/gitsight), where `go-git` is a critical component used at scale. + + +Comparison with git +------------------- + +*go-git* aims to be fully compatible with [git](https://github.com/git/git), all the *porcelain* operations are implemented to work exactly as *git* does. + +*git* is a humongous project with years of development by thousands of contributors, making it challenging for *go-git* to implement all the features. You can find a comparison of *go-git* vs *git* in the [compatibility documentation](COMPATIBILITY.md). + + +Installation +------------ + +The recommended way to install *go-git* is: + +```go +import "github.com/go-git/go-git/v5" // with go modules enabled (GO111MODULE=on or outside GOPATH) +import "github.com/go-git/go-git" // with go modules disabled +``` + + +Examples +-------- + +> Please note that the `CheckIfError` and `Info` functions used in the examples are from the [examples package](https://github.com/go-git/go-git/blob/master/_examples/common.go#L19) just to be used in the examples. + + +### Basic example + +A basic example that mimics the standard `git clone` command + +```go +// Clone the given repository to the given directory +Info("git clone https://github.com/go-git/go-git") + +_, err := git.PlainClone("/tmp/foo", false, &git.CloneOptions{ + URL: "https://github.com/go-git/go-git", + Progress: os.Stdout, +}) + +CheckIfError(err) +``` + +Outputs: +``` +Counting objects: 4924, done. 
+Compressing objects: 100% (1333/1333), done. +Total 4924 (delta 530), reused 6 (delta 6), pack-reused 3533 +``` + +### In-memory example + +Cloning a repository into memory and printing the history of HEAD, just like `git log` does + + +```go +// Clones the given repository in memory, creating the remote, the local +// branches and fetching the objects, exactly as: +Info("git clone https://github.com/go-git/go-billy") + +r, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{ + URL: "https://github.com/go-git/go-billy", +}) + +CheckIfError(err) + +// Gets the HEAD history from HEAD, just like this command: +Info("git log") + +// ... retrieves the branch pointed by HEAD +ref, err := r.Head() +CheckIfError(err) + + +// ... retrieves the commit history +cIter, err := r.Log(&git.LogOptions{From: ref.Hash()}) +CheckIfError(err) + +// ... just iterates over the commits, printing it +err = cIter.ForEach(func(c *object.Commit) error { + fmt.Println(c) + return nil +}) +CheckIfError(err) +``` + +Outputs: +``` +commit ded8054fd0c3994453e9c8aacaf48d118d42991e +Author: Santiago M. Mola +Date: Sat Nov 12 21:18:41 2016 +0100 + + index: ReadFrom/WriteTo returns IndexReadError/IndexWriteError. (#9) + +commit df707095626f384ce2dc1a83b30f9a21d69b9dfc +Author: Santiago M. Mola +Date: Fri Nov 11 13:23:22 2016 +0100 + + readwriter: fix bug when writing index. (#10) + + When using ReadWriter on an existing siva file, absolute offset for + index entries was not being calculated correctly. +... +``` + +You can find this [example](_examples/log/main.go) and many others in the [examples](_examples) folder. + +Contribute +---------- + +[Contributions](https://github.com/go-git/go-git/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22) are more than welcome, if you are interested please take a look to +our [Contributing Guidelines](CONTRIBUTING.md). + +License +------- +Apache License Version 2.0, see [LICENSE](LICENSE) diff --git a/vendor/github.com/go-git/go-git/v5/blame.go b/vendor/github.com/go-git/go-git/v5/blame.go new file mode 100644 index 0000000000..43634b32ca --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/blame.go @@ -0,0 +1,302 @@ +package git + +import ( + "bytes" + "errors" + "fmt" + "strconv" + "strings" + "time" + "unicode/utf8" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/utils/diff" +) + +// BlameResult represents the result of a Blame operation. +type BlameResult struct { + // Path is the path of the File that we're blaming. + Path string + // Rev (Revision) is the hash of the specified Commit used to generate this result. + Rev plumbing.Hash + // Lines contains every line with its authorship. + Lines []*Line +} + +// Blame returns a BlameResult with the information about the last author of +// each line from file `path` at commit `c`. +func Blame(c *object.Commit, path string) (*BlameResult, error) { + // The file to blame is identified by the input arguments: + // commit and path. commit is a Commit object obtained from a Repository. Path + // represents a path to a specific file contained into the repository. + // + // Blaming a file is a two step process: + // + // 1. Create a linear history of the commits affecting a file. We use + // revlist.New for that. + // + // 2. Then build a graph with a node for every line in every file in + // the history of the file. + // + // Each node is assigned a commit: Start by the nodes in the first + // commit. 
Assign that commit as the creator of all its lines. + // + // Then jump to the nodes in the next commit, and calculate the diff + // between the two files. Newly created lines get + // assigned the new commit as its origin. Modified lines also get + // this new commit. Untouched lines retain the old commit. + // + // All this work is done in the assignOrigin function which holds all + // the internal relevant data in a "blame" struct, that is not + // exported. + // + // TODO: ways to improve the efficiency of this function: + // 1. Improve revlist + // 2. Improve how to traverse the history (example a backward traversal will + // be much more efficient) + // + // TODO: ways to improve the function in general: + // 1. Add memoization between revlist and assign. + // 2. It is using much more memory than needed, see the TODOs below. + + b := new(blame) + b.fRev = c + b.path = path + + // get all the file revisions + if err := b.fillRevs(); err != nil { + return nil, err + } + + // calculate the line tracking graph and fill in + // file contents in data. + if err := b.fillGraphAndData(); err != nil { + return nil, err + } + + file, err := b.fRev.File(b.path) + if err != nil { + return nil, err + } + finalLines, err := file.Lines() + if err != nil { + return nil, err + } + + // Each node (line) holds the commit where it was introduced or + // last modified. To achieve that we use the FORWARD algorithm + // described in Zimmermann, et al. "Mining Version Archives for + // Co-changed Lines", in proceedings of the Mining Software + // Repositories workshop, Shanghai, May 22-23, 2006. + lines, err := newLines(finalLines, b.sliceGraph(len(b.graph)-1)) + if err != nil { + return nil, err + } + + return &BlameResult{ + Path: path, + Rev: c.Hash, + Lines: lines, + }, nil +} + +// Line values represent the contents and author of a line in BlamedResult values. +type Line struct { + // Author is the email address of the last author that modified the line. + Author string + // Text is the original text of the line. + Text string + // Date is when the original text of the line was introduced + Date time.Time + // Hash is the commit hash that introduced the original line + Hash plumbing.Hash +} + +func newLine(author, text string, date time.Time, hash plumbing.Hash) *Line { + return &Line{ + Author: author, + Text: text, + Hash: hash, + Date: date, + } +} + +func newLines(contents []string, commits []*object.Commit) ([]*Line, error) { + lcontents := len(contents) + lcommits := len(commits) + + if lcontents != lcommits { + if lcontents == lcommits-1 && contents[lcontents-1] != "\n" { + contents = append(contents, "\n") + } else { + return nil, errors.New("contents and commits have different length") + } + } + + result := make([]*Line, 0, lcontents) + for i := range contents { + result = append(result, newLine( + commits[i].Author.Email, contents[i], + commits[i].Author.When, commits[i].Hash, + )) + } + + return result, nil +} + +// this struct is internally used by the blame function to hold its +// inputs, outputs and state. 
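+//
+// Callers use the exported Blame API instead of this struct; a minimal
+// sketch (the repository, hash and path are illustrative):
+//
+//	c, _ := repo.CommitObject(plumbing.NewHash("ded8054...")) // *object.Commit
+//	result, _ := git.Blame(c, "README.md")
+//	for _, l := range result.Lines {
+//		fmt.Println(l.Hash, l.Author, l.Text)
+//	}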
+type blame struct { + // the path of the file to blame + path string + // the commit of the final revision of the file to blame + fRev *object.Commit + // the chain of revisions affecting the the file to blame + revs []*object.Commit + // the contents of the file across all its revisions + data []string + // the graph of the lines in the file across all the revisions + graph [][]*object.Commit +} + +// calculate the history of a file "path", starting from commit "from", sorted by commit date. +func (b *blame) fillRevs() error { + var err error + + b.revs, err = references(b.fRev, b.path) + return err +} + +// build graph of a file from its revision history +func (b *blame) fillGraphAndData() error { + //TODO: not all commits are needed, only the current rev and the prev + b.graph = make([][]*object.Commit, len(b.revs)) + b.data = make([]string, len(b.revs)) // file contents in all the revisions + // for every revision of the file, starting with the first + // one... + for i, rev := range b.revs { + // get the contents of the file + file, err := rev.File(b.path) + if err != nil { + return nil + } + b.data[i], err = file.Contents() + if err != nil { + return err + } + nLines := countLines(b.data[i]) + // create a node for each line + b.graph[i] = make([]*object.Commit, nLines) + // assign a commit to each node + // if this is the first revision, then the node is assigned to + // this first commit. + if i == 0 { + for j := 0; j < nLines; j++ { + b.graph[i][j] = b.revs[i] + } + } else { + // if this is not the first commit, then assign to the old + // commit or to the new one, depending on what the diff + // says. + b.assignOrigin(i, i-1) + } + } + return nil +} + +// sliceGraph returns a slice of commits (one per line) for a particular +// revision of a file (0=first revision). +func (b *blame) sliceGraph(i int) []*object.Commit { + fVs := b.graph[i] + result := make([]*object.Commit, 0, len(fVs)) + for _, v := range fVs { + c := *v + result = append(result, &c) + } + return result +} + +// Assigns origin to vertexes in current (c) rev from data in its previous (p) +// revision +func (b *blame) assignOrigin(c, p int) { + // assign origin based on diff info + hunks := diff.Do(b.data[p], b.data[c]) + sl := -1 // source line + dl := -1 // destination line + for h := range hunks { + hLines := countLines(hunks[h].Text) + for hl := 0; hl < hLines; hl++ { + switch { + case hunks[h].Type == 0: + sl++ + dl++ + b.graph[c][dl] = b.graph[p][sl] + case hunks[h].Type == 1: + dl++ + b.graph[c][dl] = b.revs[c] + case hunks[h].Type == -1: + sl++ + default: + panic("unreachable") + } + } + } +} + +// GoString prints the results of a Blame using git-blame's style. +func (b *blame) GoString() string { + var buf bytes.Buffer + + file, err := b.fRev.File(b.path) + if err != nil { + panic("PrettyPrint: internal error in repo.Data") + } + contents, err := file.Contents() + if err != nil { + panic("PrettyPrint: internal error in repo.Data") + } + + lines := strings.Split(contents, "\n") + // max line number length + mlnl := len(strconv.Itoa(len(lines))) + // max author length + mal := b.maxAuthorLength() + format := fmt.Sprintf("%%s (%%-%ds %%%dd) %%s\n", + mal, mlnl) + + fVs := b.graph[len(b.graph)-1] + for ln, v := range fVs { + fmt.Fprintf(&buf, format, v.Hash.String()[:8], + prettyPrintAuthor(fVs[ln]), ln+1, lines[ln]) + } + return buf.String() +} + +// utility function to pretty print the author. 
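+// For a commit authored by "Alice" on 2016-11-12, the result would be
+// "Alice 2016-11-12" (illustrative).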
+func prettyPrintAuthor(c *object.Commit) string { + return fmt.Sprintf("%s %s", c.Author.Name, c.Author.When.Format("2006-01-02")) +} + +// utility function to calculate the number of runes needed +// to print the longest author name in the blame of a file. +func (b *blame) maxAuthorLength() int { + memo := make(map[plumbing.Hash]struct{}, len(b.graph)-1) + fVs := b.graph[len(b.graph)-1] + m := 0 + for ln := range fVs { + if _, ok := memo[fVs[ln].Hash]; ok { + continue + } + memo[fVs[ln].Hash] = struct{}{} + m = max(m, utf8.RuneCountInString(prettyPrintAuthor(fVs[ln]))) + } + return m +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} diff --git a/vendor/github.com/go-git/go-git/v5/common.go b/vendor/github.com/go-git/go-git/v5/common.go new file mode 100644 index 0000000000..6174339a81 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/common.go @@ -0,0 +1,20 @@ +package git + +import "strings" + +// countLines returns the number of lines in a string à la git, this is +// The newline character is assumed to be '\n'. The empty string +// contains 0 lines. If the last line of the string doesn't end with a +// newline, it will still be considered a line. +func countLines(s string) int { + if s == "" { + return 0 + } + + nEOL := strings.Count(s, "\n") + if strings.HasSuffix(s, "\n") { + return nEOL + } + + return nEOL + 1 +} diff --git a/vendor/github.com/go-git/go-git/v5/config/branch.go b/vendor/github.com/go-git/go-git/v5/config/branch.go new file mode 100644 index 0000000000..fe86cf542c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/branch.go @@ -0,0 +1,90 @@ +package config + +import ( + "errors" + + "github.com/go-git/go-git/v5/plumbing" + format "github.com/go-git/go-git/v5/plumbing/format/config" +) + +var ( + errBranchEmptyName = errors.New("branch config: empty name") + errBranchInvalidMerge = errors.New("branch config: invalid merge") + errBranchInvalidRebase = errors.New("branch config: rebase must be one of 'true' or 'interactive'") +) + +// Branch contains information on the +// local branches and which remote to track +type Branch struct { + // Name of branch + Name string + // Remote name of remote to track + Remote string + // Merge is the local refspec for the branch + Merge plumbing.ReferenceName + // Rebase instead of merge when pulling. Valid values are + // "true" and "interactive". 
"false" is undocumented and + // typically represented by the non-existence of this field + Rebase string + + raw *format.Subsection +} + +// Validate validates fields of branch +func (b *Branch) Validate() error { + if b.Name == "" { + return errBranchEmptyName + } + + if b.Merge != "" && !b.Merge.IsBranch() { + return errBranchInvalidMerge + } + + if b.Rebase != "" && + b.Rebase != "true" && + b.Rebase != "interactive" && + b.Rebase != "false" { + return errBranchInvalidRebase + } + + return nil +} + +func (b *Branch) marshal() *format.Subsection { + if b.raw == nil { + b.raw = &format.Subsection{} + } + + b.raw.Name = b.Name + + if b.Remote == "" { + b.raw.RemoveOption(remoteSection) + } else { + b.raw.SetOption(remoteSection, b.Remote) + } + + if b.Merge == "" { + b.raw.RemoveOption(mergeKey) + } else { + b.raw.SetOption(mergeKey, string(b.Merge)) + } + + if b.Rebase == "" { + b.raw.RemoveOption(rebaseKey) + } else { + b.raw.SetOption(rebaseKey, b.Rebase) + } + + return b.raw +} + +func (b *Branch) unmarshal(s *format.Subsection) error { + b.raw = s + + b.Name = b.raw.Name + b.Remote = b.raw.Options.Get(remoteSection) + b.Merge = plumbing.ReferenceName(b.raw.Options.Get(mergeKey)) + b.Rebase = b.raw.Options.Get(rebaseKey) + + return b.Validate() +} diff --git a/vendor/github.com/go-git/go-git/v5/config/config.go b/vendor/github.com/go-git/go-git/v5/config/config.go new file mode 100644 index 0000000000..1aee25a4c5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/config/config.go @@ -0,0 +1,659 @@ +// Package config contains the abstraction of multiple config files +package config + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + + "github.com/go-git/go-billy/v5/osfs" + "github.com/go-git/go-git/v5/internal/url" + format "github.com/go-git/go-git/v5/plumbing/format/config" + "github.com/mitchellh/go-homedir" +) + +const ( + // DefaultFetchRefSpec is the default refspec used for fetch. + DefaultFetchRefSpec = "+refs/heads/*:refs/remotes/%s/*" + // DefaultPushRefSpec is the default refspec used for push. + DefaultPushRefSpec = "refs/heads/*:refs/heads/*" +) + +// ConfigStorer generic storage of Config object +type ConfigStorer interface { + Config() (*Config, error) + SetConfig(*Config) error +} + +var ( + ErrInvalid = errors.New("config invalid key in remote or branch") + ErrRemoteConfigNotFound = errors.New("remote config not found") + ErrRemoteConfigEmptyURL = errors.New("remote config: empty URL") + ErrRemoteConfigEmptyName = errors.New("remote config: empty name") +) + +// Scope defines the scope of a config file, such as local, global or system. +type Scope int + +// Available ConfigScope's +const ( + LocalScope Scope = iota + GlobalScope + SystemScope +) + +// Config contains the repository configuration +// https://www.kernel.org/pub/software/scm/git/docs/git-config.html#FILES +type Config struct { + Core struct { + // IsBare if true this repository is assumed to be bare and has no + // working directory associated with it. + IsBare bool + // Worktree is the path to the root of the working tree. + Worktree string + // CommentChar is the character indicating the start of a + // comment for commands like commit and tag + CommentChar string + } + + User struct { + // Name is the personal name of the author and the commiter of a commit. + Name string + // Email is the email of the author and the commiter of a commit. + Email string + } + + Author struct { + // Name is the personal name of the author of a commit. 
+ Name string + // Email is the email of the author of a commit. + Email string + } + + Committer struct { + // Name is the personal name of the commiter of a commit. + Name string + // Email is the email of the the commiter of a commit. + Email string + } + + Pack struct { + // Window controls the size of the sliding window for delta + // compression. The default is 10. A value of 0 turns off + // delta compression entirely. + Window uint + } + + Init struct { + // DefaultBranch Allows overriding the default branch name + // e.g. when initializing a new repository or when cloning + // an empty repository. + DefaultBranch string + } + + // Remotes list of repository remotes, the key of the map is the name + // of the remote, should equal to RemoteConfig.Name. + Remotes map[string]*RemoteConfig + // Submodules list of repository submodules, the key of the map is the name + // of the submodule, should equal to Submodule.Name. + Submodules map[string]*Submodule + // Branches list of branches, the key is the branch name and should + // equal Branch.Name + Branches map[string]*Branch + // URLs list of url rewrite rules, if repo url starts with URL.InsteadOf value, it will be replaced with the + // key instead. + URLs map[string]*URL + // Raw contains the raw information of a config file. The main goal is + // preserve the parsed information from the original format, to avoid + // dropping unsupported fields. + Raw *format.Config +} + +// NewConfig returns a new empty Config. +func NewConfig() *Config { + config := &Config{ + Remotes: make(map[string]*RemoteConfig), + Submodules: make(map[string]*Submodule), + Branches: make(map[string]*Branch), + URLs: make(map[string]*URL), + Raw: format.New(), + } + + config.Pack.Window = DefaultPackWindow + + return config +} + +// ReadConfig reads a config file from a io.Reader. +func ReadConfig(r io.Reader) (*Config, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + cfg := NewConfig() + if err = cfg.Unmarshal(b); err != nil { + return nil, err + } + + return cfg, nil +} + +// LoadConfig loads a config file from a given scope. The returned Config, +// contains exclusively information fom the given scope. If couldn't find a +// config file to the given scope, a empty one is returned. +func LoadConfig(scope Scope) (*Config, error) { + if scope == LocalScope { + return nil, fmt.Errorf("LocalScope should be read from the a ConfigStorer.") + } + + files, err := Paths(scope) + if err != nil { + return nil, err + } + + for _, file := range files { + f, err := osfs.Default.Open(file) + if err != nil { + if os.IsNotExist(err) { + continue + } + + return nil, err + } + + defer f.Close() + return ReadConfig(f) + } + + return NewConfig(), nil +} + +// Paths returns the config file location for a given scope. +func Paths(scope Scope) ([]string, error) { + var files []string + switch scope { + case GlobalScope: + xdg := os.Getenv("XDG_CONFIG_HOME") + if xdg != "" { + files = append(files, filepath.Join(xdg, "git/config")) + } + + home, err := homedir.Dir() + if err != nil { + return nil, err + } + + files = append(files, + filepath.Join(home, ".gitconfig"), + filepath.Join(home, ".config/git/config"), + ) + case SystemScope: + files = append(files, "/etc/gitconfig") + } + + return files, nil +} + +// Validate validates the fields and sets the default values. 
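+//
+// The key invariant is that each map key mirrors its value's Name field; a
+// minimal sketch (the remote name and URL are illustrative):
+//
+//	cfg := NewConfig()
+//	cfg.Remotes["origin"] = &RemoteConfig{
+//		Name: "origin",
+//		URLs: []string{"https://github.com/go-git/go-git"},
+//	}
+//	err := cfg.Validate() // nil; a key/Name mismatch returns ErrInvalid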
+func (c *Config) Validate() error { + for name, r := range c.Remotes { + if r.Name != name { + return ErrInvalid + } + + if err := r.Validate(); err != nil { + return err + } + } + + for name, b := range c.Branches { + if b.Name != name { + return ErrInvalid + } + + if err := b.Validate(); err != nil { + return err + } + } + + return nil +} + +const ( + remoteSection = "remote" + submoduleSection = "submodule" + branchSection = "branch" + coreSection = "core" + packSection = "pack" + userSection = "user" + authorSection = "author" + committerSection = "committer" + initSection = "init" + urlSection = "url" + fetchKey = "fetch" + urlKey = "url" + bareKey = "bare" + worktreeKey = "worktree" + commentCharKey = "commentChar" + windowKey = "window" + mergeKey = "merge" + rebaseKey = "rebase" + nameKey = "name" + emailKey = "email" + defaultBranchKey = "defaultBranch" + + // DefaultPackWindow holds the number of previous objects used to + // generate deltas. The value 10 is the same used by git command. + DefaultPackWindow = uint(10) +) + +// Unmarshal parses a git-config file and stores it. +func (c *Config) Unmarshal(b []byte) error { + r := bytes.NewBuffer(b) + d := format.NewDecoder(r) + + c.Raw = format.New() + if err := d.Decode(c.Raw); err != nil { + return err + } + + c.unmarshalCore() + c.unmarshalUser() + c.unmarshalInit() + if err := c.unmarshalPack(); err != nil { + return err + } + unmarshalSubmodules(c.Raw, c.Submodules) + + if err := c.unmarshalBranches(); err != nil { + return err + } + + if err := c.unmarshalURLs(); err != nil { + return err + } + + return c.unmarshalRemotes() +} + +func (c *Config) unmarshalCore() { + s := c.Raw.Section(coreSection) + if s.Options.Get(bareKey) == "true" { + c.Core.IsBare = true + } + + c.Core.Worktree = s.Options.Get(worktreeKey) + c.Core.CommentChar = s.Options.Get(commentCharKey) +} + +func (c *Config) unmarshalUser() { + s := c.Raw.Section(userSection) + c.User.Name = s.Options.Get(nameKey) + c.User.Email = s.Options.Get(emailKey) + + s = c.Raw.Section(authorSection) + c.Author.Name = s.Options.Get(nameKey) + c.Author.Email = s.Options.Get(emailKey) + + s = c.Raw.Section(committerSection) + c.Committer.Name = s.Options.Get(nameKey) + c.Committer.Email = s.Options.Get(emailKey) +} + +func (c *Config) unmarshalPack() error { + s := c.Raw.Section(packSection) + window := s.Options.Get(windowKey) + if window == "" { + c.Pack.Window = DefaultPackWindow + } else { + winUint, err := strconv.ParseUint(window, 10, 32) + if err != nil { + return err + } + c.Pack.Window = uint(winUint) + } + return nil +} + +func (c *Config) unmarshalRemotes() error { + s := c.Raw.Section(remoteSection) + for _, sub := range s.Subsections { + r := &RemoteConfig{} + if err := r.unmarshal(sub); err != nil { + return err + } + + c.Remotes[r.Name] = r + } + + // Apply insteadOf url rules + for _, r := range c.Remotes { + r.applyURLRules(c.URLs) + } + + return nil +} + +func (c *Config) unmarshalURLs() error { + s := c.Raw.Section(urlSection) + for _, sub := range s.Subsections { + r := &URL{} + if err := r.unmarshal(sub); err != nil { + return err + } + + c.URLs[r.Name] = r + } + + return nil +} + +func unmarshalSubmodules(fc *format.Config, submodules map[string]*Submodule) { + s := fc.Section(submoduleSection) + for _, sub := range s.Subsections { + m := &Submodule{} + m.unmarshal(sub) + + if m.Validate() == ErrModuleBadPath { + continue + } + + submodules[m.Name] = m + } +} + +func (c *Config) unmarshalBranches() error { + bs := c.Raw.Section(branchSection) + for _, 
sub := range bs.Subsections { + b := &Branch{} + + if err := b.unmarshal(sub); err != nil { + return err + } + + c.Branches[b.Name] = b + } + return nil +} + +func (c *Config) unmarshalInit() { + s := c.Raw.Section(initSection) + c.Init.DefaultBranch = s.Options.Get(defaultBranchKey) +} + +// Marshal returns Config encoded as a git-config file. +func (c *Config) Marshal() ([]byte, error) { + c.marshalCore() + c.marshalUser() + c.marshalPack() + c.marshalRemotes() + c.marshalSubmodules() + c.marshalBranches() + c.marshalURLs() + c.marshalInit() + + buf := bytes.NewBuffer(nil) + if err := format.NewEncoder(buf).Encode(c.Raw); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +func (c *Config) marshalCore() { + s := c.Raw.Section(coreSection) + s.SetOption(bareKey, fmt.Sprintf("%t", c.Core.IsBare)) + + if c.Core.Worktree != "" { + s.SetOption(worktreeKey, c.Core.Worktree) + } +} + +func (c *Config) marshalUser() { + s := c.Raw.Section(userSection) + if c.User.Name != "" { + s.SetOption(nameKey, c.User.Name) + } + + if c.User.Email != "" { + s.SetOption(emailKey, c.User.Email) + } + + s = c.Raw.Section(authorSection) + if c.Author.Name != "" { + s.SetOption(nameKey, c.Author.Name) + } + + if c.Author.Email != "" { + s.SetOption(emailKey, c.Author.Email) + } + + s = c.Raw.Section(committerSection) + if c.Committer.Name != "" { + s.SetOption(nameKey, c.Committer.Name) + } + + if c.Committer.Email != "" { + s.SetOption(emailKey, c.Committer.Email) + } +} + +func (c *Config) marshalPack() { + s := c.Raw.Section(packSection) + if c.Pack.Window != DefaultPackWindow { + s.SetOption(windowKey, fmt.Sprintf("%d", c.Pack.Window)) + } +} + +func (c *Config) marshalRemotes() { + s := c.Raw.Section(remoteSection) + newSubsections := make(format.Subsections, 0, len(c.Remotes)) + added := make(map[string]bool) + for _, subsection := range s.Subsections { + if remote, ok := c.Remotes[subsection.Name]; ok { + newSubsections = append(newSubsections, remote.marshal()) + added[subsection.Name] = true + } + } + + remoteNames := make([]string, 0, len(c.Remotes)) + for name := range c.Remotes { + remoteNames = append(remoteNames, name) + } + + sort.Strings(remoteNames) + + for _, name := range remoteNames { + if !added[name] { + newSubsections = append(newSubsections, c.Remotes[name].marshal()) + } + } + + s.Subsections = newSubsections +} + +func (c *Config) marshalSubmodules() { + s := c.Raw.Section(submoduleSection) + s.Subsections = make(format.Subsections, len(c.Submodules)) + + var i int + for _, r := range c.Submodules { + section := r.marshal() + // the submodule section at config is a subset of the .gitmodule file + // we should remove the non-valid options for the config file. 
+ section.RemoveOption(pathKey) + s.Subsections[i] = section + i++ + } +} + +func (c *Config) marshalBranches() { + s := c.Raw.Section(branchSection) + newSubsections := make(format.Subsections, 0, len(c.Branches)) + added := make(map[string]bool) + for _, subsection := range s.Subsections { + if branch, ok := c.Branches[subsection.Name]; ok { + newSubsections = append(newSubsections, branch.marshal()) + added[subsection.Name] = true + } + } + + branchNames := make([]string, 0, len(c.Branches)) + for name := range c.Branches { + branchNames = append(branchNames, name) + } + + sort.Strings(branchNames) + + for _, name := range branchNames { + if !added[name] { + newSubsections = append(newSubsections, c.Branches[name].marshal()) + } + } + + s.Subsections = newSubsections +} + +func (c *Config) marshalURLs() { + s := c.Raw.Section(urlSection) + s.Subsections = make(format.Subsections, len(c.URLs)) + + var i int + for _, r := range c.URLs { + section := r.marshal() + // the submodule section at config is a subset of the .gitmodule file + // we should remove the non-valid options for the config file. + s.Subsections[i] = section + i++ + } +} + +func (c *Config) marshalInit() { + s := c.Raw.Section(initSection) + if c.Init.DefaultBranch != "" { + s.SetOption(defaultBranchKey, c.Init.DefaultBranch) + } +} + +// RemoteConfig contains the configuration for a given remote repository. +type RemoteConfig struct { + // Name of the remote + Name string + // URLs the URLs of a remote repository. It must be non-empty. Fetch will + // always use the first URL, while push will use all of them. + URLs []string + + // insteadOfRulesApplied have urls been modified + insteadOfRulesApplied bool + // originalURLs are the urls before applying insteadOf rules + originalURLs []string + + // Fetch the default set of "refspec" for fetch operation + Fetch []RefSpec + + // raw representation of the subsection, filled by marshal or unmarshal are + // called + raw *format.Subsection +} + +// Validate validates the fields and sets the default values. +func (c *RemoteConfig) Validate() error { + if c.Name == "" { + return ErrRemoteConfigEmptyName + } + + if len(c.URLs) == 0 { + return ErrRemoteConfigEmptyURL + } + + for _, r := range c.Fetch { + if err := r.Validate(); err != nil { + return err + } + } + + if len(c.Fetch) == 0 { + c.Fetch = []RefSpec{RefSpec(fmt.Sprintf(DefaultFetchRefSpec, c.Name))} + } + + return nil +} + +func (c *RemoteConfig) unmarshal(s *format.Subsection) error { + c.raw = s + + fetch := []RefSpec{} + for _, f := range c.raw.Options.GetAll(fetchKey) { + rs := RefSpec(f) + if err := rs.Validate(); err != nil { + return err + } + + fetch = append(fetch, rs) + } + + c.Name = c.raw.Name + c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...) + c.Fetch = fetch + + return nil +} + +func (c *RemoteConfig) marshal() *format.Subsection { + if c.raw == nil { + c.raw = &format.Subsection{} + } + + c.raw.Name = c.Name + if len(c.URLs) == 0 { + c.raw.RemoveOption(urlKey) + } else { + urls := c.URLs + if c.insteadOfRulesApplied { + urls = c.originalURLs + } + + c.raw.SetOption(urlKey, urls...) + } + + if len(c.Fetch) == 0 { + c.raw.RemoveOption(fetchKey) + } else { + var values []string + for _, rs := range c.Fetch { + values = append(values, rs.String()) + } + + c.raw.SetOption(fetchKey, values...) 
+	}
+
+	return c.raw
+}
+
+// IsFirstURLLocal returns whether the first URL of the remote is a local
+// endpoint.
+func (c *RemoteConfig) IsFirstURLLocal() bool {
+	return url.IsLocalEndpoint(c.URLs[0])
+}
+
+// applyURLRules rewrites the remote URLs according to the given insteadOf
+// rules, keeping a copy of the original URLs.
+func (c *RemoteConfig) applyURLRules(urlRules map[string]*URL) {
+	// Save the original URLs before rewriting.
+	originalURLs := make([]string, len(c.URLs))
+	copy(originalURLs, c.URLs)
+
+	for i, url := range c.URLs {
+		if matchingURLRule := findLongestInsteadOfMatch(url, urlRules); matchingURLRule != nil {
+			c.URLs[i] = matchingURLRule.ApplyInsteadOf(c.URLs[i])
+			c.insteadOfRulesApplied = true
+		}
+	}
+
+	if c.insteadOfRulesApplied {
+		c.originalURLs = originalURLs
+	}
+}
diff --git a/vendor/github.com/go-git/go-git/v5/config/modules.go b/vendor/github.com/go-git/go-git/v5/config/modules.go
new file mode 100644
index 0000000000..1c10aa354e
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/config/modules.go
@@ -0,0 +1,139 @@
+package config
+
+import (
+	"bytes"
+	"errors"
+	"regexp"
+
+	format "github.com/go-git/go-git/v5/plumbing/format/config"
+)
+
+var (
+	ErrModuleEmptyURL  = errors.New("module config: empty URL")
+	ErrModuleEmptyPath = errors.New("module config: empty path")
+	ErrModuleBadPath   = errors.New("submodule has an invalid path")
+)
+
+var (
+	// Matches module paths with dotdot ".." components.
+	dotdotPath = regexp.MustCompile(`(^|[/\\])\.\.([/\\]|$)`)
+)
+
+// Modules defines the submodule properties; it represents a .gitmodules file
+// https://www.kernel.org/pub/software/scm/git/docs/gitmodules.html
+type Modules struct {
+	// Submodules is a map of submodules, keyed by submodule name.
+	Submodules map[string]*Submodule
+
+	raw *format.Config
+}
+
+// NewModules returns a new empty Modules.
+func NewModules() *Modules {
+	return &Modules{
+		Submodules: make(map[string]*Submodule),
+		raw:        format.New(),
+	}
+}
+
+const (
+	pathKey   = "path"
+	branchKey = "branch"
+)
+
+// Unmarshal parses a git-config file and stores it.
+func (m *Modules) Unmarshal(b []byte) error {
+	r := bytes.NewBuffer(b)
+	d := format.NewDecoder(r)
+
+	m.raw = format.New()
+	if err := d.Decode(m.raw); err != nil {
+		return err
+	}
+
+	unmarshalSubmodules(m.raw, m.Submodules)
+	return nil
+}
+
+// Marshal returns Modules encoded as a git-config file.
+func (m *Modules) Marshal() ([]byte, error) {
+	s := m.raw.Section(submoduleSection)
+	s.Subsections = make(format.Subsections, len(m.Submodules))
+
+	var i int
+	for _, r := range m.Submodules {
+		s.Subsections[i] = r.marshal()
+		i++
+	}
+
+	buf := bytes.NewBuffer(nil)
+	if err := format.NewEncoder(buf).Encode(m.raw); err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+// Submodule defines a submodule.
+type Submodule struct {
+	// Name is the module name.
+	Name string
+	// Path defines the path, relative to the top-level directory of the Git
+	// working tree.
+	Path string
+	// URL defines a URL from which the submodule repository can be cloned.
+	URL string
+	// Branch is a remote branch name for tracking updates in the upstream
+	// submodule. Optional value.
+	Branch string
+
+	// raw representation of the subsection, filled when marshal or unmarshal
+	// is called.
+	raw *format.Subsection
+}
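+// Illustrative sketch (editorial example): parsing a minimal .gitmodules
+// payload with Modules.Unmarshal; the host name is a placeholder.
+//
+//	data := []byte("[submodule \"lib\"]\n\tpath = lib\n\turl = https://example.com/lib.git\n")
+//	m := config.NewModules()
+//	if err := m.Unmarshal(data); err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(m.Submodules["lib"].URL) // https://example.com/lib.git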
+// Validate validates the fields and sets the default values.
+func (m *Submodule) Validate() error {
+	if m.Path == "" {
+		return ErrModuleEmptyPath
+	}
+
+	if m.URL == "" {
+		return ErrModuleEmptyURL
+	}
+
+	if dotdotPath.MatchString(m.Path) {
+		return ErrModuleBadPath
+	}
+
+	return nil
+}
+
+func (m *Submodule) unmarshal(s *format.Subsection) {
+	m.raw = s
+
+	m.Name = m.raw.Name
+	m.Path = m.raw.Option(pathKey)
+	m.URL = m.raw.Option(urlKey)
+	m.Branch = m.raw.Option(branchKey)
+}
+
+func (m *Submodule) marshal() *format.Subsection {
+	if m.raw == nil {
+		m.raw = &format.Subsection{}
+	}
+
+	m.raw.Name = m.Name
+	if m.raw.Name == "" {
+		m.raw.Name = m.Path
+	}
+
+	m.raw.SetOption(pathKey, m.Path)
+	m.raw.SetOption(urlKey, m.URL)
+
+	if m.Branch != "" {
+		m.raw.SetOption(branchKey, m.Branch)
+	}
+
+	return m.raw
+}
diff --git a/vendor/github.com/go-git/go-git/v5/config/refspec.go b/vendor/github.com/go-git/go-git/v5/config/refspec.go
new file mode 100644
index 0000000000..4bfaa37bbb
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/config/refspec.go
@@ -0,0 +1,155 @@
+package config
+
+import (
+	"errors"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+const (
+	refSpecWildcard  = "*"
+	refSpecForce     = "+"
+	refSpecSeparator = ":"
+)
+
+var (
+	ErrRefSpecMalformedSeparator = errors.New("malformed refspec, separators are wrong")
+	ErrRefSpecMalformedWildcard  = errors.New("malformed refspec, mismatched number of wildcards")
+)
+
+// RefSpec is a mapping from local branches to remote references.
+// The format of the refspec is an optional +, followed by <src>:<dst>, where
+// <src> is the pattern for references on the remote side and <dst> is where
+// those references will be written locally. The + tells Git to update the
+// reference even if it isn't a fast-forward.
+// e.g.: "+refs/heads/*:refs/remotes/origin/*"
+//
+// https://git-scm.com/book/en/v2/Git-Internals-The-Refspec
type RefSpec string
+
+// Validate validates the RefSpec.
+func (s RefSpec) Validate() error {
+	spec := string(s)
+	if strings.Count(spec, refSpecSeparator) != 1 {
+		return ErrRefSpecMalformedSeparator
+	}
+
+	sep := strings.Index(spec, refSpecSeparator)
+	if sep == len(spec)-1 {
+		return ErrRefSpecMalformedSeparator
+	}
+
+	ws := strings.Count(spec[0:sep], refSpecWildcard)
+	wd := strings.Count(spec[sep+1:], refSpecWildcard)
+	if ws == wd && ws < 2 && wd < 2 {
+		return nil
+	}
+
+	return ErrRefSpecMalformedWildcard
+}
+
+// IsForceUpdate returns whether the update is allowed in non-fast-forward
+// merges.
+func (s RefSpec) IsForceUpdate() bool {
+	return s[0] == refSpecForce[0]
+}
+
+// IsDelete returns true if the refspec indicates a delete (empty src).
+func (s RefSpec) IsDelete() bool {
+	return s[0] == refSpecSeparator[0]
+}
+
+// IsExactSHA1 returns true if the source is a SHA1 hash.
+func (s RefSpec) IsExactSHA1() bool {
+	return plumbing.IsHash(s.Src())
+}
+
+// Src returns the src side.
+func (s RefSpec) Src() string {
+	spec := string(s)
+
+	var start int
+	if s.IsForceUpdate() {
+		start = 1
+	} else {
+		start = 0
+	}
+
+	end := strings.Index(spec, refSpecSeparator)
+	return spec[start:end]
+}
+
+// Match matches the given plumbing.ReferenceName against the source.
+func (s RefSpec) Match(n plumbing.ReferenceName) bool {
+	if !s.IsWildcard() {
+		return s.matchExact(n)
+	}
+
+	return s.matchGlob(n)
+}
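+// Illustrative sketch (editorial example): the standard wildcard refspec
+// maps remote branches into the remote-tracking namespace (Dst is defined
+// just below).
+//
+//	rs := config.RefSpec("+refs/heads/*:refs/remotes/origin/*")
+//	rs.Validate()                  // nil: one separator, matched wildcards
+//	rs.IsForceUpdate()             // true, because of the leading "+"
+//	rs.Src()                       // "refs/heads/*"
+//	rs.Match("refs/heads/feature") // true
+//	rs.Dst("refs/heads/feature")   // "refs/remotes/origin/feature"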
+// IsWildcard returns true if the RefSpec contains a wildcard.
+func (s RefSpec) IsWildcard() bool {
+	return strings.Contains(string(s), refSpecWildcard)
+}
+
+func (s RefSpec) matchExact(n plumbing.ReferenceName) bool {
+	return s.Src() == n.String()
+}
+
+func (s RefSpec) matchGlob(n plumbing.ReferenceName) bool {
+	src := s.Src()
+	name := n.String()
+	wildcard := strings.Index(src, refSpecWildcard)
+
+	var prefix, suffix string
+	prefix = src[0:wildcard]
+	if len(src) > wildcard+1 {
+		suffix = src[wildcard+1:]
+	}
+
+	return len(name) >= len(prefix)+len(suffix) &&
+		strings.HasPrefix(name, prefix) &&
+		strings.HasSuffix(name, suffix)
+}
+
+// Dst returns the destination for the given remote reference.
+func (s RefSpec) Dst(n plumbing.ReferenceName) plumbing.ReferenceName {
+	spec := string(s)
+	start := strings.Index(spec, refSpecSeparator) + 1
+	dst := spec[start:]
+	src := s.Src()
+
+	if !s.IsWildcard() {
+		return plumbing.ReferenceName(dst)
+	}
+
+	name := n.String()
+	ws := strings.Index(src, refSpecWildcard)
+	wd := strings.Index(dst, refSpecWildcard)
+	match := name[ws : len(name)-(len(src)-(ws+1))]
+
+	return plumbing.ReferenceName(dst[0:wd] + match + dst[wd+1:])
+}
+
+// Reverse returns the RefSpec with its source and destination swapped.
+func (s RefSpec) Reverse() RefSpec {
+	spec := string(s)
+	separator := strings.Index(spec, refSpecSeparator)
+
+	return RefSpec(spec[separator+1:] + refSpecSeparator + spec[:separator])
+}
+
+func (s RefSpec) String() string {
+	return string(s)
+}
+
+// MatchAny returns true if any of the RefSpecs matches the given
+// ReferenceName.
+func MatchAny(l []RefSpec, n plumbing.ReferenceName) bool {
+	for _, r := range l {
+		if r.Match(n) {
+			return true
+		}
+	}
+
+	return false
+}
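+// Illustrative sketch (editorial example) for the URL rewrite (insteadOf)
+// rules defined in url.go below: the longest matching InsteadOf prefix is
+// replaced by the rule's Name. The host names are placeholders.
+//
+//	u := &config.URL{Name: "ssh://git@example.com/", InsteadOf: "https://example.com/"}
+//	u.ApplyInsteadOf("https://example.com/org/repo.git") // "ssh://git@example.com/org/repo.git"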
diff --git a/vendor/github.com/go-git/go-git/v5/config/url.go b/vendor/github.com/go-git/go-git/v5/config/url.go
new file mode 100644
index 0000000000..114d6b2662
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/config/url.go
@@ -0,0 +1,81 @@
+package config
+
+import (
+	"errors"
+	"strings"
+
+	format "github.com/go-git/go-git/v5/plumbing/format/config"
+)
+
+var (
+	errURLEmptyInsteadOf = errors.New("url config: empty insteadOf")
+)
+
+// URL defines URL rewrite rules.
+type URL struct {
+	// Name is the new base URL.
+	Name string
+	// InsteadOf: any URL that starts with this value will be rewritten to
+	// start, instead, with the Name value. When more than one insteadOf
+	// string matches a given URL, the longest match is used.
+	InsteadOf string
+
+	// raw representation of the subsection, filled when marshal or unmarshal
+	// is called.
+	raw *format.Subsection
+}
+
+// Validate validates the fields of the URL.
+func (b *URL) Validate() error {
+	if b.InsteadOf == "" {
+		return errURLEmptyInsteadOf
+	}
+
+	return nil
+}
+
+const (
+	insteadOfKey = "insteadOf"
+)
+
+func (u *URL) unmarshal(s *format.Subsection) error {
+	u.raw = s
+
+	u.Name = s.Name
+	u.InsteadOf = u.raw.Option(insteadOfKey)
+	return nil
+}
+
+func (u *URL) marshal() *format.Subsection {
+	if u.raw == nil {
+		u.raw = &format.Subsection{}
+	}
+
+	u.raw.Name = u.Name
+	u.raw.SetOption(insteadOfKey, u.InsteadOf)
+
+	return u.raw
+}
+
+func findLongestInsteadOfMatch(remoteURL string, urls map[string]*URL) *URL {
+	var longestMatch *URL
+	for _, u := range urls {
+		if !strings.HasPrefix(remoteURL, u.InsteadOf) {
+			continue
+		}
+
+		// According to the spec, if there is more than one match, take the
+		// longest.
+		if longestMatch == nil || len(longestMatch.InsteadOf) < len(u.InsteadOf) {
+			longestMatch = u
+		}
+	}
+
+	return longestMatch
+}
+
+// ApplyInsteadOf rewrites the given URL using this rule, if its InsteadOf
+// prefix matches; otherwise the URL is returned unchanged.
+func (u *URL) ApplyInsteadOf(url string) string {
+	if !strings.HasPrefix(url, u.InsteadOf) {
+		return url
+	}
+
+	return u.Name + url[len(u.InsteadOf):]
+}
diff --git a/vendor/github.com/go-git/go-git/v5/doc.go b/vendor/github.com/go-git/go-git/v5/doc.go
new file mode 100644
index 0000000000..3d817fe9c8
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/doc.go
@@ -0,0 +1,10 @@
+// A highly extensible git implementation in pure Go.
+//
+// go-git aims to reach the completeness of libgit2 or jgit; it currently
+// covers the majority of the plumbing read operations and some of the main
+// write operations, but lacks the main porcelain operations such as merges.
+//
+// It is highly extensible: we have been following the open/closed principle
+// in its design to facilitate extensions, mainly focusing the efforts on the
+// persistence of the objects.
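+// Illustrative sketch (editorial example) of the porcelain API this package
+// exposes; the in-memory clone assumes the storage/memory and go-billy memfs
+// packages.
+//
+//	r, err := git.Clone(memory.NewStorage(), memfs.New(), &git.CloneOptions{
+//		URL: "https://github.com/go-git/go-git",
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	head, _ := r.Head()
+//	fmt.Println(head.Hash())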
+package git diff --git a/vendor/github.com/go-git/go-git/v5/go.mod b/vendor/github.com/go-git/go-git/v5/go.mod new file mode 100644 index 0000000000..402613f0f1 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/go.mod @@ -0,0 +1,31 @@ +module github.com/go-git/go-git/v5 + +require ( + github.com/Microsoft/go-winio v0.4.16 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 + github.com/acomagu/bufpipe v1.0.3 + github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 // indirect + github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 + github.com/emirpasic/gods v1.12.0 + github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect + github.com/gliderlabs/ssh v0.2.2 + github.com/go-git/gcfg v1.5.0 + github.com/go-git/go-billy/v5 v5.3.1 + github.com/go-git/go-git-fixtures/v4 v4.2.1 + github.com/google/go-cmp v0.3.0 + github.com/imdario/mergo v0.3.12 + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 + github.com/jessevdk/go-flags v1.5.0 + github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 + github.com/mitchellh/go-homedir v1.1.0 + github.com/sergi/go-diff v1.1.0 + github.com/xanzy/ssh-agent v0.3.0 + golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b + golang.org/x/net v0.0.0-20210326060303-6b1517762897 + golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79 + golang.org/x/text v0.3.3 + gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c + gopkg.in/warnings.v0 v0.1.2 // indirect +) + +go 1.13 diff --git a/vendor/github.com/go-git/go-git/v5/go.sum b/vendor/github.com/go-git/go-git/v5/go.sum new file mode 100644 index 0000000000..9227247750 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/go.sum @@ -0,0 +1,103 @@ +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= +github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= +github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= +github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ= +github.com/flynn/go-shlex 
v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= +github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= +github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.0 h1:KZL1OFdS+afiIjN4hr/zpj5cEtC0OJhbmTA18PsBb8c= +github.com/go-git/go-billy/v5 v5.3.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= +github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= +github.com/go-git/go-git-fixtures/v4 v4.2.1 h1:n9gGL1Ct/yIw+nfsfr8s4+sbhT+Ncu2SubfXjIWgci8= +github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= +github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= +github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= +github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= +github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b h1:7mWr3k41Qtv8XlltBkDkl8LoP3mpSgBW8BUoxtEdbXg= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210326060303-6b1517762897 h1:KrsHThm5nFk34YtATK1LsThyGhGbGe1olrte/HInHvs= +golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79 h1:RX8C8PRZc2hTIod4ds8ij+/4RQX3AqhYj3uOHmyaz4E= +golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go b/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go new file mode 100644 index 0000000000..8facf17ffb --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/revision/parser.go @@ -0,0 +1,622 @@ +// Package revision extracts git revision from string +// More information about revision : https://www.kernel.org/pub/software/scm/git/docs/gitrevisions.html +package revision + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "time" +) + +// ErrInvalidRevision is emitted if string doesn't match valid revision +type ErrInvalidRevision struct { + s string +} + +func (e *ErrInvalidRevision) Error() string { + return "Revision invalid : " + e.s +} + +// Revisioner represents a revision component. +// A revision is made of multiple revision components +// obtained after parsing a revision string, +// for instance revision "master~" will be converted in +// two revision components Ref and TildePath +type Revisioner interface { +} + +// Ref represents a reference name : HEAD, master, +type Ref string + +// TildePath represents ~, ~{n} +type TildePath struct { + Depth int +} + +// CaretPath represents ^, ^{n} +type CaretPath struct { + Depth int +} + +// CaretReg represents ^{/foo bar} +type CaretReg struct { + Regexp *regexp.Regexp + Negate bool +} + +// CaretType represents ^{commit} +type CaretType struct { + ObjectType string +} + +// AtReflog represents @{n} +type AtReflog struct { + Depth int +} + +// AtCheckout represents @{-n} +type AtCheckout struct { + Depth int +} + +// AtUpstream represents @{upstream}, @{u} +type AtUpstream struct { + BranchName string +} + +// AtPush represents @{push} +type AtPush struct { + BranchName string +} + +// AtDate represents @{"2006-01-02T15:04:05Z"} +type AtDate struct { + Date time.Time +} + +// ColonReg represents :/foo bar +type ColonReg struct { + Regexp *regexp.Regexp + Negate bool +} + +// ColonPath represents :./ : +type ColonPath struct { + Path string +} + +// ColonStagePath represents ::/ +type ColonStagePath struct { + Path string + Stage int +} + +// Parser represents a parser +// use to tokenize and transform to revisioner chunks +// a given string +type Parser struct { + s *scanner + currentParsedChar struct { + tok token + lit string + } + unreadLastChar bool +} + +// NewParserFromString returns a new instance of parser from a string. +func NewParserFromString(s string) *Parser { + return NewParser(bytes.NewBufferString(s)) +} + +// NewParser returns a new instance of parser. 
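+// Illustrative sketch (editorial example): parsing "master~2" yields two
+// revisioner chunks, Ref("master") followed by TildePath{Depth: 2}.
+//
+//	p := revision.NewParserFromString("master~2")
+//	chunks, err := p.Parse()
+//	// chunks[0] == revision.Ref("master")
+//	// chunks[1] == revision.TildePath{Depth: 2}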
+func NewParser(r io.Reader) *Parser { + return &Parser{s: newScanner(r)} +} + +// scan returns the next token from the underlying scanner +// or the last scanned token if an unscan was requested +func (p *Parser) scan() (token, string, error) { + if p.unreadLastChar { + p.unreadLastChar = false + return p.currentParsedChar.tok, p.currentParsedChar.lit, nil + } + + tok, lit, err := p.s.scan() + + p.currentParsedChar.tok, p.currentParsedChar.lit = tok, lit + + return tok, lit, err +} + +// unscan pushes the previously read token back onto the buffer. +func (p *Parser) unscan() { p.unreadLastChar = true } + +// Parse explode a revision string into revisioner chunks +func (p *Parser) Parse() ([]Revisioner, error) { + var rev Revisioner + var revs []Revisioner + var tok token + var err error + + for { + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case at: + rev, err = p.parseAt() + case tilde: + rev, err = p.parseTilde() + case caret: + rev, err = p.parseCaret() + case colon: + rev, err = p.parseColon() + case eof: + err = p.validateFullRevision(&revs) + + if err != nil { + return []Revisioner{}, err + } + + return revs, nil + default: + p.unscan() + rev, err = p.parseRef() + } + + if err != nil { + return []Revisioner{}, err + } + + revs = append(revs, rev) + } +} + +// validateFullRevision ensures all revisioner chunks make a valid revision +func (p *Parser) validateFullRevision(chunks *[]Revisioner) error { + var hasReference bool + + for i, chunk := range *chunks { + switch chunk.(type) { + case Ref: + if i == 0 { + hasReference = true + } else { + return &ErrInvalidRevision{`reference must be defined once at the beginning`} + } + case AtDate: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`} + case AtReflog: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : @{}, @{}`} + case AtCheckout: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : @{-}`} + case AtUpstream: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : @{upstream}, @{upstream}, @{u}, @{u}`} + case AtPush: + if len(*chunks) == 1 || hasReference && len(*chunks) == 2 { + return nil + } + + return &ErrInvalidRevision{`"@" statement is not valid, could be : @{push}, @{push}`} + case TildePath, CaretPath, CaretReg: + if !hasReference { + return &ErrInvalidRevision{`"~" or "^" statement must have a reference defined at the beginning`} + } + case ColonReg: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : :/`} + case ColonPath: + if i == len(*chunks)-1 && hasReference || len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : :`} + case ColonStagePath: + if len(*chunks) == 1 { + return nil + } + + return &ErrInvalidRevision{`":" statement is not valid, could be : ::`} + } + } + + return nil +} + +// parseAt extract @ statements +func (p *Parser) parseAt() (Revisioner, error) { + var tok, nextTok token + var lit, nextLit string + var err error + + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + if tok != obrace { + p.unscan() + + return Ref("HEAD"), nil + } + + tok, lit, err = p.scan() + + if err != 
nil { + return nil, err + } + + nextTok, nextLit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == word && (lit == "u" || lit == "upstream") && nextTok == cbrace: + return AtUpstream{}, nil + case tok == word && lit == "push" && nextTok == cbrace: + return AtPush{}, nil + case tok == number && nextTok == cbrace: + n, _ := strconv.Atoi(lit) + + return AtReflog{n}, nil + case tok == minus && nextTok == number: + n, _ := strconv.Atoi(nextLit) + + t, _, err := p.scan() + + if err != nil { + return nil, err + } + + if t != cbrace { + return nil, &ErrInvalidRevision{s: `missing "}" in @{-n} structure`} + } + + return AtCheckout{n}, nil + default: + p.unscan() + + date := lit + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == cbrace: + t, err := time.Parse("2006-01-02T15:04:05Z", date) + + if err != nil { + return nil, &ErrInvalidRevision{fmt.Sprintf(`wrong date "%s" must fit ISO-8601 format : 2006-01-02T15:04:05Z`, date)} + } + + return AtDate{t}, nil + default: + date += lit + } + } + } +} + +// parseTilde extract ~ statements +func (p *Parser) parseTilde() (Revisioner, error) { + var tok token + var lit string + var err error + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == number: + n, _ := strconv.Atoi(lit) + + return TildePath{n}, nil + default: + p.unscan() + return TildePath{1}, nil + } +} + +// parseCaret extract ^ statements +func (p *Parser) parseCaret() (Revisioner, error) { + var tok token + var lit string + var err error + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == obrace: + r, err := p.parseCaretBraces() + + if err != nil { + return nil, err + } + + return r, nil + case tok == number: + n, _ := strconv.Atoi(lit) + + if n > 2 { + return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" found must be 0, 1 or 2 after "^"`, lit)} + } + + return CaretPath{n}, nil + default: + p.unscan() + return CaretPath{1}, nil + } +} + +// parseCaretBraces extract ^{} statements +func (p *Parser) parseCaretBraces() (Revisioner, error) { + var tok, nextTok token + var lit, _ string + start := true + var re string + var negate bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == word && nextTok == cbrace && (lit == "commit" || lit == "tree" || lit == "blob" || lit == "tag" || lit == "object"): + return CaretType{lit}, nil + case re == "" && tok == cbrace: + return CaretType{"tag"}, nil + case re == "" && tok == emark && nextTok == emark: + re += lit + case re == "" && tok == emark && nextTok == minus: + negate = true + case re == "" && tok == emark: + return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" 
others than those defined are reserved`} + case re == "" && tok == slash: + p.unscan() + case tok != slash && start: + return nil, &ErrInvalidRevision{fmt.Sprintf(`"%s" is not a valid revision suffix brace component`, lit)} + case tok != cbrace: + p.unscan() + re += lit + case tok == cbrace: + p.unscan() + + reg, err := regexp.Compile(re) + + if err != nil { + return CaretReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} + } + + return CaretReg{reg, negate}, nil + } + + start = false + } +} + +// parseColon extract : statements +func (p *Parser) parseColon() (Revisioner, error) { + var tok token + var err error + + tok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case slash: + return p.parseColonSlash() + default: + p.unscan() + return p.parseColonDefault() + } +} + +// parseColonSlash extract :/ statements +func (p *Parser) parseColonSlash() (Revisioner, error) { + var tok, nextTok token + var lit string + var re string + var negate bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == emark && nextTok == emark: + re += lit + case re == "" && tok == emark && nextTok == minus: + negate = true + case re == "" && tok == emark: + return nil, &ErrInvalidRevision{s: `revision suffix brace component sequences starting with "/!" others than those defined are reserved`} + case tok == eof: + p.unscan() + reg, err := regexp.Compile(re) + + if err != nil { + return ColonReg{}, &ErrInvalidRevision{fmt.Sprintf(`revision suffix brace component, %s`, err.Error())} + } + + return ColonReg{reg, negate}, nil + default: + p.unscan() + re += lit + } + } +} + +// parseColonDefault extract : statements +func (p *Parser) parseColonDefault() (Revisioner, error) { + var tok token + var lit string + var path string + var stage int + var err error + var n = -1 + + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + nextTok, _, err := p.scan() + + if err != nil { + return nil, err + } + + if tok == number && nextTok == colon { + n, _ = strconv.Atoi(lit) + } + + switch n { + case 0, 1, 2, 3: + stage = n + default: + path += lit + p.unscan() + } + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch { + case tok == eof && n == -1: + return ColonPath{path}, nil + case tok == eof: + return ColonStagePath{path, stage}, nil + default: + path += lit + } + } +} + +// parseRef extract reference name +func (p *Parser) parseRef() (Revisioner, error) { + var tok, prevTok token + var lit, buf string + var endOfRef bool + var err error + + for { + tok, lit, err = p.scan() + + if err != nil { + return nil, err + } + + switch tok { + case eof, at, colon, tilde, caret: + endOfRef = true + } + + err := p.checkRefFormat(tok, lit, prevTok, buf, endOfRef) + + if err != nil { + return "", err + } + + if endOfRef { + p.unscan() + return Ref(buf), nil + } + + buf += lit + prevTok = tok + } +} + +// checkRefFormat ensure reference name follow rules defined here : +// https://git-scm.com/docs/git-check-ref-format +func (p *Parser) checkRefFormat(token token, literal string, previousToken token, buffer string, endOfRef bool) error { + switch token { + case aslash, space, control, qmark, asterisk, obracket: + return &ErrInvalidRevision{fmt.Sprintf(`must not contains "%s"`, literal)} + } + + switch { + case (token == dot || token == slash) && buffer == "": + return 
&ErrInvalidRevision{fmt.Sprintf(`must not start with "%s"`, literal)} + case previousToken == slash && endOfRef: + return &ErrInvalidRevision{`must not end with "/"`} + case previousToken == dot && endOfRef: + return &ErrInvalidRevision{`must not end with "."`} + case token == dot && previousToken == slash: + return &ErrInvalidRevision{`must not contains "/."`} + case previousToken == dot && token == dot: + return &ErrInvalidRevision{`must not contains ".."`} + case previousToken == slash && token == slash: + return &ErrInvalidRevision{`must not contains consecutively "/"`} + case (token == slash || endOfRef) && len(buffer) > 4 && buffer[len(buffer)-5:] == ".lock": + return &ErrInvalidRevision{"cannot end with .lock"} + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go b/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go new file mode 100644 index 0000000000..c46c21b795 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go @@ -0,0 +1,117 @@ +package revision + +import ( + "bufio" + "io" + "unicode" +) + +// runeCategoryValidator takes a rune as input and +// validates it belongs to a rune category +type runeCategoryValidator func(r rune) bool + +// tokenizeExpression aggregates a series of runes matching check predicate into a single +// string and provides given tokenType as token type +func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r *bufio.Reader) (token, string, error) { + var data []rune + data = append(data, ch) + + for { + c, _, err := r.ReadRune() + + if c == zeroRune { + break + } + + if err != nil { + return tokenError, "", err + } + + if check(c) { + data = append(data, c) + } else { + err := r.UnreadRune() + + if err != nil { + return tokenError, "", err + } + + return tokenType, string(data), nil + } + } + + return tokenType, string(data), nil +} + +var zeroRune = rune(0) + +// scanner represents a lexical scanner. +type scanner struct { + r *bufio.Reader +} + +// newScanner returns a new instance of scanner. 
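+// Illustrative sketch (editorial example): scanning "HEAD~1" with this
+// scanner emits word("HEAD"), tilde("~"), number("1"), then eof.
+//
+//	s := newScanner(strings.NewReader("HEAD~1"))
+//	for {
+//		tok, lit, _ := s.scan()
+//		if tok == eof {
+//			break
+//		}
+//		fmt.Println(tok, lit)
+//	}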
+func newScanner(r io.Reader) *scanner {
+	return &scanner{r: bufio.NewReader(r)}
+}
+
+// scan extracts tokens and their string counterparts from the reader.
+func (s *scanner) scan() (token, string, error) {
+	ch, _, err := s.r.ReadRune()
+
+	if err != nil && err != io.EOF {
+		return tokenError, "", err
+	}
+
+	switch ch {
+	case zeroRune:
+		return eof, "", nil
+	case ':':
+		return colon, string(ch), nil
+	case '~':
+		return tilde, string(ch), nil
+	case '^':
+		return caret, string(ch), nil
+	case '.':
+		return dot, string(ch), nil
+	case '/':
+		return slash, string(ch), nil
+	case '{':
+		return obrace, string(ch), nil
+	case '}':
+		return cbrace, string(ch), nil
+	case '-':
+		return minus, string(ch), nil
+	case '@':
+		return at, string(ch), nil
+	case '\\':
+		return aslash, string(ch), nil
+	case '?':
+		return qmark, string(ch), nil
+	case '*':
+		return asterisk, string(ch), nil
+	case '[':
+		return obracket, string(ch), nil
+	case '!':
+		return emark, string(ch), nil
+	}
+
+	if unicode.IsSpace(ch) {
+		return space, string(ch), nil
+	}
+
+	if unicode.IsControl(ch) {
+		return control, string(ch), nil
+	}
+
+	if unicode.IsLetter(ch) {
+		return tokenizeExpression(ch, word, unicode.IsLetter, s.r)
+	}
+
+	if unicode.IsNumber(ch) {
+		return tokenizeExpression(ch, number, unicode.IsNumber, s.r)
+	}
+
+	return tokenError, string(ch), nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/internal/revision/token.go b/vendor/github.com/go-git/go-git/v5/internal/revision/token.go
new file mode 100644
index 0000000000..abc4048869
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/internal/revision/token.go
@@ -0,0 +1,28 @@
+package revision
+
+// token represents an entity extracted from string parsing.
+type token int
+
+const (
+	eof token = iota
+
+	aslash
+	asterisk
+	at
+	caret
+	cbrace
+	colon
+	control
+	dot
+	emark
+	minus
+	number
+	obrace
+	obracket
+	qmark
+	slash
+	space
+	tilde
+	tokenError
+	word
+)
diff --git a/vendor/github.com/go-git/go-git/v5/internal/url/url.go b/vendor/github.com/go-git/go-git/v5/internal/url/url.go
new file mode 100644
index 0000000000..14cf133de8
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/internal/url/url.go
@@ -0,0 +1,37 @@
+package url
+
+import (
+	"regexp"
+)
+
+var (
+	isSchemeRegExp   = regexp.MustCompile(`^[^:]+://`)
+	scpLikeUrlRegExp = regexp.MustCompile(`^(?:(?P<user>[^@]+)@)?(?P<host>[^:\s]+):(?:(?P<port>[0-9]{1,5})(?:\/|:))?(?P<path>[^\\].*\/[^\\].*)$`)
+)
+
+// MatchesScheme returns true if the given string matches a URL-like
+// format scheme.
+func MatchesScheme(url string) bool {
+	return isSchemeRegExp.MatchString(url)
+}
+
+// MatchesScpLike returns true if the given string matches an SCP-like
+// format scheme.
+func MatchesScpLike(url string) bool {
+	return scpLikeUrlRegExp.MatchString(url)
+}
+
+// FindScpLikeComponents returns the user, host, port and path of the
+// given SCP-like URL.
+func FindScpLikeComponents(url string) (user, host, port, path string) {
+	m := scpLikeUrlRegExp.FindStringSubmatch(url)
+	return m[1], m[2], m[3], m[4]
+}
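+// Illustrative sketch (editorial example):
+//
+//	url.MatchesScheme("https://github.com/go-git/go-git")  // true
+//	url.MatchesScpLike("git@github.com:go-git/go-git.git") // true
+//	user, host, port, path := url.FindScpLikeComponents("git@github.com:go-git/go-git.git")
+//	// user="git", host="github.com", port="", path="go-git/go-git.git"
+
+// IsLocalEndpoint returns true if the given URL string specifies a
+// local file endpoint. For example, on a Linux machine,
+// `/home/user/src/go-git` would match as a local endpoint, but
+// `https://github.com/src-d/go-git` would not.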
+func IsLocalEndpoint(url string) bool { + return !MatchesScheme(url) && !MatchesScpLike(url) +} diff --git a/vendor/github.com/go-git/go-git/v5/object_walker.go b/vendor/github.com/go-git/go-git/v5/object_walker.go new file mode 100644 index 0000000000..3fcdd2999c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/object_walker.go @@ -0,0 +1,104 @@ +package git + +import ( + "fmt" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/storage" +) + +type objectWalker struct { + Storer storage.Storer + // seen is the set of objects seen in the repo. + // seen map can become huge if walking over large + // repos. Thus using struct{} as the value type. + seen map[plumbing.Hash]struct{} +} + +func newObjectWalker(s storage.Storer) *objectWalker { + return &objectWalker{s, map[plumbing.Hash]struct{}{}} +} + +// walkAllRefs walks all (hash) references from the repo. +func (p *objectWalker) walkAllRefs() error { + // Walk over all the references in the repo. + it, err := p.Storer.IterReferences() + if err != nil { + return err + } + defer it.Close() + err = it.ForEach(func(ref *plumbing.Reference) error { + // Exit this iteration early for non-hash references. + if ref.Type() != plumbing.HashReference { + return nil + } + return p.walkObjectTree(ref.Hash()) + }) + return err +} + +func (p *objectWalker) isSeen(hash plumbing.Hash) bool { + _, seen := p.seen[hash] + return seen +} + +func (p *objectWalker) add(hash plumbing.Hash) { + p.seen[hash] = struct{}{} +} + +// walkObjectTree walks over all objects and remembers references +// to them in the objectWalker. This is used instead of the revlist +// walks because memory usage is tight with huge repos. +func (p *objectWalker) walkObjectTree(hash plumbing.Hash) error { + // Check if we have already seen, and mark this object + if p.isSeen(hash) { + return nil + } + p.add(hash) + // Fetch the object. + obj, err := object.GetObject(p.Storer, hash) + if err != nil { + return fmt.Errorf("Getting object %s failed: %v", hash, err) + } + // Walk all children depending on object type. + switch obj := obj.(type) { + case *object.Commit: + err = p.walkObjectTree(obj.TreeHash) + if err != nil { + return err + } + for _, h := range obj.ParentHashes { + err = p.walkObjectTree(h) + if err != nil { + return err + } + } + case *object.Tree: + for i := range obj.Entries { + // Shortcut for blob objects: + // 'or' the lower bits of a mode and check that it + // it matches a filemode.Executable. The type information + // is in the higher bits, but this is the cleanest way + // to handle plain files with different modes. + // Other non-tree objects are somewhat rare, so they + // are not special-cased. + if obj.Entries[i].Mode|0755 == filemode.Executable { + p.add(obj.Entries[i].Hash) + continue + } + // Normal walk for sub-trees (and symlinks etc). + err = p.walkObjectTree(obj.Entries[i].Hash) + if err != nil { + return err + } + } + case *object.Tag: + return p.walkObjectTree(obj.Target) + default: + // Error out on unhandled object types. 
+ return fmt.Errorf("Unknown object %X %s %T\n", obj.ID(), obj.Type(), obj) + } + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/options.go b/vendor/github.com/go-git/go-git/v5/options.go new file mode 100644 index 0000000000..70687965bb --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/options.go @@ -0,0 +1,634 @@ +package git + +import ( + "errors" + "fmt" + "regexp" + "strings" + "time" + + "github.com/ProtonMail/go-crypto/openpgp" + "github.com/go-git/go-git/v5/config" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/object" + "github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband" + "github.com/go-git/go-git/v5/plumbing/transport" +) + +// SubmoduleRescursivity defines how depth will affect any submodule recursive +// operation. +type SubmoduleRescursivity uint + +const ( + // DefaultRemoteName name of the default Remote, just like git command. + DefaultRemoteName = "origin" + + // NoRecurseSubmodules disables the recursion for a submodule operation. + NoRecurseSubmodules SubmoduleRescursivity = 0 + // DefaultSubmoduleRecursionDepth allow recursion in a submodule operation. + DefaultSubmoduleRecursionDepth SubmoduleRescursivity = 10 +) + +var ( + ErrMissingURL = errors.New("URL field is required") +) + +// CloneOptions describes how a clone should be performed. +type CloneOptions struct { + // The (possibly remote) repository URL to clone from. + URL string + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Name of the remote to be added, by default `origin`. + RemoteName string + // Remote branch to clone. + ReferenceName plumbing.ReferenceName + // Fetch only ReferenceName if true. + SingleBranch bool + // No checkout of HEAD after clone if true. + NoCheckout bool + // Limit fetching to the specified number of commits. + Depth int + // RecurseSubmodules after the clone is created, initialize all submodules + // within, using their default settings. This option is ignored if the + // cloned repository does not have a worktree. + RecurseSubmodules SubmoduleRescursivity + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress + // Tags describe how the tags will be fetched from the remote repository, + // by default is AllTags. + Tags TagMode + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte +} + +// Validate validates the fields and sets the default values. +func (o *CloneOptions) Validate() error { + if o.URL == "" { + return ErrMissingURL + } + + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.ReferenceName == "" { + o.ReferenceName = plumbing.HEAD + } + + if o.Tags == InvalidTagMode { + o.Tags = AllTags + } + + return nil +} + +// PullOptions describes how a pull should be performed. +type PullOptions struct { + // Name of the remote to be pulled. If empty, uses the default. + RemoteName string + // Remote branch to clone. If empty, uses HEAD. + ReferenceName plumbing.ReferenceName + // Fetch only ReferenceName if true. + SingleBranch bool + // Limit fetching to the specified number of commits. + Depth int + // Auth credentials, if required, to use with the remote repository. 
+ Auth transport.AuthMethod + // RecurseSubmodules controls if new commits of all populated submodules + // should be fetched too. + RecurseSubmodules SubmoduleRescursivity + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress + // Force allows the pull to update a local branch even when the remote + // branch does not descend from it. + Force bool + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte +} + +// Validate validates the fields and sets the default values. +func (o *PullOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.ReferenceName == "" { + o.ReferenceName = plumbing.HEAD + } + + return nil +} + +type TagMode int + +const ( + InvalidTagMode TagMode = iota + // TagFollowing any tag that points into the histories being fetched is also + // fetched. TagFollowing requires a server with `include-tag` capability + // in order to fetch the annotated tags objects. + TagFollowing + // AllTags fetch all tags from the remote (i.e., fetch remote tags + // refs/tags/* into local tags with the same name) + AllTags + //NoTags fetch no tags from the remote at all + NoTags +) + +// FetchOptions describes how a fetch should be performed +type FetchOptions struct { + // Name of the remote to fetch from. Defaults to origin. + RemoteName string + RefSpecs []config.RefSpec + // Depth limit fetching to the specified number of commits from the tip of + // each remote branch history. + Depth int + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored and the capability (if supported) + // no-progress, is sent to the server to avoid send this information. + Progress sideband.Progress + // Tags describe how the tags will be fetched from the remote repository, + // by default is TagFollowing. + Tags TagMode + // Force allows the fetch to update a local branch even when the remote + // branch does not descend from it. + Force bool + // InsecureSkipTLS skips ssl verify if protocol is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte +} + +// Validate validates the fields and sets the default values. +func (o *FetchOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if o.Tags == InvalidTagMode { + o.Tags = TagFollowing + } + + for _, r := range o.RefSpecs { + if err := r.Validate(); err != nil { + return err + } + } + + return nil +} + +// PushOptions describes how a push should be performed. +type PushOptions struct { + // RemoteName is the name of the remote to be pushed to. + RemoteName string + // RefSpecs specify what destination ref to update with what source + // object. A refspec with empty src can be used to delete a reference. + RefSpecs []config.RefSpec + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod + // Progress is where the human readable information sent by the server is + // stored, if nil nothing is stored. 
+ Progress sideband.Progress + // Prune specify that remote refs that match given RefSpecs and that do + // not exist locally will be removed. + Prune bool + // Force allows the push to update a remote branch even when the local + // branch does not descend from it. + Force bool + // InsecureSkipTLS skips ssl verify if protocal is https + InsecureSkipTLS bool + // CABundle specify additional ca bundle with system cert pool + CABundle []byte + // RequireRemoteRefs only allows a remote ref to be updated if its current + // value is the one specified here. + RequireRemoteRefs []config.RefSpec +} + +// Validate validates the fields and sets the default values. +func (o *PushOptions) Validate() error { + if o.RemoteName == "" { + o.RemoteName = DefaultRemoteName + } + + if len(o.RefSpecs) == 0 { + o.RefSpecs = []config.RefSpec{ + config.RefSpec(config.DefaultPushRefSpec), + } + } + + for _, r := range o.RefSpecs { + if err := r.Validate(); err != nil { + return err + } + } + + return nil +} + +// SubmoduleUpdateOptions describes how a submodule update should be performed. +type SubmoduleUpdateOptions struct { + // Init, if true initializes the submodules recorded in the index. + Init bool + // NoFetch tell to the update command to not fetch new objects from the + // remote site. + NoFetch bool + // RecurseSubmodules the update is performed not only in the submodules of + // the current repository but also in any nested submodules inside those + // submodules (and so on). Until the SubmoduleRescursivity is reached. + RecurseSubmodules SubmoduleRescursivity + // Auth credentials, if required, to use with the remote repository. + Auth transport.AuthMethod +} + +var ( + ErrBranchHashExclusive = errors.New("Branch and Hash are mutually exclusive") + ErrCreateRequiresBranch = errors.New("Branch is mandatory when Create is used") +) + +// CheckoutOptions describes how a checkout operation should be performed. +type CheckoutOptions struct { + // Hash is the hash of the commit to be checked out. If used, HEAD will be + // in detached mode. If Create is not used, Branch and Hash are mutually + // exclusive. + Hash plumbing.Hash + // Branch to be checked out, if Branch and Hash are empty is set to `master`. + Branch plumbing.ReferenceName + // Create a new branch named Branch and start it at Hash. + Create bool + // Force, if true when switching branches, proceed even if the index or the + // working tree differs from HEAD. This is used to throw away local changes + Force bool + // Keep, if true when switching branches, local changes (the index or the + // working tree changes) will be kept so that they can be committed to the + // target branch. Force and Keep are mutually exclusive, should not be both + // set to true. + Keep bool +} + +// Validate validates the fields and sets the default values. +func (o *CheckoutOptions) Validate() error { + if !o.Create && !o.Hash.IsZero() && o.Branch != "" { + return ErrBranchHashExclusive + } + + if o.Create && o.Branch == "" { + return ErrCreateRequiresBranch + } + + if o.Branch == "" { + o.Branch = plumbing.Master + } + + return nil +} + +// ResetMode defines the mode of a reset operation. +type ResetMode int8 + +const ( + // MixedReset resets the index but not the working tree (i.e., the changed + // files are preserved but not marked for commit) and reports what has not + // been updated. This is the default action. + MixedReset ResetMode = iota + // HardReset resets the index and working tree. 
+
+// ResetMode defines the mode of a reset operation.
+type ResetMode int8
+
+const (
+	// MixedReset resets the index but not the working tree (i.e., the changed
+	// files are preserved but not marked for commit) and reports what has not
+	// been updated. This is the default action.
+	MixedReset ResetMode = iota
+	// HardReset resets the index and working tree. Any changes to tracked
+	// files in the working tree are discarded.
+	HardReset
+	// MergeReset resets the index and updates the files in the working tree
+	// that are different between Commit and HEAD, but keeps those which are
+	// different between the index and working tree (i.e. which have changes
+	// which have not been added).
+	//
+	// If a file that is different between Commit and the index has unstaged
+	// changes, reset is aborted.
+	MergeReset
+	// SoftReset does not touch the index file or the working tree at all
+	// (but resets the head to <commit>, just like all modes do). This leaves
+	// all your changed files "Changes to be committed", as git status would
+	// put it.
+	SoftReset
+)
+
+// ResetOptions describes how a reset operation should be performed.
+type ResetOptions struct {
+	// Commit, if present, sets the current branch head (HEAD) to it.
+	Commit plumbing.Hash
+	// Mode is how the reset is performed: the current branch head is reset to
+	// Commit, and the index (reset to the tree of Commit) and the working
+	// tree are possibly updated, depending on Mode. If empty, MixedReset is
+	// used.
+	Mode ResetMode
+}
+
+// Validate validates the fields and sets the default values.
+func (o *ResetOptions) Validate(r *Repository) error {
+	if o.Commit == plumbing.ZeroHash {
+		ref, err := r.Head()
+		if err != nil {
+			return err
+		}
+
+		o.Commit = ref.Hash()
+	}
+
+	return nil
+}
+
+type LogOrder int8
+
+const (
+	LogOrderDefault LogOrder = iota
+	LogOrderDFS
+	LogOrderDFSPost
+	LogOrderBSF
+	LogOrderCommitterTime
+)
+
+// LogOptions describes how a log action should be performed.
+type LogOptions struct {
+	// When the From option is set, the log will only contain commits
+	// reachable from it. If this option is not set, HEAD will be used as
+	// the default From.
+	From plumbing.Hash
+
+	// The default traversal algorithm is depth-first search. Set
+	// Order=LogOrderCommitterTime for ordering by committer time (more
+	// compatible with `git log`), or Order=LogOrderBSF for breadth-first
+	// search.
+	Order LogOrder
+
+	// Show only those commits in which the specified file was inserted or
+	// updated. It is equivalent to running `git log -- <file-name>`.
+	// This field is kept for compatibility; it can be replaced with
+	// PathFilter.
+	FileName *string
+
+	// Filter commits based on the path of files that are updated. The
+	// function takes a file path as argument and should return true if the
+	// file is desired. It can be used to implement `git log -- <path>`,
+	// where <path> is a file path, a directory path, or a regexp of a
+	// file/directory path.
+	PathFilter func(string) bool
+
+	// Pretend as if all the refs in refs/, along with HEAD, are listed on
+	// the command line as <commit>. It is equivalent to running
+	// `git log --all`. If set to true, the From option will be ignored.
+	All bool
+
+	// Show commits more recent than a specific date. It is equivalent to
+	// running `git log --since <date>` or `git log --after <date>`.
+	Since *time.Time
+
+	// Show commits older than a specific date. It is equivalent to running
+	// `git log --until <date>` or `git log --before <date>`.
+	Until *time.Time
+}
+
+var (
+	ErrMissingAuthor = errors.New("author field is required")
+)
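+
+// A minimal usage sketch (illustrative only, not part of this file): listing
+// history with LogOptions, assuming r is a *Repository and object is go-git's
+// plumbing/object package:
+//
+//	iter, err := r.Log(&git.LogOptions{
+//		Order: git.LogOrderCommitterTime,
+//		All:   true,
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	_ = iter.ForEach(func(c *object.Commit) error {
+//		fmt.Println(c.Hash, c.Message)
+//		return nil
+//	})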
+
+// AddOptions describes how an `add` operation should be performed.
+type AddOptions struct {
+	// All is equivalent to `git add -A`: update the index not only where the
+	// working tree has a file matching `Path` but also where the index
+	// already has an entry. This adds, modifies, and removes index entries
+	// to match the working tree. If neither `Path` nor `Glob` is given when
+	// the `All` option is used, all files in the entire working tree are
+	// updated.
+	All bool
+	// Path is the exact filepath of the file or directory to be added.
+	Path string
+	// Glob adds all paths matching the pattern to the index. If the pattern
+	// matches a directory path, all directory contents are added to the
+	// index recursively.
+	Glob string
+}
+
+// Validate validates the fields and sets the default values.
+func (o *AddOptions) Validate(r *Repository) error {
+	if o.Path != "" && o.Glob != "" {
+		return fmt.Errorf("fields Path and Glob are mutually exclusive")
+	}
+
+	return nil
+}
+
+// CommitOptions describes how a commit operation should be performed.
+type CommitOptions struct {
+	// All automatically stages files that have been modified and deleted,
+	// but new files you have not told Git about are not affected.
+	All bool
+	// Author is the author's signature of the commit. If Author is empty,
+	// the Name and Email are read from the config, and time.Now is used as
+	// When.
+	Author *object.Signature
+	// Committer is the committer's signature of the commit. If Committer is
+	// nil, the Author signature is used.
+	Committer *object.Signature
+	// Parents are the parent commits for the new commit; by default, when
+	// len(Parents) is zero, the hash of the HEAD reference is used.
+	Parents []plumbing.Hash
+	// SignKey denotes a key to sign the commit with. A nil value here means
+	// the commit will not be signed. The private key must be present and
+	// already decrypted.
+	SignKey *openpgp.Entity
+}
+
+// Validate validates the fields and sets the default values.
+func (o *CommitOptions) Validate(r *Repository) error {
+	if o.Author == nil {
+		if err := o.loadConfigAuthorAndCommitter(r); err != nil {
+			return err
+		}
+	}
+
+	if o.Committer == nil {
+		o.Committer = o.Author
+	}
+
+	if len(o.Parents) == 0 {
+		head, err := r.Head()
+		if err != nil && err != plumbing.ErrReferenceNotFound {
+			return err
+		}
+
+		if head != nil {
+			o.Parents = []plumbing.Hash{head.Hash()}
+		}
+	}
+
+	return nil
+}
+
+func (o *CommitOptions) loadConfigAuthorAndCommitter(r *Repository) error {
+	cfg, err := r.ConfigScoped(config.SystemScope)
+	if err != nil {
+		return err
+	}
+
+	if o.Author == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
+		o.Author = &object.Signature{
+			Name:  cfg.Author.Name,
+			Email: cfg.Author.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Committer == nil && cfg.Committer.Email != "" && cfg.Committer.Name != "" {
+		o.Committer = &object.Signature{
+			Name:  cfg.Committer.Name,
+			Email: cfg.Committer.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Author == nil && cfg.User.Email != "" && cfg.User.Name != "" {
+		o.Author = &object.Signature{
+			Name:  cfg.User.Name,
+			Email: cfg.User.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Author == nil {
+		return ErrMissingAuthor
+	}
+
+	return nil
+}
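+
+// A minimal usage sketch (illustrative only, not part of this file): staging
+// and committing with the options above, assuming w is a *Worktree
+// (Worktree.Add and Worktree.Commit are defined elsewhere in go-git):
+//
+//	_, err := w.Add("README.md")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	hash, err := w.Commit("update readme", &git.CommitOptions{
+//		Author: &object.Signature{
+//			Name:  "Jane Doe",
+//			Email: "jane@example.com",
+//			When:  time.Now(),
+//		},
+//	})
+//	fmt.Println(hash, err)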
+
+var (
+	ErrMissingName    = errors.New("name field is required")
+	ErrMissingTagger  = errors.New("tagger field is required")
+	ErrMissingMessage = errors.New("message field is required")
+)
+
+// CreateTagOptions describes how a tag object should be created.
+type CreateTagOptions struct {
+	// Tagger defines the signature of the tag creator. If Tagger is empty,
+	// the Name and Email are read from the config, and time.Now is used as
+	// When.
+	Tagger *object.Signature
+	// Message defines the annotation of the tag. It is canonicalized during
+	// validation into the format expected by git: no leading whitespace and
+	// ending in a newline.
+	Message string
+	// SignKey denotes a key to sign the tag with. A nil value here means the
+	// tag will not be signed. The private key must be present and already
+	// decrypted.
+	SignKey *openpgp.Entity
+}
+
+// Validate validates the fields and sets the default values.
+func (o *CreateTagOptions) Validate(r *Repository, hash plumbing.Hash) error {
+	if o.Tagger == nil {
+		if err := o.loadConfigTagger(r); err != nil {
+			return err
+		}
+	}
+
+	if o.Message == "" {
+		return ErrMissingMessage
+	}
+
+	// Canonicalize the message into the expected message format.
+	o.Message = strings.TrimSpace(o.Message) + "\n"
+
+	return nil
+}
+
+func (o *CreateTagOptions) loadConfigTagger(r *Repository) error {
+	cfg, err := r.ConfigScoped(config.SystemScope)
+	if err != nil {
+		return err
+	}
+
+	if o.Tagger == nil && cfg.Author.Email != "" && cfg.Author.Name != "" {
+		o.Tagger = &object.Signature{
+			Name:  cfg.Author.Name,
+			Email: cfg.Author.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Tagger == nil && cfg.User.Email != "" && cfg.User.Name != "" {
+		o.Tagger = &object.Signature{
+			Name:  cfg.User.Name,
+			Email: cfg.User.Email,
+			When:  time.Now(),
+		}
+	}
+
+	if o.Tagger == nil {
+		return ErrMissingTagger
+	}
+
+	return nil
+}
+
+// ListOptions describes how a remote list should be performed.
+type ListOptions struct {
+	// Auth credentials, if required, to use with the remote repository.
+	Auth transport.AuthMethod
+	// InsecureSkipTLS skips SSL verification when the protocol is HTTPS.
+	InsecureSkipTLS bool
+	// CABundle specifies an additional CA bundle to use with the system cert
+	// pool.
+	CABundle []byte
+}
+
+// CleanOptions describes how a clean should be performed.
+type CleanOptions struct {
+	Dir bool
+}
+
+// GrepOptions describes how a grep should be performed.
+type GrepOptions struct {
+	// Patterns are compiled Regexp objects to be matched.
+	Patterns []*regexp.Regexp
+	// InvertMatch selects non-matching lines.
+	InvertMatch bool
+	// CommitHash is the hash of the commit from which the worktree should be
+	// derived.
+	CommitHash plumbing.Hash
+	// ReferenceName is the branch or tag name from which the worktree should
+	// be derived.
+	ReferenceName plumbing.ReferenceName
+	// PathSpecs are compiled Regexp objects of pathspecs to use in the
+	// matching.
+	PathSpecs []*regexp.Regexp
+}
+
+var (
+	ErrHashOrReference = errors.New("ambiguous options, only one of CommitHash or ReferenceName can be passed")
+)
+
+// Validate validates the fields and sets the default values.
+func (o *GrepOptions) Validate(w *Worktree) error {
+	if !o.CommitHash.IsZero() && o.ReferenceName != "" {
+		return ErrHashOrReference
+	}
+
+	// If neither CommitHash nor ReferenceName is provided, use the commit
+	// hash of the repository's head.
+	if o.CommitHash.IsZero() && o.ReferenceName == "" {
+		ref, err := w.r.Head()
+		if err != nil {
+			return err
+		}
+		o.CommitHash = ref.Hash()
+	}
+
+	return nil
+}
+
+// PlainOpenOptions describes how opening a plain repository should be
+// performed.
+type PlainOpenOptions struct {
+	// DetectDotGit defines whether parent directories should be
+	// walked until a .git directory or file is found.
+	DetectDotGit bool
+	// Enable .git/commondir support (see https://git-scm.com/docs/gitrepository-layout#Documentation/gitrepository-layout.txt).
+	// NOTE: This option will only work with the filesystem storage.
+	EnableDotGitCommonDir bool
+}
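+
+// A minimal usage sketch (illustrative only, not part of this file): opening
+// a repository from a subdirectory, assuming git.PlainOpenWithOptions
+// (defined elsewhere in go-git):
+//
+//	r, err := git.PlainOpenWithOptions("./some/subdir", &git.PlainOpenOptions{
+//		DetectDotGit: true, // walk up until a .git directory or file is found
+//	})
+//	if err != nil {
+//		log.Fatal(err)
+//	}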
+
+// Validate validates the fields and sets the default values.
+func (o *PlainOpenOptions) Validate() error { return nil }
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
new file mode 100644
index 0000000000..acaf195203
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/cache/buffer_lru.go
@@ -0,0 +1,98 @@
+package cache
+
+import (
+	"container/list"
+	"sync"
+)
+
+// BufferLRU implements an object cache with an LRU eviction policy and a
+// maximum size (measured in object size).
+type BufferLRU struct {
+	MaxSize FileSize
+
+	actualSize FileSize
+	ll         *list.List
+	cache      map[int64]*list.Element
+	mut        sync.Mutex
+}
+
+// NewBufferLRU creates a new BufferLRU with the given maximum size. The
+// maximum size will never be exceeded.
+func NewBufferLRU(maxSize FileSize) *BufferLRU {
+	return &BufferLRU{MaxSize: maxSize}
+}
+
+// NewBufferLRUDefault creates a new BufferLRU with the default cache size.
+func NewBufferLRUDefault() *BufferLRU {
+	return &BufferLRU{MaxSize: DefaultMaxSize}
+}
+
+type buffer struct {
+	Key   int64
+	Slice []byte
+}
+
+// Put puts a buffer into the cache. If the buffer is already in the cache, it
+// will be marked as used. Otherwise, it will be inserted. Buffers might be
+// evicted to make room for the new one.
+func (c *BufferLRU) Put(key int64, slice []byte) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	if c.cache == nil {
+		c.actualSize = 0
+		c.cache = make(map[int64]*list.Element, 1000)
+		c.ll = list.New()
+	}
+
+	bufSize := FileSize(len(slice))
+	if ee, ok := c.cache[key]; ok {
+		oldBuf := ee.Value.(buffer)
+		// in this case bufSize is a delta: new size - old size
+		bufSize -= FileSize(len(oldBuf.Slice))
+		c.ll.MoveToFront(ee)
+		ee.Value = buffer{key, slice}
+	} else {
+		if bufSize > c.MaxSize {
+			return
+		}
+		ee := c.ll.PushFront(buffer{key, slice})
+		c.cache[key] = ee
+	}
+
+	c.actualSize += bufSize
+	for c.actualSize > c.MaxSize {
+		last := c.ll.Back()
+		lastObj := last.Value.(buffer)
+		lastSize := FileSize(len(lastObj.Slice))
+
+		c.ll.Remove(last)
+		delete(c.cache, lastObj.Key)
+		c.actualSize -= lastSize
+	}
+}
+
+// Get returns a buffer by its key. It marks the buffer as used. If the buffer
+// is not in the cache, (nil, false) will be returned.
+func (c *BufferLRU) Get(key int64) ([]byte, bool) {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	ee, ok := c.cache[key]
+	if !ok {
+		return nil, false
+	}
+
+	c.ll.MoveToFront(ee)
+	return ee.Value.(buffer).Slice, true
+}
+
+// Clear the content of this buffer cache.
+func (c *BufferLRU) Clear() {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	c.ll = nil
+	c.cache = nil
+	c.actualSize = 0
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
new file mode 100644
index 0000000000..7b0d0c76bb
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/cache/common.go
@@ -0,0 +1,39 @@
+package cache
+
+import "github.com/go-git/go-git/v5/plumbing"
+
+const (
+	Byte FileSize = 1 << (iota * 10)
+	KiByte
+	MiByte
+	GiByte
+)
+
+type FileSize int64
+
+const DefaultMaxSize FileSize = 96 * MiByte
+
+// Object is an interface to an object cache.
+type Object interface {
+	// Put puts the given object into the cache. Whether this object will
+	// actually be put into the cache or not is implementation specific.
+	Put(o plumbing.EncodedObject)
+	// Get gets an object from the cache given its hash. The second return
+	// value is true if the object was returned, and false otherwise.
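+	//
+	// A minimal usage sketch of this package's LRU caches (illustrative
+	// only, not part of this file):
+	//
+	//	c := cache.NewBufferLRU(16 * cache.MiByte)
+	//	c.Put(1, []byte("packed object bytes"))
+	//	b, ok := c.Get(1) // b holds the stored slice, ok == true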
+ Get(k plumbing.Hash) (plumbing.EncodedObject, bool) + // Clear clears every object from the cache. + Clear() +} + +// Buffer is an interface to a buffer cache. +type Buffer interface { + // Put puts a buffer into the cache. If the buffer is already in the cache, + // it will be marked as used. Otherwise, it will be inserted. Buffer might + // be evicted to make room for the new one. + Put(key int64, slice []byte) + // Get returns a buffer by its key. It marks the buffer as used. If the + // buffer is not in the cache, (nil, false) will be returned. + Get(key int64) ([]byte, bool) + // Clear clears every object from the cache. + Clear() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go b/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go new file mode 100644 index 0000000000..c50d0d1e6c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/cache/object_lru.go @@ -0,0 +1,101 @@ +package cache + +import ( + "container/list" + "sync" + + "github.com/go-git/go-git/v5/plumbing" +) + +// ObjectLRU implements an object cache with an LRU eviction policy and a +// maximum size (measured in object size). +type ObjectLRU struct { + MaxSize FileSize + + actualSize FileSize + ll *list.List + cache map[interface{}]*list.Element + mut sync.Mutex +} + +// NewObjectLRU creates a new ObjectLRU with the given maximum size. The maximum +// size will never be exceeded. +func NewObjectLRU(maxSize FileSize) *ObjectLRU { + return &ObjectLRU{MaxSize: maxSize} +} + +// NewObjectLRUDefault creates a new ObjectLRU with the default cache size. +func NewObjectLRUDefault() *ObjectLRU { + return &ObjectLRU{MaxSize: DefaultMaxSize} +} + +// Put puts an object into the cache. If the object is already in the cache, it +// will be marked as used. Otherwise, it will be inserted. A single object might +// be evicted to make room for the new object. +func (c *ObjectLRU) Put(obj plumbing.EncodedObject) { + c.mut.Lock() + defer c.mut.Unlock() + + if c.cache == nil { + c.actualSize = 0 + c.cache = make(map[interface{}]*list.Element, 1000) + c.ll = list.New() + } + + objSize := FileSize(obj.Size()) + key := obj.Hash() + if ee, ok := c.cache[key]; ok { + oldObj := ee.Value.(plumbing.EncodedObject) + // in this case objSize is a delta: new size - old size + objSize -= FileSize(oldObj.Size()) + c.ll.MoveToFront(ee) + ee.Value = obj + } else { + if objSize > c.MaxSize { + return + } + ee := c.ll.PushFront(obj) + c.cache[key] = ee + } + + c.actualSize += objSize + for c.actualSize > c.MaxSize { + last := c.ll.Back() + if last == nil { + c.actualSize = 0 + break + } + + lastObj := last.Value.(plumbing.EncodedObject) + lastSize := FileSize(lastObj.Size()) + + c.ll.Remove(last) + delete(c.cache, lastObj.Hash()) + c.actualSize -= lastSize + } +} + +// Get returns an object by its hash. It marks the object as used. If the object +// is not in the cache, (nil, false) will be returned. +func (c *ObjectLRU) Get(k plumbing.Hash) (plumbing.EncodedObject, bool) { + c.mut.Lock() + defer c.mut.Unlock() + + ee, ok := c.cache[k] + if !ok { + return nil, false + } + + c.ll.MoveToFront(ee) + return ee.Value.(plumbing.EncodedObject), true +} + +// Clear the content of this object cache. 
+func (c *ObjectLRU) Clear() {
+	c.mut.Lock()
+	defer c.mut.Unlock()
+
+	c.ll = nil
+	c.cache = nil
+	c.actualSize = 0
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go b/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go
new file mode 100644
index 0000000000..2cd74bdc1a
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/color/color.go
@@ -0,0 +1,38 @@
+package color
+
+// TODO read colors from a github.com/go-git/go-git/plumbing/format/config.Config struct
+// TODO implement color parsing, see https://github.com/git/git/blob/v2.26.2/color.c
+
+// Colors. See https://github.com/git/git/blob/v2.26.2/color.h#L24-L53.
+const (
+	Normal       = ""
+	Reset        = "\033[m"
+	Bold         = "\033[1m"
+	Red          = "\033[31m"
+	Green        = "\033[32m"
+	Yellow       = "\033[33m"
+	Blue         = "\033[34m"
+	Magenta      = "\033[35m"
+	Cyan         = "\033[36m"
+	BoldRed      = "\033[1;31m"
+	BoldGreen    = "\033[1;32m"
+	BoldYellow   = "\033[1;33m"
+	BoldBlue     = "\033[1;34m"
+	BoldMagenta  = "\033[1;35m"
+	BoldCyan     = "\033[1;36m"
+	FaintRed     = "\033[2;31m"
+	FaintGreen   = "\033[2;32m"
+	FaintYellow  = "\033[2;33m"
+	FaintBlue    = "\033[2;34m"
+	FaintMagenta = "\033[2;35m"
+	FaintCyan    = "\033[2;36m"
+	BgRed        = "\033[41m"
+	BgGreen      = "\033[42m"
+	BgYellow     = "\033[43m"
+	BgBlue       = "\033[44m"
+	BgMagenta    = "\033[45m"
+	BgCyan       = "\033[46m"
+	Faint        = "\033[2m"
+	FaintItalic  = "\033[2;3m"
+	Reverse      = "\033[7m"
+)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/error.go
new file mode 100644
index 0000000000..a3ebed3f6c
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/error.go
@@ -0,0 +1,35 @@
+package plumbing
+
+import "fmt"
+
+type PermanentError struct {
+	Err error
+}
+
+func NewPermanentError(err error) *PermanentError {
+	if err == nil {
+		return nil
+	}
+
+	return &PermanentError{Err: err}
+}
+
+func (e *PermanentError) Error() string {
+	return fmt.Sprintf("permanent client error: %s", e.Err.Error())
+}
+
+type UnexpectedError struct {
+	Err error
+}
+
+func NewUnexpectedError(err error) *UnexpectedError {
+	if err == nil {
+		return nil
+	}
+
+	return &UnexpectedError{Err: err}
+}
+
+func (e *UnexpectedError) Error() string {
+	return fmt.Sprintf("unexpected client error: %s", e.Err.Error())
+}
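+
+// A minimal usage sketch (illustrative only, not part of this file):
+// classifying a client failure, assuming err came from a transport call:
+//
+//	if err != nil {
+//		return plumbing.NewPermanentError(err) // or NewUnexpectedError(err)
+//	}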
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
new file mode 100644
index 0000000000..b848a97961
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/filemode/filemode.go
@@ -0,0 +1,188 @@
+package filemode
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+	"strconv"
+)
+
+// A FileMode represents the kind of tree entries used by git. It
+// resembles regular file system modes, although FileModes are
+// considerably simpler (there are not so many), and there are some,
+// like Submodule, that have no file system equivalent.
+type FileMode uint32
+
+const (
+	// Empty is used as the FileMode of tree elements when comparing
+	// trees in the following situations:
+	//
+	// - the mode of tree elements before their creation.
+	// - the mode of tree elements after their deletion.
+	// - the mode of unmerged elements when checking the index.
+	//
+	// Empty has no file system equivalent. As Empty is the zero value
+	// of FileMode, it is also returned by New and NewFromOSFileMode,
+	// along with an error, when they fail.
+	Empty FileMode = 0
+	// Dir represents a directory.
+	Dir FileMode = 0040000
+	// Regular represents non-executable files. Please note this is not
+	// the same as golang regular files, which include executable files.
+	Regular FileMode = 0100644
+	// Deprecated represents non-executable files with the group-writable
+	// bit set. This mode was supported by the first versions of git,
+	// but it has since been deprecated. This library uses it
+	// internally, so you can read old packfiles, but such entries are
+	// treated as Regular when interfacing with the outside world. This
+	// is the standard git behaviour.
+	Deprecated FileMode = 0100664
+	// Executable represents executable files.
+	Executable FileMode = 0100755
+	// Symlink represents symbolic links to files.
+	Symlink FileMode = 0120000
+	// Submodule represents git submodules. This mode has no file system
+	// equivalent.
+	Submodule FileMode = 0160000
+)
+
+// New takes the octal string representation of a FileMode and returns
+// the FileMode and a nil error. If the string cannot be parsed to a
+// 32-bit unsigned octal number, it returns Empty and the parsing error.
+//
+// Example: "40000" means Dir, "100644" means Regular.
+//
+// Please note this function does not check if the returned FileMode
+// is valid in git or if it is malformed. For instance, "1" will
+// return the malformed FileMode(1) and a nil error.
+func New(s string) (FileMode, error) {
+	n, err := strconv.ParseUint(s, 8, 32)
+	if err != nil {
+		return Empty, err
+	}
+
+	return FileMode(n), nil
+}
+
+// NewFromOSFileMode returns the FileMode used by git to represent
+// the provided file system modes and a nil error on success. If the
+// file system mode cannot be mapped to any valid git mode (as with
+// sockets or named pipes), it will return Empty and an error.
+//
+// Note that some git modes cannot be generated from os.FileModes, like
+// Deprecated and Submodule; while Empty will be returned, along with an
+// error, only when the method fails.
+func NewFromOSFileMode(m os.FileMode) (FileMode, error) {
+	if m.IsRegular() {
+		if isSetTemporary(m) {
+			return Empty, fmt.Errorf("no equivalent git mode for %s", m)
+		}
+		if isSetCharDevice(m) {
+			return Empty, fmt.Errorf("no equivalent git mode for %s", m)
+		}
+		if isSetUserExecutable(m) {
+			return Executable, nil
+		}
+		return Regular, nil
+	}
+
+	if m.IsDir() {
+		return Dir, nil
+	}
+
+	if isSetSymLink(m) {
+		return Symlink, nil
+	}
+
+	return Empty, fmt.Errorf("no equivalent git mode for %s", m)
+}
+
+func isSetCharDevice(m os.FileMode) bool {
+	return m&os.ModeCharDevice != 0
+}
+
+func isSetTemporary(m os.FileMode) bool {
+	return m&os.ModeTemporary != 0
+}
+
+func isSetUserExecutable(m os.FileMode) bool {
+	return m&0100 != 0
+}
+
+func isSetSymLink(m os.FileMode) bool {
+	return m&os.ModeSymlink != 0
+}
+
+// Bytes returns a slice of 4 bytes with the mode in little-endian
+// encoding.
+func (m FileMode) Bytes() []byte {
+	ret := make([]byte, 4)
+	binary.LittleEndian.PutUint32(ret, uint32(m))
+	return ret
+}
+
+// IsMalformed reports whether the FileMode should not appear in a git
+// packfile, that is: Empty and any other mode not mentioned as a
+// constant in this package.
+func (m FileMode) IsMalformed() bool {
+	return m != Dir &&
+		m != Regular &&
+		m != Deprecated &&
+		m != Executable &&
+		m != Symlink &&
+		m != Submodule
+}
+
+// String returns the FileMode as a string in the standard git format,
+// that is, an octal number padded with zeros to 7 digits. Malformed
+// modes are printed in that same format, for easier debugging.
+//
+// Example: Regular is "0100644", Empty is "0000000".
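+//
+// A short sketch (illustrative only) combining New and String:
+//
+//	m, _ := filemode.New("100644") // m == filemode.Regular
+//	fmt.Println(m)                 // prints "0100644"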
+func (m FileMode) String() string {
+	return fmt.Sprintf("%07o", uint32(m))
+}
+
+// IsRegular reports whether the FileMode represents that of a regular file,
+// that is, either Regular or Deprecated. Please note that Executable files
+// are not regular, even though in the UNIX tradition they usually are; see
+// the IsFile method.
+func (m FileMode) IsRegular() bool {
+	return m == Regular ||
+		m == Deprecated
+}
+
+// IsFile reports whether the FileMode represents that of a file, that is,
+// Regular, Deprecated, Executable or Symlink.
+func (m FileMode) IsFile() bool {
+	return m == Regular ||
+		m == Deprecated ||
+		m == Executable ||
+		m == Symlink
+}
+
+// ToOSFileMode returns the os.FileMode to be used when creating file
+// system elements with the given git mode and a nil error on success.
+//
+// When the provided mode cannot be mapped to a valid file system mode
+// (e.g. Submodule) it returns os.FileMode(0) and an error.
+//
+// The returned file mode does not take into account the umask.
+func (m FileMode) ToOSFileMode() (os.FileMode, error) {
+	switch m {
+	case Dir:
+		return os.ModePerm | os.ModeDir, nil
+	case Submodule:
+		return os.ModePerm | os.ModeDir, nil
+	case Regular:
+		return os.FileMode(0644), nil
+	// Deprecated is no longer allowed: it is treated as a Regular instead
+	case Deprecated:
+		return os.FileMode(0644), nil
+	case Executable:
+		return os.FileMode(0755), nil
+	case Symlink:
+		return os.ModePerm | os.ModeSymlink, nil
+	}
+
+	return os.FileMode(0), fmt.Errorf("malformed mode (%s)", m)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
new file mode 100644
index 0000000000..6d689ea1e0
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/common.go
@@ -0,0 +1,109 @@
+package config
+
+// New creates a new config instance.
+func New() *Config {
+	return &Config{}
+}
+
+// Config contains all the sections, comments and includes from a config file.
+type Config struct {
+	Comment  *Comment
+	Sections Sections
+	Includes Includes
+}
+
+// Includes is a list of Includes in a config file.
+type Includes []*Include
+
+// Include is a reference to an included config file.
+type Include struct {
+	Path   string
+	Config *Config
+}
+
+// Comment string without the prefix '#' or ';'.
+type Comment string
+
+const (
+	// NoSubsection token is passed to Config.Section and Config.SetSection to
+	// represent the absence of a section.
+	NoSubsection = ""
+)
+
+// Section returns an existing section with the given name or creates a new
+// one.
+func (c *Config) Section(name string) *Section {
+	for i := len(c.Sections) - 1; i >= 0; i-- {
+		s := c.Sections[i]
+		if s.IsName(name) {
+			return s
+		}
+	}
+
+	s := &Section{Name: name}
+	c.Sections = append(c.Sections, s)
+	return s
+}
+
+// HasSection checks if the Config has a section with the specified name.
+func (c *Config) HasSection(name string) bool {
+	for _, s := range c.Sections {
+		if s.IsName(name) {
+			return true
+		}
+	}
+	return false
+}
+
+// RemoveSection removes a section from a config file.
+func (c *Config) RemoveSection(name string) *Config {
+	result := Sections{}
+	for _, s := range c.Sections {
+		if !s.IsName(name) {
+			result = append(result, s)
+		}
+	}
+
+	c.Sections = result
+	return c
+}
+
+// RemoveSubsection removes a subsection from a config file.
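+//
+// A short sketch (illustrative only) of building and pruning a Config with
+// the helpers in this file:
+//
+//	cfg := config.New()
+//	cfg.AddOption("remote", "origin", "url", "https://example.com/repo.git")
+//	cfg.RemoveSubsection("remote", "origin")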
+func (c *Config) RemoveSubsection(section string, subsection string) *Config { + for _, s := range c.Sections { + if s.IsName(section) { + result := Subsections{} + for _, ss := range s.Subsections { + if !ss.IsName(subsection) { + result = append(result, ss) + } + } + s.Subsections = result + } + } + + return c +} + +// AddOption adds an option to a given section and subsection. Use the +// NoSubsection constant for the subsection argument if no subsection is wanted. +func (c *Config) AddOption(section string, subsection string, key string, value string) *Config { + if subsection == "" { + c.Section(section).AddOption(key, value) + } else { + c.Section(section).Subsection(subsection).AddOption(key, value) + } + + return c +} + +// SetOption sets an option to a given section and subsection. Use the +// NoSubsection constant for the subsection argument if no subsection is wanted. +func (c *Config) SetOption(section string, subsection string, key string, value string) *Config { + if subsection == "" { + c.Section(section).SetOption(key, value) + } else { + c.Section(section).Subsection(subsection).SetOption(key, value) + } + + return c +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go new file mode 100644 index 0000000000..8e52d57f30 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/decoder.go @@ -0,0 +1,37 @@ +package config + +import ( + "io" + + "github.com/go-git/gcfg" +) + +// A Decoder reads and decodes config files from an input stream. +type Decoder struct { + io.Reader +} + +// NewDecoder returns a new decoder that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{r} +} + +// Decode reads the whole config from its input and stores it in the +// value pointed to by config. +func (d *Decoder) Decode(config *Config) error { + cb := func(s string, ss string, k string, v string, bv bool) error { + if ss == "" && k == "" { + config.Section(s) + return nil + } + + if ss != "" && k == "" { + config.Section(s).Subsection(ss) + return nil + } + + config.AddOption(s, ss, k, v) + return nil + } + return gcfg.ReadWithCallback(d, cb) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go new file mode 100644 index 0000000000..3986c83658 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/doc.go @@ -0,0 +1,122 @@ +// Package config implements encoding and decoding of git config files. +// +// Configuration File +// ------------------ +// +// The Git configuration file contains a number of variables that affect +// the Git commands' behavior. The `.git/config` file in each repository +// is used to store the configuration for that repository, and +// `$HOME/.gitconfig` is used to store a per-user configuration as +// fallback values for the `.git/config` file. The file `/etc/gitconfig` +// can be used to store a system-wide default configuration. +// +// The configuration variables are used by both the Git plumbing +// and the porcelains. The variables are divided into sections, wherein +// the fully qualified variable name of the variable itself is the last +// dot-separated segment and the section name is everything before the last +// dot. The variable names are case-insensitive, allow only alphanumeric +// characters and `-`, and must start with an alphabetic character. 
Some +// variables may appear multiple times; we say then that the variable is +// multivalued. +// +// Syntax +// ~~~~~~ +// +// The syntax is fairly flexible and permissive; whitespaces are mostly +// ignored. The '#' and ';' characters begin comments to the end of line, +// blank lines are ignored. +// +// The file consists of sections and variables. A section begins with +// the name of the section in square brackets and continues until the next +// section begins. Section names are case-insensitive. Only alphanumeric +// characters, `-` and `.` are allowed in section names. Each variable +// must belong to some section, which means that there must be a section +// header before the first setting of a variable. +// +// Sections can be further divided into subsections. To begin a subsection +// put its name in double quotes, separated by space from the section name, +// in the section header, like in the example below: +// +// -------- +// [section "subsection"] +// +// -------- +// +// Subsection names are case sensitive and can contain any characters except +// newline (doublequote `"` and backslash can be included by escaping them +// as `\"` and `\\`, respectively). Section headers cannot span multiple +// lines. Variables may belong directly to a section or to a given subsection. +// You can have `[section]` if you have `[section "subsection"]`, but you +// don't need to. +// +// There is also a deprecated `[section.subsection]` syntax. With this +// syntax, the subsection name is converted to lower-case and is also +// compared case sensitively. These subsection names follow the same +// restrictions as section names. +// +// All the other lines (and the remainder of the line after the section +// header) are recognized as setting variables, in the form +// 'name = value' (or just 'name', which is a short-hand to say that +// the variable is the boolean "true"). +// The variable names are case-insensitive, allow only alphanumeric characters +// and `-`, and must start with an alphabetic character. +// +// A line that defines a value can be continued to the next line by +// ending it with a `\`; the backquote and the end-of-line are +// stripped. Leading whitespaces after 'name =', the remainder of the +// line after the first comment character '#' or ';', and trailing +// whitespaces of the line are discarded unless they are enclosed in +// double quotes. Internal whitespaces within the value are retained +// verbatim. +// +// Inside double quotes, double quote `"` and backslash `\` characters +// must be escaped: use `\"` for `"` and `\\` for `\`. +// +// The following escape sequences (beside `\"` and `\\`) are recognized: +// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB) +// and `\b` for backspace (BS). Other char escape sequences (including octal +// escape sequences) are invalid. +// +// Includes +// ~~~~~~~~ +// +// You can include one config file from another by setting the special +// `include.path` variable to the name of the file to be included. The +// variable takes a pathname as its value, and is subject to tilde +// expansion. +// +// The included file is expanded immediately, as if its contents had been +// found at the location of the include directive. If the value of the +// `include.path` variable is a relative path, the path is considered to be +// relative to the configuration file in which the include directive was +// found. See below for examples. 
+// +// +// Example +// ~~~~~~~ +// +// # Core variables +// [core] +// ; Don't trust file modes +// filemode = false +// +// # Our diff algorithm +// [diff] +// external = /usr/local/bin/diff-wrapper +// renames = true +// +// [branch "devel"] +// remote = origin +// merge = refs/heads/devel +// +// # Proxy settings +// [core] +// gitProxy="ssh" for "kernel.org" +// gitProxy=default-proxy ; for the rest +// +// [include] +// path = /path/to/foo.inc ; include by absolute path +// path = foo ; expand "foo" relative to the current file +// path = ~/foo ; expand "foo" in your `$HOME` directory +// +package config diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go new file mode 100644 index 0000000000..4eac8968ad --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/encoder.go @@ -0,0 +1,77 @@ +package config + +import ( + "fmt" + "io" + "strings" +) + +// An Encoder writes config files to an output stream. +type Encoder struct { + w io.Writer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{w} +} + +// Encode writes the config in git config format to the stream of the encoder. +func (e *Encoder) Encode(cfg *Config) error { + for _, s := range cfg.Sections { + if err := e.encodeSection(s); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSection(s *Section) error { + if len(s.Options) > 0 { + if err := e.printf("[%s]\n", s.Name); err != nil { + return err + } + + if err := e.encodeOptions(s.Options); err != nil { + return err + } + } + + for _, ss := range s.Subsections { + if err := e.encodeSubsection(s.Name, ss); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeSubsection(sectionName string, s *Subsection) error { + //TODO: escape + if err := e.printf("[%s \"%s\"]\n", sectionName, s.Name); err != nil { + return err + } + + return e.encodeOptions(s.Options) +} + +func (e *Encoder) encodeOptions(opts Options) error { + for _, o := range opts { + pattern := "\t%s = %s\n" + if strings.Contains(o.Value, "\\") { + pattern = "\t%s = %q\n" + } + + if err := e.printf(pattern, o.Key, o.Value); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) printf(msg string, args ...interface{}) error { + _, err := fmt.Fprintf(e.w, msg, args...) + return err +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go new file mode 100644 index 0000000000..cad394810a --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/option.go @@ -0,0 +1,127 @@ +package config + +import ( + "fmt" + "strings" +) + +// Option defines a key/value entity in a config file. +type Option struct { + // Key preserving original caseness. + // Use IsKey instead to compare key regardless of caseness. + Key string + // Original value as string, could be not normalized. + Value string +} + +type Options []*Option + +// IsKey returns true if the given key matches +// this option's key in a case-insensitive comparison. 
+func (o *Option) IsKey(key string) bool { + return strings.EqualFold(o.Key, key) +} + +func (opts Options) GoString() string { + var strs []string + for _, opt := range opts { + strs = append(strs, fmt.Sprintf("%#v", opt)) + } + + return strings.Join(strs, ", ") +} + +// Get gets the value for the given key if set, +// otherwise it returns the empty string. +// +// Note that there is no difference +// +// This matches git behaviour since git v1.8.1-rc1, +// if there are multiple definitions of a key, the +// last one wins. +// +// See: http://article.gmane.org/gmane.linux.kernel/1407184 +// +// In order to get all possible values for the same key, +// use GetAll. +func (opts Options) Get(key string) string { + for i := len(opts) - 1; i >= 0; i-- { + o := opts[i] + if o.IsKey(key) { + return o.Value + } + } + return "" +} + +// Has checks if an Option exist with the given key. +func (opts Options) Has(key string) bool { + for _, o := range opts { + if o.IsKey(key) { + return true + } + } + return false +} + +// GetAll returns all possible values for the same key. +func (opts Options) GetAll(key string) []string { + result := []string{} + for _, o := range opts { + if o.IsKey(key) { + result = append(result, o.Value) + } + } + return result +} + +func (opts Options) withoutOption(key string) Options { + result := Options{} + for _, o := range opts { + if !o.IsKey(key) { + result = append(result, o) + } + } + return result +} + +func (opts Options) withAddedOption(key string, value string) Options { + return append(opts, &Option{key, value}) +} + +func (opts Options) withSettedOption(key string, values ...string) Options { + var result Options + var added []string + for _, o := range opts { + if !o.IsKey(key) { + result = append(result, o) + continue + } + + if contains(values, o.Value) { + added = append(added, o.Value) + result = append(result, o) + continue + } + } + + for _, value := range values { + if contains(added, value) { + continue + } + + result = result.withAddedOption(key, value) + } + + return result +} + +func contains(haystack []string, needle string) bool { + for _, s := range haystack { + if s == needle { + return true + } + } + + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go new file mode 100644 index 0000000000..07f72f35af --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/config/section.go @@ -0,0 +1,181 @@ +package config + +import ( + "fmt" + "strings" +) + +// Section is the representation of a section inside git configuration files. +// Each Section contains Options that are used by both the Git plumbing +// and the porcelains. +// Sections can be further divided into subsections. To begin a subsection +// put its name in double quotes, separated by space from the section name, +// in the section header, like in the example below: +// +// [section "subsection"] +// +// All the other lines (and the remainder of the line after the section header) +// are recognized as option variables, in the form "name = value" (or just name, +// which is a short-hand to say that the variable is the boolean "true"). 
+// The variable names are case-insensitive, allow only alphanumeric characters
+// and -, and must start with an alphabetic character:
+//
+//	[section "subsection1"]
+//		option1 = value1
+//		option2
+//	[section "subsection2"]
+//		option3 = value2
+//
+type Section struct {
+	Name        string
+	Options     Options
+	Subsections Subsections
+}
+
+type Subsection struct {
+	Name    string
+	Options Options
+}
+
+type Sections []*Section
+
+func (s Sections) GoString() string {
+	var strs []string
+	for _, ss := range s {
+		strs = append(strs, fmt.Sprintf("%#v", ss))
+	}
+
+	return strings.Join(strs, ", ")
+}
+
+type Subsections []*Subsection
+
+func (s Subsections) GoString() string {
+	var strs []string
+	for _, ss := range s {
+		strs = append(strs, fmt.Sprintf("%#v", ss))
+	}
+
+	return strings.Join(strs, ", ")
+}
+
+// IsName checks if the provided name is equal to the Section name, in a
+// case-insensitive comparison.
+func (s *Section) IsName(name string) bool {
+	return strings.EqualFold(s.Name, name)
+}
+
+// Subsection returns a Subsection from the specified Section. If the
+// Subsection does not exist, a new one is created and added to the Section.
+func (s *Section) Subsection(name string) *Subsection {
+	for i := len(s.Subsections) - 1; i >= 0; i-- {
+		ss := s.Subsections[i]
+		if ss.IsName(name) {
+			return ss
+		}
+	}
+
+	ss := &Subsection{Name: name}
+	s.Subsections = append(s.Subsections, ss)
+	return ss
+}
+
+// HasSubsection checks if the Section has a Subsection with the specified
+// name.
+func (s *Section) HasSubsection(name string) bool {
+	for _, ss := range s.Subsections {
+		if ss.IsName(name) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// RemoveSubsection removes a subsection from a Section.
+func (s *Section) RemoveSubsection(name string) *Section {
+	result := Subsections{}
+	for _, s := range s.Subsections {
+		if !s.IsName(name) {
+			result = append(result, s)
+		}
+	}
+
+	s.Subsections = result
+	return s
+}
+
+// Option returns the value for the specified key. An empty string is returned
+// if the key does not exist.
+func (s *Section) Option(key string) string {
+	return s.Options.Get(key)
+}
+
+// OptionAll returns all possible values for an option with the specified key.
+// If the option does not exist, an empty slice will be returned.
+func (s *Section) OptionAll(key string) []string {
+	return s.Options.GetAll(key)
+}
+
+// HasOption checks if the Section has an Option with the given key.
+func (s *Section) HasOption(key string) bool {
+	return s.Options.Has(key)
+}
+
+// AddOption adds a new Option to the Section. The updated Section is
+// returned.
+func (s *Section) AddOption(key string, value string) *Section {
+	s.Options = s.Options.withAddedOption(key, value)
+	return s
+}
+
+// SetOption adds a new Option to the Section. If the option already exists,
+// it is replaced. The updated Section is returned.
+func (s *Section) SetOption(key string, value string) *Section {
+	s.Options = s.Options.withSettedOption(key, value)
+	return s
+}
+
+// RemoveOption removes the option with the specified key. The updated Section
+// is returned.
+func (s *Section) RemoveOption(key string) *Section {
+	s.Options = s.Options.withoutOption(key)
+	return s
+}
+
+// IsName checks if the name of the subsection is exactly the specified name.
+func (s *Subsection) IsName(name string) bool {
+	return s.Name == name
+}
+
+// Option returns an option with the specified key. If the option does not
+// exist, an empty string will be returned.
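+//
+// A short sketch (illustrative only) of option handling, analogous for
+// Section and Subsection:
+//
+//	s := &config.Section{Name: "core"}
+//	s.SetOption("bare", "true")
+//	s.SetOption("bare", "false") // SetOption replaces the existing value
+//	_ = s.Option("bare")         // "false"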
+func (s *Subsection) Option(key string) string { + return s.Options.Get(key) +} + +// OptionAll returns all possible values for an option with the specified key. +// If the option does not exists, an empty slice will be returned. +func (s *Subsection) OptionAll(key string) []string { + return s.Options.GetAll(key) +} + +// HasOption checks if the Subsection has an Option with the given key. +func (s *Subsection) HasOption(key string) bool { + return s.Options.Has(key) +} + +// AddOption adds a new Option to the Subsection. The updated Subsection is returned. +func (s *Subsection) AddOption(key string, value string) *Subsection { + s.Options = s.Options.withAddedOption(key, value) + return s +} + +// SetOption adds a new Option to the Subsection. If the option already exists, is replaced. +// The updated Subsection is returned. +func (s *Subsection) SetOption(key string, value ...string) *Subsection { + s.Options = s.Options.withSettedOption(key, value...) + return s +} + +// RemoveOption removes the option with the specified key. The updated Subsection is returned. +func (s *Subsection) RemoveOption(key string) *Subsection { + s.Options = s.Options.withoutOption(key) + return s +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go new file mode 100644 index 0000000000..6fd4158462 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/colorconfig.go @@ -0,0 +1,97 @@ +package diff + +import "github.com/go-git/go-git/v5/plumbing/color" + +// A ColorKey is a key into a ColorConfig map and also equal to the key in the +// diff.color subsection of the config. See +// https://github.com/git/git/blob/v2.26.2/diff.c#L83-L106. +type ColorKey string + +// ColorKeys. +const ( + Context ColorKey = "context" + Meta ColorKey = "meta" + Frag ColorKey = "frag" + Old ColorKey = "old" + New ColorKey = "new" + Commit ColorKey = "commit" + Whitespace ColorKey = "whitespace" + Func ColorKey = "func" + OldMoved ColorKey = "oldMoved" + OldMovedAlternative ColorKey = "oldMovedAlternative" + OldMovedDimmed ColorKey = "oldMovedDimmed" + OldMovedAlternativeDimmed ColorKey = "oldMovedAlternativeDimmed" + NewMoved ColorKey = "newMoved" + NewMovedAlternative ColorKey = "newMovedAlternative" + NewMovedDimmed ColorKey = "newMovedDimmed" + NewMovedAlternativeDimmed ColorKey = "newMovedAlternativeDimmed" + ContextDimmed ColorKey = "contextDimmed" + OldDimmed ColorKey = "oldDimmed" + NewDimmed ColorKey = "newDimmed" + ContextBold ColorKey = "contextBold" + OldBold ColorKey = "oldBold" + NewBold ColorKey = "newBold" +) + +// A ColorConfig is a color configuration. A nil or empty ColorConfig +// corresponds to no color. +type ColorConfig map[ColorKey]string + +// A ColorConfigOption sets an option on a ColorConfig. +type ColorConfigOption func(ColorConfig) + +// WithColor sets the color for key. +func WithColor(key ColorKey, color string) ColorConfigOption { + return func(cc ColorConfig) { + cc[key] = color + } +} + +// defaultColorConfig is the default color configuration. See +// https://github.com/git/git/blob/v2.26.2/diff.c#L57-L81. 
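+//
+// A short sketch (illustrative only): NewColorConfig below starts from these
+// defaults, and WithColor overrides a single key:
+//
+//	cc := diff.NewColorConfig(diff.WithColor(diff.New, color.BoldGreen))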
+var defaultColorConfig = ColorConfig{ + Context: color.Normal, + Meta: color.Bold, + Frag: color.Cyan, + Old: color.Red, + New: color.Green, + Commit: color.Yellow, + Whitespace: color.BgRed, + Func: color.Normal, + OldMoved: color.BoldMagenta, + OldMovedAlternative: color.BoldBlue, + OldMovedDimmed: color.Faint, + OldMovedAlternativeDimmed: color.FaintItalic, + NewMoved: color.BoldCyan, + NewMovedAlternative: color.BoldYellow, + NewMovedDimmed: color.Faint, + NewMovedAlternativeDimmed: color.FaintItalic, + ContextDimmed: color.Faint, + OldDimmed: color.FaintRed, + NewDimmed: color.FaintGreen, + ContextBold: color.Bold, + OldBold: color.BoldRed, + NewBold: color.BoldGreen, +} + +// NewColorConfig returns a new ColorConfig. +func NewColorConfig(options ...ColorConfigOption) ColorConfig { + cc := make(ColorConfig) + for key, value := range defaultColorConfig { + cc[key] = value + } + for _, option := range options { + option(cc) + } + return cc +} + +// Reset returns the ANSI escape sequence to reset the color with key set from +// cc. If no color was set then no reset is needed so it returns the empty +// string. +func (cc ColorConfig) Reset(key ColorKey) string { + if cc[key] == "" { + return "" + } + return color.Reset +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go new file mode 100644 index 0000000000..39a66a1a80 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/patch.go @@ -0,0 +1,58 @@ +package diff + +import ( + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" +) + +// Operation defines the operation of a diff item. +type Operation int + +const ( + // Equal item represents a equals diff. + Equal Operation = iota + // Add item represents an insert diff. + Add + // Delete item represents a delete diff. + Delete +) + +// Patch represents a collection of steps to transform several files. +type Patch interface { + // FilePatches returns a slice of patches per file. + FilePatches() []FilePatch + // Message returns an optional message that can be at the top of the + // Patch representation. + Message() string +} + +// FilePatch represents the necessary steps to transform one file to another. +type FilePatch interface { + // IsBinary returns true if this patch is representing a binary file. + IsBinary() bool + // Files returns the from and to Files, with all the necessary metadata to + // about them. If the patch creates a new file, "from" will be nil. + // If the patch deletes a file, "to" will be nil. + Files() (from, to File) + // Chunks returns a slice of ordered changes to transform "from" File to + // "to" File. If the file is a binary one, Chunks will be empty. + Chunks() []Chunk +} + +// File contains all the file metadata necessary to print some patch formats. +type File interface { + // Hash returns the File Hash. + Hash() plumbing.Hash + // Mode returns the FileMode. + Mode() filemode.FileMode + // Path returns the complete Path to the file, including the filename. + Path() string +} + +// Chunk represents a portion of a file transformation to another. +type Chunk interface { + // Content contains the portion of the file. + Content() string + // Type contains the Operation to do with this Chunk. 
+ Type() Operation +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go new file mode 100644 index 0000000000..fa605b1985 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/diff/unified_encoder.go @@ -0,0 +1,395 @@ +package diff + +import ( + "fmt" + "io" + "regexp" + "strconv" + "strings" + + "github.com/go-git/go-git/v5/plumbing" +) + +// DefaultContextLines is the default number of context lines. +const DefaultContextLines = 3 + +var ( + splitLinesRegexp = regexp.MustCompile(`[^\n]*(\n|$)`) + + operationChar = map[Operation]byte{ + Add: '+', + Delete: '-', + Equal: ' ', + } + + operationColorKey = map[Operation]ColorKey{ + Add: New, + Delete: Old, + Equal: Context, + } +) + +// UnifiedEncoder encodes an unified diff into the provided Writer. It does not +// support similarity index for renames or sorting hash representations. +type UnifiedEncoder struct { + io.Writer + + // contextLines is the count of unchanged lines that will appear surrounding + // a change. + contextLines int + + // srcPrefix and dstPrefix are prepended to file paths when encoding a diff. + srcPrefix string + dstPrefix string + + // colorConfig is the color configuration. The default is no color. + color ColorConfig +} + +// NewUnifiedEncoder returns a new UnifiedEncoder that writes to w. +func NewUnifiedEncoder(w io.Writer, contextLines int) *UnifiedEncoder { + return &UnifiedEncoder{ + Writer: w, + srcPrefix: "a/", + dstPrefix: "b/", + contextLines: contextLines, + } +} + +// SetColor sets e's color configuration and returns e. +func (e *UnifiedEncoder) SetColor(colorConfig ColorConfig) *UnifiedEncoder { + e.color = colorConfig + return e +} + +// SetSrcPrefix sets e's srcPrefix and returns e. +func (e *UnifiedEncoder) SetSrcPrefix(prefix string) *UnifiedEncoder { + e.srcPrefix = prefix + return e +} + +// SetDstPrefix sets e's dstPrefix and returns e. +func (e *UnifiedEncoder) SetDstPrefix(prefix string) *UnifiedEncoder { + e.dstPrefix = prefix + return e +} + +// Encode encodes patch. 
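+//
+// A minimal usage sketch (illustrative only), assuming a Patch value such as
+// one produced elsewhere in go-git (e.g. from a commit diff):
+//
+//	e := diff.NewUnifiedEncoder(os.Stdout, diff.DefaultContextLines)
+//	if err := e.Encode(patch); err != nil {
+//		log.Fatal(err)
+//	}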
+func (e *UnifiedEncoder) Encode(patch Patch) error { + sb := &strings.Builder{} + + if message := patch.Message(); message != "" { + sb.WriteString(message) + if !strings.HasSuffix(message, "\n") { + sb.WriteByte('\n') + } + } + + for _, filePatch := range patch.FilePatches() { + e.writeFilePatchHeader(sb, filePatch) + g := newHunksGenerator(filePatch.Chunks(), e.contextLines) + for _, hunk := range g.Generate() { + hunk.writeTo(sb, e.color) + } + } + + _, err := e.Write([]byte(sb.String())) + return err +} + +func (e *UnifiedEncoder) writeFilePatchHeader(sb *strings.Builder, filePatch FilePatch) { + from, to := filePatch.Files() + if from == nil && to == nil { + return + } + isBinary := filePatch.IsBinary() + + var lines []string + switch { + case from != nil && to != nil: + hashEquals := from.Hash() == to.Hash() + lines = append(lines, + fmt.Sprintf("diff --git %s%s %s%s", + e.srcPrefix, from.Path(), e.dstPrefix, to.Path()), + ) + if from.Mode() != to.Mode() { + lines = append(lines, + fmt.Sprintf("old mode %o", from.Mode()), + fmt.Sprintf("new mode %o", to.Mode()), + ) + } + if from.Path() != to.Path() { + lines = append(lines, + fmt.Sprintf("rename from %s", from.Path()), + fmt.Sprintf("rename to %s", to.Path()), + ) + } + if from.Mode() != to.Mode() && !hashEquals { + lines = append(lines, + fmt.Sprintf("index %s..%s", from.Hash(), to.Hash()), + ) + } else if !hashEquals { + lines = append(lines, + fmt.Sprintf("index %s..%s %o", from.Hash(), to.Hash(), from.Mode()), + ) + } + if !hashEquals { + lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), e.dstPrefix+to.Path(), isBinary) + } + case from == nil: + lines = append(lines, + fmt.Sprintf("diff --git %s %s", e.srcPrefix+to.Path(), e.dstPrefix+to.Path()), + fmt.Sprintf("new file mode %o", to.Mode()), + fmt.Sprintf("index %s..%s", plumbing.ZeroHash, to.Hash()), + ) + lines = e.appendPathLines(lines, "/dev/null", e.dstPrefix+to.Path(), isBinary) + case to == nil: + lines = append(lines, + fmt.Sprintf("diff --git %s %s", e.srcPrefix+from.Path(), e.dstPrefix+from.Path()), + fmt.Sprintf("deleted file mode %o", from.Mode()), + fmt.Sprintf("index %s..%s", from.Hash(), plumbing.ZeroHash), + ) + lines = e.appendPathLines(lines, e.srcPrefix+from.Path(), "/dev/null", isBinary) + } + + sb.WriteString(e.color[Meta]) + sb.WriteString(lines[0]) + for _, line := range lines[1:] { + sb.WriteByte('\n') + sb.WriteString(line) + } + sb.WriteString(e.color.Reset(Meta)) + sb.WriteByte('\n') +} + +func (e *UnifiedEncoder) appendPathLines(lines []string, fromPath, toPath string, isBinary bool) []string { + if isBinary { + return append(lines, + fmt.Sprintf("Binary files %s and %s differ", fromPath, toPath), + ) + } + return append(lines, + fmt.Sprintf("--- %s", fromPath), + fmt.Sprintf("+++ %s", toPath), + ) +} + +type hunksGenerator struct { + fromLine, toLine int + ctxLines int + chunks []Chunk + current *hunk + hunks []*hunk + beforeContext, afterContext []string +} + +func newHunksGenerator(chunks []Chunk, ctxLines int) *hunksGenerator { + return &hunksGenerator{ + chunks: chunks, + ctxLines: ctxLines, + } +} + +func (g *hunksGenerator) Generate() []*hunk { + for i, chunk := range g.chunks { + lines := splitLines(chunk.Content()) + nLines := len(lines) + + switch chunk.Type() { + case Equal: + g.fromLine += nLines + g.toLine += nLines + g.processEqualsLines(lines, i) + case Delete: + if nLines != 0 { + g.fromLine++ + } + + g.processHunk(i, chunk.Type()) + g.fromLine += nLines - 1 + g.current.AddOp(chunk.Type(), lines...) 
+ case Add: + if nLines != 0 { + g.toLine++ + } + g.processHunk(i, chunk.Type()) + g.toLine += nLines - 1 + g.current.AddOp(chunk.Type(), lines...) + } + + if i == len(g.chunks)-1 && g.current != nil { + g.hunks = append(g.hunks, g.current) + } + } + + return g.hunks +} + +func (g *hunksGenerator) processHunk(i int, op Operation) { + if g.current != nil { + return + } + + var ctxPrefix string + linesBefore := len(g.beforeContext) + if linesBefore > g.ctxLines { + ctxPrefix = g.beforeContext[linesBefore-g.ctxLines-1] + g.beforeContext = g.beforeContext[linesBefore-g.ctxLines:] + linesBefore = g.ctxLines + } + + g.current = &hunk{ctxPrefix: strings.TrimSuffix(ctxPrefix, "\n")} + g.current.AddOp(Equal, g.beforeContext...) + + switch op { + case Delete: + g.current.fromLine, g.current.toLine = + g.addLineNumbers(g.fromLine, g.toLine, linesBefore, i, Add) + case Add: + g.current.toLine, g.current.fromLine = + g.addLineNumbers(g.toLine, g.fromLine, linesBefore, i, Delete) + } + + g.beforeContext = nil +} + +// addLineNumbers obtains the line numbers in a new chunk. +func (g *hunksGenerator) addLineNumbers(la, lb int, linesBefore int, i int, op Operation) (cla, clb int) { + cla = la - linesBefore + // we need to search for a reference for the next diff + switch { + case linesBefore != 0 && g.ctxLines != 0: + if lb > g.ctxLines { + clb = lb - g.ctxLines + 1 + } else { + clb = 1 + } + case g.ctxLines == 0: + clb = lb + case i != len(g.chunks)-1: + next := g.chunks[i+1] + if next.Type() == op || next.Type() == Equal { + // this diff will be into this chunk + clb = lb + 1 + } + } + + return +} + +func (g *hunksGenerator) processEqualsLines(ls []string, i int) { + if g.current == nil { + g.beforeContext = append(g.beforeContext, ls...) + return + } + + g.afterContext = append(g.afterContext, ls...) + if len(g.afterContext) <= g.ctxLines*2 && i != len(g.chunks)-1 { + g.current.AddOp(Equal, g.afterContext...) + g.afterContext = nil + } else { + ctxLines := g.ctxLines + if ctxLines > len(g.afterContext) { + ctxLines = len(g.afterContext) + } + g.current.AddOp(Equal, g.afterContext[:ctxLines]...) 
+ g.hunks = append(g.hunks, g.current) + + g.current = nil + g.beforeContext = g.afterContext[ctxLines:] + g.afterContext = nil + } +} + +func splitLines(s string) []string { + out := splitLinesRegexp.FindAllString(s, -1) + if out[len(out)-1] == "" { + out = out[:len(out)-1] + } + return out +} + +type hunk struct { + fromLine int + toLine int + + fromCount int + toCount int + + ctxPrefix string + ops []*op +} + +func (h *hunk) writeTo(sb *strings.Builder, color ColorConfig) { + sb.WriteString(color[Frag]) + sb.WriteString("@@ -") + + if h.fromCount == 1 { + sb.WriteString(strconv.Itoa(h.fromLine)) + } else { + sb.WriteString(strconv.Itoa(h.fromLine)) + sb.WriteByte(',') + sb.WriteString(strconv.Itoa(h.fromCount)) + } + + sb.WriteString(" +") + + if h.toCount == 1 { + sb.WriteString(strconv.Itoa(h.toLine)) + } else { + sb.WriteString(strconv.Itoa(h.toLine)) + sb.WriteByte(',') + sb.WriteString(strconv.Itoa(h.toCount)) + } + + sb.WriteString(" @@") + sb.WriteString(color.Reset(Frag)) + + if h.ctxPrefix != "" { + sb.WriteByte(' ') + sb.WriteString(color[Func]) + sb.WriteString(h.ctxPrefix) + sb.WriteString(color.Reset(Func)) + } + + sb.WriteByte('\n') + + for _, op := range h.ops { + op.writeTo(sb, color) + } +} + +func (h *hunk) AddOp(t Operation, ss ...string) { + n := len(ss) + switch t { + case Add: + h.toCount += n + case Delete: + h.fromCount += n + case Equal: + h.toCount += n + h.fromCount += n + } + + for _, s := range ss { + h.ops = append(h.ops, &op{s, t}) + } +} + +type op struct { + text string + t Operation +} + +func (o *op) writeTo(sb *strings.Builder, color ColorConfig) { + colorKey := operationColorKey[o.t] + sb.WriteString(color[colorKey]) + sb.WriteByte(operationChar[o.t]) + if strings.HasSuffix(o.text, "\n") { + sb.WriteString(strings.TrimSuffix(o.text, "\n")) + } else { + sb.WriteString(o.text + "\n\\ No newline at end of file") + } + sb.WriteString(color.Reset(colorKey)) + sb.WriteByte('\n') +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go new file mode 100644 index 0000000000..7cea50cd8f --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go @@ -0,0 +1,135 @@ +package gitignore + +import ( + "bufio" + "bytes" + "io/ioutil" + "os" + "strings" + + "github.com/go-git/go-billy/v5" + "github.com/go-git/go-git/v5/plumbing/format/config" + gioutil "github.com/go-git/go-git/v5/utils/ioutil" +) + +const ( + commentPrefix = "#" + coreSection = "core" + excludesfile = "excludesfile" + gitDir = ".git" + gitignoreFile = ".gitignore" + gitconfigFile = ".gitconfig" + systemFile = "/etc/gitconfig" +) + +// readIgnoreFile reads a specific git ignore file. +func readIgnoreFile(fs billy.Filesystem, path []string, ignoreFile string) (ps []Pattern, err error) { + f, err := fs.Open(fs.Join(append(path, ignoreFile)...)) + if err == nil { + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + s := scanner.Text() + if !strings.HasPrefix(s, commentPrefix) && len(strings.TrimSpace(s)) > 0 { + ps = append(ps, ParsePattern(s, path)) + } + } + } else if !os.IsNotExist(err) { + return nil, err + } + + return +} + +// ReadPatterns reads gitignore patterns recursively traversing through the directory +// structure. The result is in the ascending order of priority (last higher). 
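Stepping back before the gitignore helpers continue below: the unified-diff encoder completed above is typically driven as in the following sketch. This is not part of the patch; it assumes the `NewUnifiedEncoder` constructor and `DefaultContextLines` constant defined elsewhere in the same vendored file, and a repository with at least two commits. `object.Patch` satisfies the `Patch` interface that `Encode` consumes.

```go
package main

import (
	"log"
	"os"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/format/diff"
)

func main() {
	repo, err := git.PlainOpen(".") // any repository with at least two commits
	if err != nil {
		log.Fatal(err)
	}

	head, err := repo.Head()
	if err != nil {
		log.Fatal(err)
	}
	commit, err := repo.CommitObject(head.Hash())
	if err != nil {
		log.Fatal(err)
	}
	parent, err := commit.Parent(0)
	if err != nil {
		log.Fatal(err)
	}

	// object.Patch implements the diff.Patch interface consumed by Encode.
	patch, err := parent.Patch(commit)
	if err != nil {
		log.Fatal(err)
	}

	enc := diff.NewUnifiedEncoder(os.Stdout, diff.DefaultContextLines)
	if err := enc.Encode(patch); err != nil {
		log.Fatal(err)
	}
}
```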
+func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error) {
+	ps, _ = readIgnoreFile(fs, path, gitignoreFile)
+
+	var fis []os.FileInfo
+	fis, err = fs.ReadDir(fs.Join(path...))
+	if err != nil {
+		return
+	}
+
+	for _, fi := range fis {
+		if fi.IsDir() && fi.Name() != gitDir {
+			var subps []Pattern
+			subps, err = ReadPatterns(fs, append(path, fi.Name()))
+			if err != nil {
+				return
+			}
+
+			if len(subps) > 0 {
+				ps = append(ps, subps...)
+			}
+		}
+	}
+
+	return
+}
+
+func loadPatterns(fs billy.Filesystem, path string) (ps []Pattern, err error) {
+	f, err := fs.Open(path)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	defer gioutil.CheckClose(f, &err)
+
+	b, err := ioutil.ReadAll(f)
+	if err != nil {
+		return
+	}
+
+	d := config.NewDecoder(bytes.NewBuffer(b))
+
+	raw := config.New()
+	if err = d.Decode(raw); err != nil {
+		return
+	}
+
+	s := raw.Section(coreSection)
+	efo := s.Options.Get(excludesfile)
+	if efo == "" {
+		return nil, nil
+	}
+
+	ps, err = readIgnoreFile(fs, nil, efo)
+	if os.IsNotExist(err) {
+		return nil, nil
+	}
+
+	return
+}
+
+// LoadGlobalPatterns loads gitignore patterns from the gitignore file
+// declared in a user's ~/.gitconfig file. If the ~/.gitconfig file does not
+// exist the function will return nil. If the core.excludesfile property
+// is not declared, the function will return nil. If the file pointed to by
+// the core.excludesfile property does not exist, the function will return nil.
+//
+// The function assumes fs is rooted at the root filesystem.
+func LoadGlobalPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
+	home, err := os.UserHomeDir()
+	if err != nil {
+		return
+	}
+
+	return loadPatterns(fs, fs.Join(home, gitconfigFile))
+}
+
+// LoadSystemPatterns loads gitignore patterns from the gitignore file
+// declared in a system's /etc/gitconfig file. If the /etc/gitconfig file does
+// not exist the function will return nil. If the core.excludesfile property
+// is not declared, the function will return nil. If the file pointed to by
+// the core.excludesfile property does not exist, the function will return nil.
+//
+// The function assumes fs is rooted at the root filesystem.
+func LoadSystemPatterns(fs billy.Filesystem) (ps []Pattern, err error) {
+	return loadPatterns(fs, systemFile)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go
new file mode 100644
index 0000000000..eecd4baccb
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/doc.go
@@ -0,0 +1,70 @@
+// Package gitignore implements matching file system paths to gitignore patterns that
+// can be automatically read from a git repository tree in the order of definition
+// priorities. It supports all pattern formats as specified in the original gitignore
+// documentation, copied below:
+//
+// Pattern format
+// ==============
+//
+// - A blank line matches no files, so it can serve as a separator for readability.
+//
+// - A line starting with # serves as a comment. Put a backslash ("\") in front of
+// the first hash for patterns that begin with a hash.
+//
+// - Trailing spaces are ignored unless they are quoted with backslash ("\").
+//
+// - An optional prefix "!" which negates the pattern; any matching file excluded
+// by a previous pattern will become included again. It is not possible to
+// re-include a file if a parent directory of that file is excluded.
+// Git doesn’t list excluded directories for performance reasons, so
+// any patterns on contained files have no effect, no matter where they are
+// defined. Put a backslash ("\") in front of the first "!" for patterns
+// that begin with a literal "!", for example, "\!important!.txt".
+//
+// - If the pattern ends with a slash, it is removed for the purpose of the
+// following description, but it would only find a match with a directory.
+// In other words, foo/ will match a directory foo and paths underneath it,
+// but will not match a regular file or a symbolic link foo (this is consistent
+// with the way pathspec works in general in Git).
+//
+// - If the pattern does not contain a slash /, Git treats it as a shell glob
+// pattern and checks for a match against the pathname relative to the location
+// of the .gitignore file (relative to the toplevel of the work tree if not
+// from a .gitignore file).
+//
+// - Otherwise, Git treats the pattern as a shell glob suitable for consumption
+// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will
+// not match a / in the pathname. For example, "Documentation/*.html" matches
+// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
+// "tools/perf/Documentation/perf.html".
+//
+// - A leading slash matches the beginning of the pathname. For example,
+// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c".
+//
+// Two consecutive asterisks ("**") in patterns matched against full pathname
+// may have special meaning:
+//
+// - A leading "**" followed by a slash means match in all directories.
+// For example, "**/foo" matches file or directory "foo" anywhere, the same as
+// pattern "foo". "**/foo/bar" matches file or directory "bar"
+// anywhere that is directly under directory "foo".
+//
+// - A trailing "/**" matches everything inside. For example, "abc/**" matches
+// all files inside directory "abc", relative to the location of the
+// .gitignore file, with infinite depth.
+//
+// - A slash followed by two consecutive asterisks then a slash matches
+// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b",
+// "a/x/y/b" and so on.
+//
+// - Other consecutive asterisks are considered invalid.
+//
+// Copyright and license
+// =====================
+//
+// Copyright (c) Oleg Sklyar, Silvertern and source{d}
+//
+// The package code was donated to source{d} to include, modify and develop
+// further as a part of the `go-git` project, release it on the license of
+// the whole project or delete it from the project.
+package gitignore
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go
new file mode 100644
index 0000000000..bd1e9e2d4c
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/matcher.go
@@ -0,0 +1,30 @@
+package gitignore
+
+// Matcher defines a global multi-pattern matcher for gitignore patterns
+type Matcher interface {
+	// Match matches patterns in the order of priorities. As soon as an inclusion or
+	// exclusion is found, no further matching is performed.
+	Match(path []string, isDir bool) bool
+}
+
+// NewMatcher constructs a new global matcher. Patterns must be given in the order of
+// increasing priority: the most generic settings files first, then the content of the
+// repo's root .gitignore, then the content of .gitignore files further down the path
+// of the repo, and finally the content of any command line arguments.
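The last-match-wins semantics described above (with "!" re-including files) can be seen end to end in a small sketch; `ParsePattern` is defined in pattern.go just below, so only the usage here is new:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
)

func main() {
	// Patterns in increasing priority; the matcher walks them backwards.
	ps := []gitignore.Pattern{
		gitignore.ParsePattern("*.log", nil),     // exclude all .log files...
		gitignore.ParsePattern("!keep.log", nil), // ...but re-include keep.log
	}
	m := gitignore.NewMatcher(ps)

	fmt.Println(m.Match([]string{"build", "debug.log"}, false)) // true: excluded by "*.log"
	fmt.Println(m.Match([]string{"keep.log"}, false))           // false: the later "!" pattern wins
}
```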
+func NewMatcher(ps []Pattern) Matcher { + return &matcher{ps} +} + +type matcher struct { + patterns []Pattern +} + +func (m *matcher) Match(path []string, isDir bool) bool { + n := len(m.patterns) + for i := n - 1; i >= 0; i-- { + if match := m.patterns[i].Match(path, isDir); match > NoMatch { + return match == Exclude + } + } + return false +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go new file mode 100644 index 0000000000..098cb50212 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/pattern.go @@ -0,0 +1,153 @@ +package gitignore + +import ( + "path/filepath" + "strings" +) + +// MatchResult defines outcomes of a match, no match, exclusion or inclusion. +type MatchResult int + +const ( + // NoMatch defines the no match outcome of a match check + NoMatch MatchResult = iota + // Exclude defines an exclusion of a file as a result of a match check + Exclude + // Include defines an explicit inclusion of a file as a result of a match check + Include +) + +const ( + inclusionPrefix = "!" + zeroToManyDirs = "**" + patternDirSep = "/" +) + +// Pattern defines a single gitignore pattern. +type Pattern interface { + // Match matches the given path to the pattern. + Match(path []string, isDir bool) MatchResult +} + +type pattern struct { + domain []string + pattern []string + inclusion bool + dirOnly bool + isGlob bool +} + +// ParsePattern parses a gitignore pattern string into the Pattern structure. +func ParsePattern(p string, domain []string) Pattern { + res := pattern{domain: domain} + + if strings.HasPrefix(p, inclusionPrefix) { + res.inclusion = true + p = p[1:] + } + + if !strings.HasSuffix(p, "\\ ") { + p = strings.TrimRight(p, " ") + } + + if strings.HasSuffix(p, patternDirSep) { + res.dirOnly = true + p = p[:len(p)-1] + } + + if strings.Contains(p, patternDirSep) { + res.isGlob = true + } + + res.pattern = strings.Split(p, patternDirSep) + return &res +} + +func (p *pattern) Match(path []string, isDir bool) MatchResult { + if len(path) <= len(p.domain) { + return NoMatch + } + for i, e := range p.domain { + if path[i] != e { + return NoMatch + } + } + + path = path[len(p.domain):] + if p.isGlob && !p.globMatch(path, isDir) { + return NoMatch + } else if !p.isGlob && !p.simpleNameMatch(path, isDir) { + return NoMatch + } + + if p.inclusion { + return Include + } else { + return Exclude + } +} + +func (p *pattern) simpleNameMatch(path []string, isDir bool) bool { + for i, name := range path { + if match, err := filepath.Match(p.pattern[0], name); err != nil { + return false + } else if !match { + continue + } + if p.dirOnly && !isDir && i == len(path)-1 { + return false + } + return true + } + return false +} + +func (p *pattern) globMatch(path []string, isDir bool) bool { + matched := false + canTraverse := false + for i, pattern := range p.pattern { + if pattern == "" { + canTraverse = false + continue + } + if pattern == zeroToManyDirs { + if i == len(p.pattern)-1 { + break + } + canTraverse = true + continue + } + if strings.Contains(pattern, zeroToManyDirs) { + return false + } + if len(path) == 0 { + return false + } + if canTraverse { + canTraverse = false + for len(path) > 0 { + e := path[0] + path = path[1:] + if match, err := filepath.Match(pattern, e); err != nil { + return false + } else if match { + matched = true + break + } else if len(path) == 0 { + // if nothing left then fail + matched = false + } + } + } else { + if match, 
err := filepath.Match(pattern, path[0]); err != nil || !match { + return false + } + matched = true + path = path[1:] + } + } + if matched && p.dirOnly && !isDir && len(path) == 0 { + matched = false + } + return matched +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go new file mode 100644 index 0000000000..7768bd6505 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/decoder.go @@ -0,0 +1,177 @@ +package idxfile + +import ( + "bufio" + "bytes" + "errors" + "io" + + "github.com/go-git/go-git/v5/utils/binary" +) + +var ( + // ErrUnsupportedVersion is returned by Decode when the idx file version + // is not supported. + ErrUnsupportedVersion = errors.New("Unsupported version") + // ErrMalformedIdxFile is returned by Decode when the idx file is corrupted. + ErrMalformedIdxFile = errors.New("Malformed IDX file") +) + +const ( + fanout = 256 + objectIDLength = 20 +) + +// Decoder reads and decodes idx files from an input stream. +type Decoder struct { + *bufio.Reader +} + +// NewDecoder builds a new idx stream decoder, that reads from r. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{bufio.NewReader(r)} +} + +// Decode reads from the stream and decode the content into the MemoryIndex struct. +func (d *Decoder) Decode(idx *MemoryIndex) error { + if err := validateHeader(d); err != nil { + return err + } + + flow := []func(*MemoryIndex, io.Reader) error{ + readVersion, + readFanout, + readObjectNames, + readCRC32, + readOffsets, + readChecksums, + } + + for _, f := range flow { + if err := f(idx, d); err != nil { + return err + } + } + + return nil +} + +func validateHeader(r io.Reader) error { + var h = make([]byte, 4) + if _, err := io.ReadFull(r, h); err != nil { + return err + } + + if !bytes.Equal(h, idxHeader) { + return ErrMalformedIdxFile + } + + return nil +} + +func readVersion(idx *MemoryIndex, r io.Reader) error { + v, err := binary.ReadUint32(r) + if err != nil { + return err + } + + if v > VersionSupported { + return ErrUnsupportedVersion + } + + idx.Version = v + return nil +} + +func readFanout(idx *MemoryIndex, r io.Reader) error { + for k := 0; k < fanout; k++ { + n, err := binary.ReadUint32(r) + if err != nil { + return err + } + + idx.Fanout[k] = n + idx.FanoutMapping[k] = noMapping + } + + return nil +} + +func readObjectNames(idx *MemoryIndex, r io.Reader) error { + for k := 0; k < fanout; k++ { + var buckets uint32 + if k == 0 { + buckets = idx.Fanout[k] + } else { + buckets = idx.Fanout[k] - idx.Fanout[k-1] + } + + if buckets == 0 { + continue + } + + idx.FanoutMapping[k] = len(idx.Names) + + nameLen := int(buckets * objectIDLength) + bin := make([]byte, nameLen) + if _, err := io.ReadFull(r, bin); err != nil { + return err + } + + idx.Names = append(idx.Names, bin) + idx.Offset32 = append(idx.Offset32, make([]byte, buckets*4)) + idx.CRC32 = append(idx.CRC32, make([]byte, buckets*4)) + } + + return nil +} + +func readCRC32(idx *MemoryIndex, r io.Reader) error { + for k := 0; k < fanout; k++ { + if pos := idx.FanoutMapping[k]; pos != noMapping { + if _, err := io.ReadFull(r, idx.CRC32[pos]); err != nil { + return err + } + } + } + + return nil +} + +func readOffsets(idx *MemoryIndex, r io.Reader) error { + var o64cnt int + for k := 0; k < fanout; k++ { + if pos := idx.FanoutMapping[k]; pos != noMapping { + if _, err := io.ReadFull(r, idx.Offset32[pos]); err != nil { + return err + } + + for p := 0; p < len(idx.Offset32[pos]); p 
+= 4 { + if idx.Offset32[pos][p]&(byte(1)<<7) > 0 { + o64cnt++ + } + } + } + } + + if o64cnt > 0 { + idx.Offset64 = make([]byte, o64cnt*8) + if _, err := io.ReadFull(r, idx.Offset64); err != nil { + return err + } + } + + return nil +} + +func readChecksums(idx *MemoryIndex, r io.Reader) error { + if _, err := io.ReadFull(r, idx.PackfileChecksum[:]); err != nil { + return err + } + + if _, err := io.ReadFull(r, idx.IdxChecksum[:]); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go new file mode 100644 index 0000000000..1e628ab4a5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/doc.go @@ -0,0 +1,128 @@ +// Package idxfile implements encoding and decoding of packfile idx files. +// +// == Original (version 1) pack-*.idx files have the following format: +// +// - The header consists of 256 4-byte network byte order +// integers. N-th entry of this table records the number of +// objects in the corresponding pack, the first byte of whose +// object name is less than or equal to N. This is called the +// 'first-level fan-out' table. +// +// - The header is followed by sorted 24-byte entries, one entry +// per object in the pack. Each entry is: +// +// 4-byte network byte order integer, recording where the +// object is stored in the packfile as the offset from the +// beginning. +// +// 20-byte object name. +// +// - The file is concluded with a trailer: +// +// A copy of the 20-byte SHA1 checksum at the end of +// corresponding packfile. +// +// 20-byte SHA1-checksum of all of the above. +// +// Pack Idx file: +// +// -- +--------------------------------+ +// fanout | fanout[0] = 2 (for example) |-. +// table +--------------------------------+ | +// | fanout[1] | | +// +--------------------------------+ | +// | fanout[2] | | +// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | +// | fanout[255] = total objects |---. +// -- +--------------------------------+ | | +// main | offset | | | +// index | object name 00XXXXXXXXXXXXXXXX | | | +// tab +--------------------------------+ | | +// | offset | | | +// | object name 00XXXXXXXXXXXXXXXX | | | +// +--------------------------------+<+ | +// .-| offset | | +// | | object name 01XXXXXXXXXXXXXXXX | | +// | +--------------------------------+ | +// | | offset | | +// | | object name 01XXXXXXXXXXXXXXXX | | +// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | +// | | offset | | +// | | object name FFXXXXXXXXXXXXXXXX | | +// --| +--------------------------------+<--+ +// trailer | | packfile checksum | +// | +--------------------------------+ +// | | idxfile checksum | +// | +--------------------------------+ +// .---------. +// | +// Pack file entry: <+ +// +// packed object header: +// 1-byte size extension bit (MSB) +// type (next 3 bit) +// size0 (lower 4-bit) +// n-byte sizeN (as long as MSB is set, each 7-bit) +// size0..sizeN form 4+7+7+..+7 bit integer, size0 +// is the least significant part, and sizeN is the +// most significant part. +// packed object data: +// If it is not DELTA, then deflated bytes (the size above +// is the size before compression). +// If it is REF_DELTA, then +// 20-byte base object name SHA1 (the size above is the +// size of the delta data that follows). +// delta data, deflated. 
+// If it is OFS_DELTA, then
+// n-byte offset (see below) interpreted as a negative
+// offset from the type-byte of the header of the
+// ofs-delta entry (the size above is the size of
+// the delta data that follows).
+// delta data, deflated.
+//
+// offset encoding:
+// n bytes with MSB set in all but the last one.
+// The offset is then the number constructed by
+// concatenating the lower 7 bit of each byte, and
+// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
+// to the result.
+//
+// == Version 2 pack-*.idx files support packs larger than 4 GiB, and
+// have some other reorganizations. They have the format:
+//
+// - A 4-byte magic number '\377tOc' which is an unreasonable
+// fanout[0] value.
+//
+// - A 4-byte version number (= 2)
+//
+// - A 256-entry fan-out table just like v1.
+//
+// - A table of sorted 20-byte SHA1 object names. These are
+// packed together without offset values to reduce the cache
+// footprint of the binary search for a specific object name.
+//
+// - A table of 4-byte CRC32 values of the packed object data.
+// This is new in v2 so compressed data can be copied directly
+// from pack to pack during repacking without undetected
+// data corruption.
+//
+// - A table of 4-byte offset values (in network byte order).
+// These are usually 31-bit pack file offsets, but large
+// offsets are encoded as an index into the next table with
+// the msbit set.
+//
+// - A table of 8-byte offset entries (empty for pack files less
+// than 2 GiB). Pack files are organized with heavily used
+// objects toward the front, so most object references should
+// not need to refer to this table.
+//
+// - The same trailer as a v1 pack file:
+//
+// A copy of the 20-byte SHA1 checksum at the end of
+// corresponding packfile.
+//
+// 20-byte SHA1-checksum of all of the above.
+//
+// Source:
+// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt
+package idxfile
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go
new file mode 100644
index 0000000000..26b2e4d6b5
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/encoder.go
@@ -0,0 +1,142 @@
+package idxfile
+
+import (
+	"crypto/sha1"
+	"hash"
+	"io"
+
+	"github.com/go-git/go-git/v5/utils/binary"
+)
+
+// Encoder writes MemoryIndex structs to an output stream.
+type Encoder struct {
+	io.Writer
+	hash hash.Hash
+}
+
+// NewEncoder returns a new stream encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	h := sha1.New()
+	mw := io.MultiWriter(w, h)
+	return &Encoder{mw, h}
+}
+
+// Encode encodes a MemoryIndex to the encoder writer.
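Decoder and Encoder are symmetric, so an idx file can be round-tripped through a MemoryIndex. A minimal sketch follows, assuming a hypothetical pack index path; all constructors used here are part of this vendored package:

```go
package main

import (
	"bytes"
	"log"
	"os"

	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
)

func main() {
	// Hypothetical path; any pack index produced by git works.
	f, err := os.Open(".git/objects/pack/pack-example.idx")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	idx := idxfile.NewMemoryIndex()
	if err := idxfile.NewDecoder(f).Decode(idx); err != nil {
		log.Fatal(err)
	}

	// Re-encode; the trailer checksum is recomputed over the written bytes.
	var buf bytes.Buffer
	if _, err := idxfile.NewEncoder(&buf).Encode(idx); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %d bytes", buf.Len())
}
```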
+func (e *Encoder) Encode(idx *MemoryIndex) (int, error) { + flow := []func(*MemoryIndex) (int, error){ + e.encodeHeader, + e.encodeFanout, + e.encodeHashes, + e.encodeCRC32, + e.encodeOffsets, + e.encodeChecksums, + } + + sz := 0 + for _, f := range flow { + i, err := f(idx) + sz += i + + if err != nil { + return sz, err + } + } + + return sz, nil +} + +func (e *Encoder) encodeHeader(idx *MemoryIndex) (int, error) { + c, err := e.Write(idxHeader) + if err != nil { + return c, err + } + + return c + 4, binary.WriteUint32(e, idx.Version) +} + +func (e *Encoder) encodeFanout(idx *MemoryIndex) (int, error) { + for _, c := range idx.Fanout { + if err := binary.WriteUint32(e, c); err != nil { + return 0, err + } + } + + return fanout * 4, nil +} + +func (e *Encoder) encodeHashes(idx *MemoryIndex) (int, error) { + var size int + for k := 0; k < fanout; k++ { + pos := idx.FanoutMapping[k] + if pos == noMapping { + continue + } + + n, err := e.Write(idx.Names[pos]) + if err != nil { + return size, err + } + size += n + } + return size, nil +} + +func (e *Encoder) encodeCRC32(idx *MemoryIndex) (int, error) { + var size int + for k := 0; k < fanout; k++ { + pos := idx.FanoutMapping[k] + if pos == noMapping { + continue + } + + n, err := e.Write(idx.CRC32[pos]) + if err != nil { + return size, err + } + + size += n + } + + return size, nil +} + +func (e *Encoder) encodeOffsets(idx *MemoryIndex) (int, error) { + var size int + for k := 0; k < fanout; k++ { + pos := idx.FanoutMapping[k] + if pos == noMapping { + continue + } + + n, err := e.Write(idx.Offset32[pos]) + if err != nil { + return size, err + } + + size += n + } + + if len(idx.Offset64) > 0 { + n, err := e.Write(idx.Offset64) + if err != nil { + return size, err + } + + size += n + } + + return size, nil +} + +func (e *Encoder) encodeChecksums(idx *MemoryIndex) (int, error) { + if _, err := e.Write(idx.PackfileChecksum[:]); err != nil { + return 0, err + } + + copy(idx.IdxChecksum[:], e.hash.Sum(nil)[:20]) + if _, err := e.Write(idx.IdxChecksum[:]); err != nil { + return 0, err + } + + return 40, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go new file mode 100644 index 0000000000..64dd8dcef4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/idxfile.go @@ -0,0 +1,346 @@ +package idxfile + +import ( + "bytes" + "io" + "sort" + + encbin "encoding/binary" + + "github.com/go-git/go-git/v5/plumbing" +) + +const ( + // VersionSupported is the only idx version supported. + VersionSupported = 2 + + noMapping = -1 +) + +var ( + idxHeader = []byte{255, 't', 'O', 'c'} +) + +// Index represents an index of a packfile. +type Index interface { + // Contains checks whether the given hash is in the index. + Contains(h plumbing.Hash) (bool, error) + // FindOffset finds the offset in the packfile for the object with + // the given hash. + FindOffset(h plumbing.Hash) (int64, error) + // FindCRC32 finds the CRC32 of the object with the given hash. + FindCRC32(h plumbing.Hash) (uint32, error) + // FindHash finds the hash for the object with the given offset. + FindHash(o int64) (plumbing.Hash, error) + // Count returns the number of entries in the index. + Count() (int64, error) + // Entries returns an iterator to retrieve all index entries. + Entries() (EntryIter, error) + // EntriesByOffset returns an iterator to retrieve all index entries ordered + // by offset. 
+ EntriesByOffset() (EntryIter, error) +} + +// MemoryIndex is the in memory representation of an idx file. +type MemoryIndex struct { + Version uint32 + Fanout [256]uint32 + // FanoutMapping maps the position in the fanout table to the position + // in the Names, Offset32 and CRC32 slices. This improves the memory + // usage by not needing an array with unnecessary empty slots. + FanoutMapping [256]int + Names [][]byte + Offset32 [][]byte + CRC32 [][]byte + Offset64 []byte + PackfileChecksum [20]byte + IdxChecksum [20]byte + + offsetHash map[int64]plumbing.Hash + offsetHashIsFull bool +} + +var _ Index = (*MemoryIndex)(nil) + +// NewMemoryIndex returns an instance of a new MemoryIndex. +func NewMemoryIndex() *MemoryIndex { + return &MemoryIndex{} +} + +func (idx *MemoryIndex) findHashIndex(h plumbing.Hash) (int, bool) { + k := idx.FanoutMapping[h[0]] + if k == noMapping { + return 0, false + } + + if len(idx.Names) <= k { + return 0, false + } + + data := idx.Names[k] + high := uint64(len(idx.Offset32[k])) >> 2 + if high == 0 { + return 0, false + } + + low := uint64(0) + for { + mid := (low + high) >> 1 + offset := mid * objectIDLength + + cmp := bytes.Compare(h[:], data[offset:offset+objectIDLength]) + if cmp < 0 { + high = mid + } else if cmp == 0 { + return int(mid), true + } else { + low = mid + 1 + } + + if low >= high { + break + } + } + + return 0, false +} + +// Contains implements the Index interface. +func (idx *MemoryIndex) Contains(h plumbing.Hash) (bool, error) { + _, ok := idx.findHashIndex(h) + return ok, nil +} + +// FindOffset implements the Index interface. +func (idx *MemoryIndex) FindOffset(h plumbing.Hash) (int64, error) { + if len(idx.FanoutMapping) <= int(h[0]) { + return 0, plumbing.ErrObjectNotFound + } + + k := idx.FanoutMapping[h[0]] + i, ok := idx.findHashIndex(h) + if !ok { + return 0, plumbing.ErrObjectNotFound + } + + offset := idx.getOffset(k, i) + + if !idx.offsetHashIsFull { + // Save the offset for reverse lookup + if idx.offsetHash == nil { + idx.offsetHash = make(map[int64]plumbing.Hash) + } + idx.offsetHash[int64(offset)] = h + } + + return int64(offset), nil +} + +const isO64Mask = uint64(1) << 31 + +func (idx *MemoryIndex) getOffset(firstLevel, secondLevel int) uint64 { + offset := secondLevel << 2 + ofs := encbin.BigEndian.Uint32(idx.Offset32[firstLevel][offset : offset+4]) + + if (uint64(ofs) & isO64Mask) != 0 { + offset := 8 * (uint64(ofs) & ^isO64Mask) + n := encbin.BigEndian.Uint64(idx.Offset64[offset : offset+8]) + return n + } + + return uint64(ofs) +} + +// FindCRC32 implements the Index interface. +func (idx *MemoryIndex) FindCRC32(h plumbing.Hash) (uint32, error) { + k := idx.FanoutMapping[h[0]] + i, ok := idx.findHashIndex(h) + if !ok { + return 0, plumbing.ErrObjectNotFound + } + + return idx.getCRC32(k, i), nil +} + +func (idx *MemoryIndex) getCRC32(firstLevel, secondLevel int) uint32 { + offset := secondLevel << 2 + return encbin.BigEndian.Uint32(idx.CRC32[firstLevel][offset : offset+4]) +} + +// FindHash implements the Index interface. +func (idx *MemoryIndex) FindHash(o int64) (plumbing.Hash, error) { + var hash plumbing.Hash + var ok bool + + if idx.offsetHash != nil { + if hash, ok = idx.offsetHash[o]; ok { + return hash, nil + } + } + + // Lazily generate the reverse offset/hash map if required. 
+ if !idx.offsetHashIsFull || idx.offsetHash == nil { + if err := idx.genOffsetHash(); err != nil { + return plumbing.ZeroHash, err + } + + hash, ok = idx.offsetHash[o] + } + + if !ok { + return plumbing.ZeroHash, plumbing.ErrObjectNotFound + } + + return hash, nil +} + +// genOffsetHash generates the offset/hash mapping for reverse search. +func (idx *MemoryIndex) genOffsetHash() error { + count, err := idx.Count() + if err != nil { + return err + } + + idx.offsetHash = make(map[int64]plumbing.Hash, count) + idx.offsetHashIsFull = true + + var hash plumbing.Hash + i := uint32(0) + for firstLevel, fanoutValue := range idx.Fanout { + mappedFirstLevel := idx.FanoutMapping[firstLevel] + for secondLevel := uint32(0); i < fanoutValue; i++ { + copy(hash[:], idx.Names[mappedFirstLevel][secondLevel*objectIDLength:]) + offset := int64(idx.getOffset(mappedFirstLevel, int(secondLevel))) + idx.offsetHash[offset] = hash + secondLevel++ + } + } + + return nil +} + +// Count implements the Index interface. +func (idx *MemoryIndex) Count() (int64, error) { + return int64(idx.Fanout[fanout-1]), nil +} + +// Entries implements the Index interface. +func (idx *MemoryIndex) Entries() (EntryIter, error) { + return &idxfileEntryIter{idx, 0, 0, 0}, nil +} + +// EntriesByOffset implements the Index interface. +func (idx *MemoryIndex) EntriesByOffset() (EntryIter, error) { + count, err := idx.Count() + if err != nil { + return nil, err + } + + iter := &idxfileEntryOffsetIter{ + entries: make(entriesByOffset, count), + } + + entries, err := idx.Entries() + if err != nil { + return nil, err + } + + for pos := 0; int64(pos) < count; pos++ { + entry, err := entries.Next() + if err != nil { + return nil, err + } + + iter.entries[pos] = entry + } + + sort.Sort(iter.entries) + + return iter, nil +} + +// EntryIter is an iterator that will return the entries in a packfile index. +type EntryIter interface { + // Next returns the next entry in the packfile index. + Next() (*Entry, error) + // Close closes the iterator. + Close() error +} + +type idxfileEntryIter struct { + idx *MemoryIndex + total int + firstLevel, secondLevel int +} + +func (i *idxfileEntryIter) Next() (*Entry, error) { + for { + if i.firstLevel >= fanout { + return nil, io.EOF + } + + if i.total >= int(i.idx.Fanout[i.firstLevel]) { + i.firstLevel++ + i.secondLevel = 0 + continue + } + + mappedFirstLevel := i.idx.FanoutMapping[i.firstLevel] + entry := new(Entry) + copy(entry.Hash[:], i.idx.Names[mappedFirstLevel][i.secondLevel*objectIDLength:]) + entry.Offset = i.idx.getOffset(mappedFirstLevel, i.secondLevel) + entry.CRC32 = i.idx.getCRC32(mappedFirstLevel, i.secondLevel) + + i.secondLevel++ + i.total++ + + return entry, nil + } +} + +func (i *idxfileEntryIter) Close() error { + i.firstLevel = fanout + return nil +} + +// Entry is the in memory representation of an object entry in the idx file. 
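The iterator machinery above makes walking a decoded index a short loop; the Entry type it yields follows below. A sketch, assuming `idx` is a *MemoryIndex already filled by `Decoder.Decode`; `EntriesByOffset` would yield the same entries in pack-file order instead of hash order:

```go
package main

import (
	"fmt"
	"io"

	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
)

// dumpEntries walks a decoded index in hash order.
func dumpEntries(idx *idxfile.MemoryIndex) error {
	iter, err := idx.Entries()
	if err != nil {
		return err
	}
	defer iter.Close()

	for {
		e, err := iter.Next()
		if err == io.EOF {
			return nil // iterator is exhausted
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s offset=%d crc=%08x\n", e.Hash, e.Offset, e.CRC32)
	}
}
```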
+type Entry struct {
+	Hash   plumbing.Hash
+	CRC32  uint32
+	Offset uint64
+}
+
+type idxfileEntryOffsetIter struct {
+	entries entriesByOffset
+	pos     int
+}
+
+func (i *idxfileEntryOffsetIter) Next() (*Entry, error) {
+	if i.pos >= len(i.entries) {
+		return nil, io.EOF
+	}
+
+	entry := i.entries[i.pos]
+	i.pos++
+
+	return entry, nil
+}
+
+func (i *idxfileEntryOffsetIter) Close() error {
+	i.pos = len(i.entries) + 1
+	return nil
+}
+
+type entriesByOffset []*Entry
+
+func (o entriesByOffset) Len() int {
+	return len(o)
+}
+
+func (o entriesByOffset) Less(i int, j int) bool {
+	return o[i].Offset < o[j].Offset
+}
+
+func (o entriesByOffset) Swap(i int, j int) {
+	o[i], o[j] = o[j], o[i]
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go
new file mode 100644
index 0000000000..daa160502a
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/idxfile/writer.go
@@ -0,0 +1,186 @@
+package idxfile
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/utils/binary"
+)
+
+// objects implements sort.Interface and uses hash as sorting key.
+type objects []Entry
+
+// Writer implements a packfile Observer interface and is used to generate
+// indexes.
+type Writer struct {
+	m sync.Mutex
+
+	count    uint32
+	checksum plumbing.Hash
+	objects  objects
+	offset64 uint32
+	finished bool
+	index    *MemoryIndex
+	added    map[plumbing.Hash]struct{}
+}
+
+// Index returns a previously created MemoryIndex or creates a new one if
+// needed.
+func (w *Writer) Index() (*MemoryIndex, error) {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	if w.index == nil {
+		return w.createIndex()
+	}
+
+	return w.index, nil
+}
+
+// Add appends new object data.
+func (w *Writer) Add(h plumbing.Hash, pos uint64, crc uint32) {
+	w.m.Lock()
+	defer w.m.Unlock()
+
+	if w.added == nil {
+		w.added = make(map[plumbing.Hash]struct{})
+	}
+
+	if _, ok := w.added[h]; !ok {
+		w.added[h] = struct{}{}
+		w.objects = append(w.objects, Entry{h, crc, pos})
+	}
+}
+
+func (w *Writer) Finished() bool {
+	return w.finished
+}
+
+// OnHeader implements packfile.Observer interface.
+func (w *Writer) OnHeader(count uint32) error {
+	w.count = count
+	w.objects = make(objects, 0, count)
+	return nil
+}
+
+// OnInflatedObjectHeader implements packfile.Observer interface.
+func (w *Writer) OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error {
+	return nil
+}
+
+// OnInflatedObjectContent implements packfile.Observer interface.
+func (w *Writer) OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, _ []byte) error {
+	w.Add(h, uint64(pos), crc)
+	return nil
+}
+
+// OnFooter implements packfile.Observer interface.
+func (w *Writer) OnFooter(h plumbing.Hash) error {
+	w.checksum = h
+	w.finished = true
+	_, err := w.createIndex()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// createIndex returns a MemoryIndex filled with the information gathered by
+// the observer callbacks.
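createIndex, defined just below, assembles the final MemoryIndex; in practice the Writer is registered as an observer of a packfile parse, so the idx is built as a side effect of scanning the pack. A sketch of that wiring, assuming the `packfile.NewScanner`/`packfile.NewParser` API from the sibling vendored packfile package (not shown in this hunk) and hypothetical pack paths:

```go
package main

import (
	"log"
	"os"

	"github.com/go-git/go-git/v5/plumbing/format/idxfile"
	"github.com/go-git/go-git/v5/plumbing/format/packfile"
)

func main() {
	pack, err := os.Open(".git/objects/pack/pack-example.pack") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer pack.Close()

	// The parser announces every inflated object to its observers, so the
	// Writer collects (hash, offset, crc) triples as the pack is scanned.
	w := new(idxfile.Writer)
	parser, err := packfile.NewParser(packfile.NewScanner(pack), w)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := parser.Parse(); err != nil {
		log.Fatal(err)
	}

	idx, err := w.Index() // the MemoryIndex built by createIndex
	if err != nil {
		log.Fatal(err)
	}

	out, err := os.Create(".git/objects/pack/pack-example.idx")
	if err != nil {
		log.Fatal(err)
	}
	defer out.Close()
	if _, err := idxfile.NewEncoder(out).Encode(idx); err != nil {
		log.Fatal(err)
	}
}
```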
+func (w *Writer) createIndex() (*MemoryIndex, error) { + if !w.finished { + return nil, fmt.Errorf("the index still hasn't finished building") + } + + idx := new(MemoryIndex) + w.index = idx + + sort.Sort(w.objects) + + // unmap all fans by default + for i := range idx.FanoutMapping { + idx.FanoutMapping[i] = noMapping + } + + buf := new(bytes.Buffer) + + last := -1 + bucket := -1 + for i, o := range w.objects { + fan := o.Hash[0] + + // fill the gaps between fans + for j := last + 1; j < int(fan); j++ { + idx.Fanout[j] = uint32(i) + } + + // update the number of objects for this position + idx.Fanout[fan] = uint32(i + 1) + + // we move from one bucket to another, update counters and allocate + // memory + if last != int(fan) { + bucket++ + idx.FanoutMapping[fan] = bucket + last = int(fan) + + idx.Names = append(idx.Names, make([]byte, 0)) + idx.Offset32 = append(idx.Offset32, make([]byte, 0)) + idx.CRC32 = append(idx.CRC32, make([]byte, 0)) + } + + idx.Names[bucket] = append(idx.Names[bucket], o.Hash[:]...) + + offset := o.Offset + if offset > math.MaxInt32 { + offset = w.addOffset64(offset) + } + + buf.Truncate(0) + binary.WriteUint32(buf, uint32(offset)) + idx.Offset32[bucket] = append(idx.Offset32[bucket], buf.Bytes()...) + + buf.Truncate(0) + binary.WriteUint32(buf, o.CRC32) + idx.CRC32[bucket] = append(idx.CRC32[bucket], buf.Bytes()...) + } + + for j := last + 1; j < 256; j++ { + idx.Fanout[j] = uint32(len(w.objects)) + } + + idx.Version = VersionSupported + idx.PackfileChecksum = w.checksum + + return idx, nil +} + +func (w *Writer) addOffset64(pos uint64) uint64 { + buf := new(bytes.Buffer) + binary.WriteUint64(buf, pos) + w.index.Offset64 = append(w.index.Offset64, buf.Bytes()...) + + index := uint64(w.offset64 | (1 << 31)) + w.offset64++ + + return index +} + +func (o objects) Len() int { + return len(o) +} + +func (o objects) Less(i int, j int) bool { + cmp := bytes.Compare(o[i].Hash[:], o[j].Hash[:]) + return cmp < 0 +} + +func (o objects) Swap(i int, j int) { + o[i], o[j] = o[j], o[i] +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go new file mode 100644 index 0000000000..036b6365e6 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go @@ -0,0 +1,479 @@ +package index + +import ( + "bufio" + "bytes" + "crypto/sha1" + "errors" + "hash" + "io" + "io/ioutil" + "strconv" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/binary" +) + +var ( + // DecodeVersionSupported is the range of supported index versions + DecodeVersionSupported = struct{ Min, Max uint32 }{Min: 2, Max: 4} + + // ErrMalformedSignature is returned by Decode when the index header file is + // malformed + ErrMalformedSignature = errors.New("malformed index signature file") + // ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with + // the read content + ErrInvalidChecksum = errors.New("invalid checksum") + + errUnknownExtension = errors.New("unknown extension") +) + +const ( + entryHeaderLength = 62 + entryExtended = 0x4000 + entryValid = 0x8000 + nameMask = 0xfff + intentToAddMask = 1 << 13 + skipWorkTreeMask = 1 << 14 +) + +// A Decoder reads and decodes index files from an input stream. +type Decoder struct { + r io.Reader + hash hash.Hash + lastEntry *Entry + + extReader *bufio.Reader +} + +// NewDecoder returns a new decoder that reads from r. 
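As an aside on the writer above: `addOffset64`, together with `getOffset` from idxfile.go, implements the v2 large-offset scheme. Offsets above `math.MaxInt32` are appended to the Offset64 table, and the 32-bit slot stores the table index flagged with the most significant bit. A small sketch of the arithmetic:

```go
package main

import "fmt"

func main() {
	const isO64Mask = uint64(1) << 31

	// addOffset64 stores the real offset in Offset64 and returns the table
	// index with the msbit set; that value lands in the 32-bit offset table.
	slot := uint32(0) | (1 << 31) // first entry of the Offset64 table

	// getOffset sees the flag and reads 8 bytes at 8*(index without the flag).
	if uint64(slot)&isO64Mask != 0 {
		byteOffset := 8 * (uint64(slot) & ^isO64Mask)
		fmt.Println("large offset: read 8 bytes at Offset64 position", byteOffset) // 0
	}
}
```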
+func NewDecoder(r io.Reader) *Decoder { + h := sha1.New() + return &Decoder{ + r: io.TeeReader(r, h), + hash: h, + extReader: bufio.NewReader(nil), + } +} + +// Decode reads the whole index object from its input and stores it in the +// value pointed to by idx. +func (d *Decoder) Decode(idx *Index) error { + var err error + idx.Version, err = validateHeader(d.r) + if err != nil { + return err + } + + entryCount, err := binary.ReadUint32(d.r) + if err != nil { + return err + } + + if err := d.readEntries(idx, int(entryCount)); err != nil { + return err + } + + return d.readExtensions(idx) +} + +func (d *Decoder) readEntries(idx *Index, count int) error { + for i := 0; i < count; i++ { + e, err := d.readEntry(idx) + if err != nil { + return err + } + + d.lastEntry = e + idx.Entries = append(idx.Entries, e) + } + + return nil +} + +func (d *Decoder) readEntry(idx *Index) (*Entry, error) { + e := &Entry{} + + var msec, mnsec, sec, nsec uint32 + var flags uint16 + + flow := []interface{}{ + &sec, &nsec, + &msec, &mnsec, + &e.Dev, + &e.Inode, + &e.Mode, + &e.UID, + &e.GID, + &e.Size, + &e.Hash, + &flags, + } + + if err := binary.Read(d.r, flow...); err != nil { + return nil, err + } + + read := entryHeaderLength + + if sec != 0 || nsec != 0 { + e.CreatedAt = time.Unix(int64(sec), int64(nsec)) + } + + if msec != 0 || mnsec != 0 { + e.ModifiedAt = time.Unix(int64(msec), int64(mnsec)) + } + + e.Stage = Stage(flags>>12) & 0x3 + + if flags&entryExtended != 0 { + extended, err := binary.ReadUint16(d.r) + if err != nil { + return nil, err + } + + read += 2 + e.IntentToAdd = extended&intentToAddMask != 0 + e.SkipWorktree = extended&skipWorkTreeMask != 0 + } + + if err := d.readEntryName(idx, e, flags); err != nil { + return nil, err + } + + return e, d.padEntry(idx, e, read) +} + +func (d *Decoder) readEntryName(idx *Index, e *Entry, flags uint16) error { + var name string + var err error + + switch idx.Version { + case 2, 3: + len := flags & nameMask + name, err = d.doReadEntryName(len) + case 4: + name, err = d.doReadEntryNameV4() + default: + return ErrUnsupportedVersion + } + + if err != nil { + return err + } + + e.Name = name + return nil +} + +func (d *Decoder) doReadEntryNameV4() (string, error) { + l, err := binary.ReadVariableWidthInt(d.r) + if err != nil { + return "", err + } + + var base string + if d.lastEntry != nil { + base = d.lastEntry.Name[:len(d.lastEntry.Name)-int(l)] + } + + name, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return "", err + } + + return base + string(name), nil +} + +func (d *Decoder) doReadEntryName(len uint16) (string, error) { + name := make([]byte, len) + _, err := io.ReadFull(d.r, name) + + return string(name), err +} + +// Index entries are padded out to the next 8 byte alignment +// for historical reasons related to how C Git read the files. 
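padEntry, below, handles the v2/v3 alignment; the v4 name compression just shown in `doReadEntryNameV4` stores how many bytes to strip from the previous entry's name plus a NUL-terminated suffix to append. A worked example with hypothetical names:

```go
package main

import "fmt"

func main() {
	// Worked example of the v4 prefix compression decoded by doReadEntryNameV4.
	prev := "docs/readme.md" // d.lastEntry.Name
	n := 9                   // variable-width integer read from the stream
	s := "setup.md"          // suffix read up to the NUL byte

	// Remove n bytes from the end of the previous name, then append s.
	name := prev[:len(prev)-n] + s
	fmt.Println(name) // docs/setup.md
}
```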
+func (d *Decoder) padEntry(idx *Index, e *Entry, read int) error { + if idx.Version == 4 { + return nil + } + + entrySize := read + len(e.Name) + padLen := 8 - entrySize%8 + _, err := io.CopyN(ioutil.Discard, d.r, int64(padLen)) + return err +} + +func (d *Decoder) readExtensions(idx *Index) error { + // TODO: support 'Split index' and 'Untracked cache' extensions, take in + // count that they are not supported by jgit or libgit + + var expected []byte + var err error + + var header [4]byte + for { + expected = d.hash.Sum(nil) + + var n int + if n, err = io.ReadFull(d.r, header[:]); err != nil { + if n == 0 { + err = io.EOF + } + + break + } + + err = d.readExtension(idx, header[:]) + if err != nil { + break + } + } + + if err != errUnknownExtension { + return err + } + + return d.readChecksum(expected, header) +} + +func (d *Decoder) readExtension(idx *Index, header []byte) error { + switch { + case bytes.Equal(header, treeExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.Cache = &Tree{} + d := &treeExtensionDecoder{r} + if err := d.Decode(idx.Cache); err != nil { + return err + } + case bytes.Equal(header, resolveUndoExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.ResolveUndo = &ResolveUndo{} + d := &resolveUndoDecoder{r} + if err := d.Decode(idx.ResolveUndo); err != nil { + return err + } + case bytes.Equal(header, endOfIndexEntryExtSignature): + r, err := d.getExtensionReader() + if err != nil { + return err + } + + idx.EndOfIndexEntry = &EndOfIndexEntry{} + d := &endOfIndexEntryDecoder{r} + if err := d.Decode(idx.EndOfIndexEntry); err != nil { + return err + } + default: + return errUnknownExtension + } + + return nil +} + +func (d *Decoder) getExtensionReader() (*bufio.Reader, error) { + len, err := binary.ReadUint32(d.r) + if err != nil { + return nil, err + } + + d.extReader.Reset(&io.LimitedReader{R: d.r, N: int64(len)}) + return d.extReader, nil +} + +func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error { + var h plumbing.Hash + copy(h[:4], alreadyRead[:]) + + if _, err := io.ReadFull(d.r, h[4:]); err != nil { + return err + } + + if !bytes.Equal(h[:], expected) { + return ErrInvalidChecksum + } + + return nil +} + +func validateHeader(r io.Reader) (version uint32, err error) { + var s = make([]byte, 4) + if _, err := io.ReadFull(r, s); err != nil { + return 0, err + } + + if !bytes.Equal(s, indexSignature) { + return 0, ErrMalformedSignature + } + + version, err = binary.ReadUint32(r) + if err != nil { + return 0, err + } + + if version < DecodeVersionSupported.Min || version > DecodeVersionSupported.Max { + return 0, ErrUnsupportedVersion + } + + return +} + +type treeExtensionDecoder struct { + r *bufio.Reader +} + +func (d *treeExtensionDecoder) Decode(t *Tree) error { + for { + e, err := d.readEntry() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + if e == nil { + continue + } + + t.Entries = append(t.Entries, *e) + } +} + +func (d *treeExtensionDecoder) readEntry() (*TreeEntry, error) { + e := &TreeEntry{} + + path, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return nil, err + } + + e.Path = string(path) + + count, err := binary.ReadUntil(d.r, ' ') + if err != nil { + return nil, err + } + + i, err := strconv.Atoi(string(count)) + if err != nil { + return nil, err + } + + // An entry can be in an invalidated state and is represented by having a + // negative number in the entry_count field. 
+ if i == -1 { + return nil, nil + } + + e.Entries = i + trees, err := binary.ReadUntil(d.r, '\n') + if err != nil { + return nil, err + } + + i, err = strconv.Atoi(string(trees)) + if err != nil { + return nil, err + } + + e.Trees = i + _, err = io.ReadFull(d.r, e.Hash[:]) + if err != nil { + return nil, err + } + return e, nil +} + +type resolveUndoDecoder struct { + r *bufio.Reader +} + +func (d *resolveUndoDecoder) Decode(ru *ResolveUndo) error { + for { + e, err := d.readEntry() + if err != nil { + if err == io.EOF { + return nil + } + + return err + } + + ru.Entries = append(ru.Entries, *e) + } +} + +func (d *resolveUndoDecoder) readEntry() (*ResolveUndoEntry, error) { + e := &ResolveUndoEntry{ + Stages: make(map[Stage]plumbing.Hash), + } + + path, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return nil, err + } + + e.Path = string(path) + + for i := 0; i < 3; i++ { + if err := d.readStage(e, Stage(i+1)); err != nil { + return nil, err + } + } + + for s := range e.Stages { + var hash plumbing.Hash + if _, err := io.ReadFull(d.r, hash[:]); err != nil { + return nil, err + } + + e.Stages[s] = hash + } + + return e, nil +} + +func (d *resolveUndoDecoder) readStage(e *ResolveUndoEntry, s Stage) error { + ascii, err := binary.ReadUntil(d.r, '\x00') + if err != nil { + return err + } + + stage, err := strconv.ParseInt(string(ascii), 8, 64) + if err != nil { + return err + } + + if stage != 0 { + e.Stages[s] = plumbing.ZeroHash + } + + return nil +} + +type endOfIndexEntryDecoder struct { + r *bufio.Reader +} + +func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error { + var err error + e.Offset, err = binary.ReadUint32(d.r) + if err != nil { + return err + } + + _, err = io.ReadFull(d.r, e.Hash[:]) + return err +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go new file mode 100644 index 0000000000..39ae6ad5f9 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/doc.go @@ -0,0 +1,360 @@ +// Package index implements encoding and decoding of index format files. +// +// Git index format +// ================ +// +// == The Git index file has the following format +// +// All binary numbers are in network byte order. Version 2 is described +// here unless stated otherwise. +// +// - A 12-byte header consisting of +// +// 4-byte signature: +// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache") +// +// 4-byte version number: +// The current supported versions are 2, 3 and 4. +// +// 32-bit number of index entries. +// +// - A number of sorted index entries (see below). +// +// - Extensions +// +// Extensions are identified by signature. Optional extensions can +// be ignored if Git does not understand them. +// +// Git currently supports cached tree and resolve undo extensions. +// +// 4-byte extension signature. If the first byte is 'A'..'Z' the +// extension is optional and can be ignored. +// +// 32-bit size of the extension +// +// Extension data +// +// - 160-bit SHA-1 over the content of the index file before this +// checksum. +// +// == Index entry +// +// Index entries are sorted in ascending order on the name field, +// interpreted as a string of unsigned bytes (i.e. memcmp() order, no +// localization, no special casing of directory separator '/'). Entries +// with the same name are sorted by their stage field. 
+// +// 32-bit ctime seconds, the last time a file's metadata changed +// this is stat(2) data +// +// 32-bit ctime nanosecond fractions +// this is stat(2) data +// +// 32-bit mtime seconds, the last time a file's data changed +// this is stat(2) data +// +// 32-bit mtime nanosecond fractions +// this is stat(2) data +// +// 32-bit dev +// this is stat(2) data +// +// 32-bit ino +// this is stat(2) data +// +// 32-bit mode, split into (high to low bits) +// +// 4-bit object type +// valid values in binary are 1000 (regular file), 1010 (symbolic link) +// and 1110 (gitlink) +// +// 3-bit unused +// +// 9-bit unix permission. Only 0755 and 0644 are valid for regular files. +// Symbolic links and gitlinks have value 0 in this field. +// +// 32-bit uid +// this is stat(2) data +// +// 32-bit gid +// this is stat(2) data +// +// 32-bit file size +// This is the on-disk size from stat(2), truncated to 32-bit. +// +// 160-bit SHA-1 for the represented object +// +// A 16-bit 'flags' field split into (high to low bits) +// +// 1-bit assume-valid flag +// +// 1-bit extended flag (must be zero in version 2) +// +// 2-bit stage (during merge) +// +// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF +// is stored in this field. +// +// (Version 3 or later) A 16-bit field, only applicable if the +// "extended flag" above is 1, split into (high to low bits). +// +// 1-bit reserved for future +// +// 1-bit skip-worktree flag (used by sparse checkout) +// +// 1-bit intent-to-add flag (used by "git add -N") +// +// 13-bit unused, must be zero +// +// Entry path name (variable length) relative to top level directory +// (without leading slash). '/' is used as path separator. The special +// path components ".", ".." and ".git" (without quotes) are disallowed. +// Trailing slash is also disallowed. +// +// The exact encoding is undefined, but the '.' and '/' characters +// are encoded in 7-bit ASCII and the encoding cannot contain a NUL +// byte (iow, this is a UNIX pathname). +// +// (Version 4) In version 4, the entry path name is prefix-compressed +// relative to the path name for the previous entry (the very first +// entry is encoded as if the path name for the previous entry is an +// empty string). At the beginning of an entry, an integer N in the +// variable width encoding (the same encoding as the offset is encoded +// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed +// by a NUL-terminated string S. Removing N bytes from the end of the +// path name for the previous entry, and replacing it with the string S +// yields the path name for this entry. +// +// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes +// while keeping the name NUL-terminated. +// +// (Version 4) In version 4, the padding after the pathname does not +// exist. +// +// Interpretation of index entries in split index mode is completely +// different. See below for details. +// +// == Extensions +// +// === Cached tree +// +// Cached tree extension contains pre-computed hashes for trees that can +// be derived from the index. It helps speed up tree object generation +// from index for a new commit. +// +// When a path is updated in index, the path must be invalidated and +// removed from tree cache. +// +// The signature for this extension is { 'T', 'R', 'E', 'E' }. 
+//
+// A series of entries fill the entire extension; each of which
+// consists of:
+//
+// - NUL-terminated path component (relative to its parent directory);
+//
+// - ASCII decimal number of entries in the index that is covered by the
+// tree this entry represents (entry_count);
+//
+// - A space (ASCII 32);
+//
+// - ASCII decimal number that represents the number of subtrees this
+// tree has;
+//
+// - A newline (ASCII 10); and
+//
+// - 160-bit object name for the object that would result from writing
+// this span of index as a tree.
+//
+// An entry can be in an invalidated state and is represented by having
+// a negative number in the entry_count field. In this case, there is no
+// object name and the next entry starts immediately after the newline.
+// When writing an invalid entry, -1 should always be used as entry_count.
+//
+// The entries are written out in the top-down, depth-first order. The
+// first entry represents the root level of the repository, followed by the
+// first subtree--let's call this A--of the root level (with its name
+// relative to the root level), followed by the first subtree of A (with
+// its name relative to A), ...
+//
+// === Resolve undo
+//
+// A conflict is represented in the index as a set of higher stage entries.
+// When a conflict is resolved (e.g. with "git add path"), these higher
+// stage entries will be removed and a stage-0 entry with proper resolution
+// is added.
+//
+// When these higher stage entries are removed, they are saved in the
+// resolve undo extension, so that conflicts can be recreated (e.g. with
+// "git checkout -m"), in case users want to redo a conflict resolution
+// from scratch.
+//
+// The signature for this extension is { 'R', 'E', 'U', 'C' }.
+//
+// A series of entries fill the entire extension; each of which
+// consists of:
+//
+// - NUL-terminated pathname the entry describes (relative to the root of
+// the repository, i.e. full pathname);
+//
+// - Three NUL-terminated ASCII octal numbers, entry mode of entries in
+// stage 1 to 3 (a missing stage is represented by "0" in this field);
+// and
+//
+// - At most three 160-bit object names of the entry in stages from 1 to 3
+// (nothing is written for a missing stage).
+//
+// === Split index
+//
+// In split index mode, the majority of index entries could be stored
+// in a separate file. This extension records the changes to be made on
+// top of that to produce the final index.
+//
+// The signature for this extension is { 'l', 'i', 'n', 'k' }.
+//
+// The extension consists of:
+//
+// - 160-bit SHA-1 of the shared index file. The shared index file path
+// is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
+// index does not require a shared index file.
+//
+// - An ewah-encoded delete bitmap, each bit represents an entry in the
+// shared index. If a bit is set, its corresponding entry in the
+// shared index will be removed from the final index. Note, because
+// a delete operation changes index entry positions, but we do need
+// original positions in replace phase, it's best to just mark
+// entries for removal, then do a mass deletion after replacement.
+//
+// - An ewah-encoded replace bitmap, each bit represents an entry in
+// the shared index. If a bit is set, its corresponding entry in the
+// shared index will be replaced with an entry in this index
+// file. All replaced entries are stored in sorted order in this
+// index.
The first "1" bit in the replace bitmap corresponds to the +// first index entry, the second "1" bit to the second entry and so +// on. Replaced entries may have empty path names to save space. +// +// The remaining index entries after replaced ones will be added to the +// final index. These added entries are also sorted by entry name then +// stage. +// +// == Untracked cache +// +// Untracked cache saves the untracked file list and necessary data to +// verify the cache. The signature for this extension is { 'U', 'N', +// 'T', 'R' }. +// +// The extension starts with +// +// - A sequence of NUL-terminated strings, preceded by the size of the +// sequence in variable width encoding. Each string describes the +// environment where the cache can be used. +// +// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from +// ctime field until "file size". +// +// - Stat data of plumbing.excludesfile +// +// - 32-bit dir_flags (see struct dir_struct) +// +// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file +// does not exist. +// +// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does +// not exist. +// +// - NUL-terminated string of per-dir exclude file name. This usually +// is ".gitignore". +// +// - The number of following directory blocks, variable width +// encoding. If this number is zero, the extension ends here with a +// following NUL. +// +// - A number of directory blocks in depth-first-search order, each +// consists of +// +// - The number of untracked entries, variable width encoding. +// +// - The number of sub-directory blocks, variable width encoding. +// +// - The directory name terminated by NUL. +// +// - A number of untracked file/dir names terminated by NUL. +// +// The remaining data of each directory block is grouped by type: +// +// - An ewah bitmap, the n-th bit marks whether the n-th directory has +// valid untracked cache entries. +// +// - An ewah bitmap, the n-th bit records "check-only" bit of +// read_directory_recursive() for the n-th directory. +// +// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data +// is valid for the n-th directory and exists in the next data. +// +// - An array of stat data. The n-th data corresponds with the n-th +// "one" bit in the previous ewah bitmap. +// +// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit +// in the previous ewah bitmap. +// +// - One NUL. +// +// == File System Monitor cache +// +// The file system monitor cache tracks files for which the core.fsmonitor +// hook has told us about changes. The signature for this extension is +// { 'F', 'S', 'M', 'N' }. +// +// The extension starts with +// +// - 32-bit version number: the current supported version is 1. +// +// - 64-bit time: the extension data reflects all changes through the given +// time which is stored as the nanoseconds elapsed since midnight, +// January 1, 1970. +// +// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap. +// +// - An ewah bitmap, the n-th bit indicates whether the n-th index entry +// is not CE_FSMONITOR_VALID. +// +// == End of Index Entry +// +// The End of Index Entry (EOIE) is used to locate the end of the variable +// length index entries and the beginning of the extensions. Code can take +// advantage of this to quickly locate the index extensions without having +// to parse through all of the index entries. 
+//
+// Because it must be able to be loaded before the variable length cache
+// entries and other index extensions, this extension must be written last.
+// The signature for this extension is { 'E', 'O', 'I', 'E' }.
+//
+// The extension consists of:
+//
+// - 32-bit offset to the end of the index entries
+//
+// - 160-bit SHA-1 over the extension types and their sizes (but not
+// their contents). E.g. if we have "TREE" extension that is N-bytes
+// long, "REUC" extension that is M-bytes long, followed by "EOIE",
+// then the hash would be:
+//
+//	SHA-1("TREE" + <binary representation of N> +
+//	      "REUC" + <binary representation of M>)
+//
+// === Index Entry Offset Table
+//
+// The Index Entry Offset Table (IEOT) is used to help address the CPU
+// cost of loading the index by enabling multi-threading the process of
+// converting cache entries from the on-disk format to the in-memory format.
+// The signature for this extension is { 'I', 'E', 'O', 'T' }.
+//
+// The extension consists of:
+//
+// - 32-bit version (currently 1)
+//
+// - A number of index offset entries each consisting of:
+//
+// - 32-bit offset from the beginning of the file to the first cache entry
+// in this block of entries.
+//
+// - 32-bit count of cache entries in this block
+package index
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go
new file mode 100644
index 0000000000..00d4e7a317
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go
@@ -0,0 +1,150 @@
+package index
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"errors"
+	"hash"
+	"io"
+	"sort"
+	"time"
+
+	"github.com/go-git/go-git/v5/utils/binary"
+)
+
+var (
+	// EncodeVersionSupported is the range of supported index versions
+	EncodeVersionSupported uint32 = 2
+
+	// ErrInvalidTimestamp is returned by Encode if an Index contains an
+	// Entry with negative timestamp values
+	ErrInvalidTimestamp = errors.New("negative timestamps are not allowed")
+)
+
+// An Encoder writes an Index to an output stream.
+type Encoder struct {
+	w    io.Writer
+	hash hash.Hash
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	h := sha1.New()
+	mw := io.MultiWriter(w, h)
+	return &Encoder{mw, h}
+}
+
+// Encode writes the Index to the stream of the encoder.
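+//
+// A minimal usage sketch (hypothetical caller, not part of this package;
+// assumes an *Index `idx` with Version set to EncodeVersionSupported and
+// an io.Writer `f`, e.g. an open .git/index file):
+//
+//	e := NewEncoder(f)
+//	if err := e.Encode(idx); err != nil {
+//		// e.g. ErrUnsupportedVersion or ErrInvalidTimestamp
+//	}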
+func (e *Encoder) Encode(idx *Index) error { + // TODO: support versions v3 and v4 + // TODO: support extensions + if idx.Version != EncodeVersionSupported { + return ErrUnsupportedVersion + } + + if err := e.encodeHeader(idx); err != nil { + return err + } + + if err := e.encodeEntries(idx); err != nil { + return err + } + + return e.encodeFooter() +} + +func (e *Encoder) encodeHeader(idx *Index) error { + return binary.Write(e.w, + indexSignature, + idx.Version, + uint32(len(idx.Entries)), + ) +} + +func (e *Encoder) encodeEntries(idx *Index) error { + sort.Sort(byName(idx.Entries)) + + for _, entry := range idx.Entries { + if err := e.encodeEntry(entry); err != nil { + return err + } + + wrote := entryHeaderLength + len(entry.Name) + if err := e.padEntry(wrote); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeEntry(entry *Entry) error { + if entry.IntentToAdd || entry.SkipWorktree { + return ErrUnsupportedVersion + } + + sec, nsec, err := e.timeToUint32(&entry.CreatedAt) + if err != nil { + return err + } + + msec, mnsec, err := e.timeToUint32(&entry.ModifiedAt) + if err != nil { + return err + } + + flags := uint16(entry.Stage&0x3) << 12 + if l := len(entry.Name); l < nameMask { + flags |= uint16(l) + } else { + flags |= nameMask + } + + flow := []interface{}{ + sec, nsec, + msec, mnsec, + entry.Dev, + entry.Inode, + entry.Mode, + entry.UID, + entry.GID, + entry.Size, + entry.Hash[:], + flags, + } + + if err := binary.Write(e.w, flow...); err != nil { + return err + } + + return binary.Write(e.w, []byte(entry.Name)) +} + +func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) { + if t.IsZero() { + return 0, 0, nil + } + + if t.Unix() < 0 || t.UnixNano() < 0 { + return 0, 0, ErrInvalidTimestamp + } + + return uint32(t.Unix()), uint32(t.Nanosecond()), nil +} + +func (e *Encoder) padEntry(wrote int) error { + padLen := 8 - wrote%8 + + _, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen)) + return err +} + +func (e *Encoder) encodeFooter() error { + return binary.Write(e.w, e.hash.Sum(nil)) +} + +type byName []*Entry + +func (l byName) Len() int { return len(l) } +func (l byName) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byName) Less(i, j int) bool { return l[i].Name < l[j].Name } diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go new file mode 100644 index 0000000000..649416a2b4 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/index.go @@ -0,0 +1,213 @@ +package index + +import ( + "bytes" + "errors" + "fmt" + "path/filepath" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" +) + +var ( + // ErrUnsupportedVersion is returned by Decode when the index file version + // is not supported. + ErrUnsupportedVersion = errors.New("unsupported version") + // ErrEntryNotFound is returned by Index.Entry, if an entry is not found. 
+	ErrEntryNotFound = errors.New("entry not found")
+
+	indexSignature              = []byte{'D', 'I', 'R', 'C'}
+	treeExtSignature            = []byte{'T', 'R', 'E', 'E'}
+	resolveUndoExtSignature     = []byte{'R', 'E', 'U', 'C'}
+	endOfIndexEntryExtSignature = []byte{'E', 'O', 'I', 'E'}
+)
+
+// Stage during merge
+type Stage int
+
+const (
+	// Merged is the default stage, fully merged
+	Merged Stage = 0
+	// AncestorMode is the base revision
+	AncestorMode Stage = 1
+	// OurMode is the first tree revision, ours
+	OurMode Stage = 2
+	// TheirMode is the second tree revision, theirs
+	TheirMode Stage = 3
+)
+
+// Index contains the information about which objects are currently checked out
+// in the worktree, having information about the working files. Changes in
+// worktree are detected using this Index. The Index is also used during merges
+type Index struct {
+	// Version is index version
+	Version uint32
+	// Entries collection of entries represented by this Index. The order of
+	// this collection is not guaranteed
+	Entries []*Entry
+	// Cache represents the 'Cached tree' extension
+	Cache *Tree
+	// ResolveUndo represents the 'Resolve undo' extension
+	ResolveUndo *ResolveUndo
+	// EndOfIndexEntry represents the 'End of Index Entry' extension
+	EndOfIndexEntry *EndOfIndexEntry
+}
+
+// Add creates a new Entry and returns it. The caller should first check that
+// another entry with the same path does not exist.
+func (i *Index) Add(path string) *Entry {
+	e := &Entry{
+		Name: filepath.ToSlash(path),
+	}
+
+	i.Entries = append(i.Entries, e)
+	return e
+}
+
+// Entry returns the entry that matches the given path, if any.
+func (i *Index) Entry(path string) (*Entry, error) {
+	path = filepath.ToSlash(path)
+	for _, e := range i.Entries {
+		if e.Name == path {
+			return e, nil
+		}
+	}
+
+	return nil, ErrEntryNotFound
+}
+
+// Remove removes the entry that matches the given path and returns the
+// deleted entry.
+func (i *Index) Remove(path string) (*Entry, error) {
+	path = filepath.ToSlash(path)
+	for index, e := range i.Entries {
+		if e.Name == path {
+			i.Entries = append(i.Entries[:index], i.Entries[index+1:]...)
+			return e, nil
+		}
+	}
+
+	return nil, ErrEntryNotFound
+}
+
+// Glob returns all the entries matching pattern, or nil if there is no
+// matching entry. The syntax of patterns is the same as in filepath.Glob.
+func (i *Index) Glob(pattern string) (matches []*Entry, err error) {
+	pattern = filepath.ToSlash(pattern)
+	for _, e := range i.Entries {
+		m, err := match(pattern, e.Name)
+		if err != nil {
+			return nil, err
+		}
+
+		if m {
+			matches = append(matches, e)
+		}
+	}
+
+	return
+}
+
+// String is equivalent to `git ls-files --stage --debug`
+func (i *Index) String() string {
+	buf := bytes.NewBuffer(nil)
+	for _, e := range i.Entries {
+		buf.WriteString(e.String())
+	}
+
+	return buf.String()
+}
+
+// Entry represents a single file (or stage of a file) in the cache. An entry
+// represents exactly one stage of a file. If a file path is unmerged then
+// multiple Entry instances may appear for the same path name.
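+//
+// For example, a conflicted path may be represented by up to three entries
+// sharing the same Name (illustrative, following the Stage constants above):
+//
+//	example.go  Stage=AncestorMode (1)  base revision
+//	example.go  Stage=OurMode      (2)  our revision
+//	example.go  Stage=TheirMode    (3)  their revision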
+type Entry struct {
+	// Hash is the SHA1 of the represented file
+	Hash plumbing.Hash
+	// Name is the Entry path name relative to top level directory
+	Name string
+	// CreatedAt time when the tracked path was created
+	CreatedAt time.Time
+	// ModifiedAt time when the tracked path was changed
+	ModifiedAt time.Time
+	// Dev and Inode of the tracked path
+	Dev, Inode uint32
+	// Mode of the path
+	Mode filemode.FileMode
+	// UID and GID, user ID and group ID of the owner
+	UID, GID uint32
+	// Size is the length in bytes for regular files
+	Size uint32
+	// Stage on a merge defines what stage this entry represents
+	// https://git-scm.com/book/en/v2/Git-Tools-Advanced-Merging
+	Stage Stage
+	// SkipWorktree used in sparse checkouts
+	// https://git-scm.com/docs/git-read-tree#_sparse_checkout
+	SkipWorktree bool
+	// IntentToAdd records only the fact that the path will be added later
+	// https://git-scm.com/docs/git-add ("git add -N")
+	IntentToAdd bool
+}
+
+func (e Entry) String() string {
+	buf := bytes.NewBuffer(nil)
+
+	fmt.Fprintf(buf, "%06o %s %d\t%s\n", e.Mode, e.Hash, e.Stage, e.Name)
+	fmt.Fprintf(buf, "  ctime: %d:%d\n", e.CreatedAt.Unix(), e.CreatedAt.Nanosecond())
+	fmt.Fprintf(buf, "  mtime: %d:%d\n", e.ModifiedAt.Unix(), e.ModifiedAt.Nanosecond())
+	fmt.Fprintf(buf, "  dev: %d\tino: %d\n", e.Dev, e.Inode)
+	fmt.Fprintf(buf, "  uid: %d\tgid: %d\n", e.UID, e.GID)
+	fmt.Fprintf(buf, "  size: %d\tflags: %x\n", e.Size, 0)
+
+	return buf.String()
+}
+
+// Tree contains pre-computed hashes for trees that can be derived from the
+// index. It helps speed up tree object generation from index for a new commit.
+type Tree struct {
+	Entries []TreeEntry
+}
+
+// TreeEntry entry of a cached Tree
+type TreeEntry struct {
+	// Path component (relative to its parent directory)
+	Path string
+	// Entries is the number of entries in the index that is covered by the
+	// tree this entry represents.
+	Entries int
+	// Trees is the number of subtrees this tree has
+	Trees int
+	// Hash object name for the object that would result from writing this span
+	// of index as a tree.
+	Hash plumbing.Hash
+}
+
+// ResolveUndo is used when a conflict is resolved (e.g. with "git add path"):
+// the higher stage entries are removed and a stage-0 entry with proper
+// resolution is added. When these higher stage entries are removed, they are
+// saved in the resolve undo extension.
+type ResolveUndo struct {
+	Entries []ResolveUndoEntry
+}
+
+// ResolveUndoEntry contains the information about a conflict when it is
+// resolved
+type ResolveUndoEntry struct {
+	Path   string
+	Stages map[Stage]plumbing.Hash
+}
+
+// EndOfIndexEntry is the End of Index Entry (EOIE) extension, used to locate
+// the end of the variable length index entries and the beginning of the
+// extensions. Code can take advantage of this to quickly locate the index
+// extensions without having to parse through all of the index entries.
+//
+// Because it must be able to be loaded before the variable length cache
+// entries and other index extensions, this extension must be written last.
+type EndOfIndexEntry struct {
+	// Offset to the end of the index entries
+	Offset uint32
+	// Hash is a SHA-1 over the extension types and their sizes (but not
+	// their contents).
+ Hash plumbing.Hash +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go new file mode 100644 index 0000000000..2891d7d34c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/index/match.go @@ -0,0 +1,186 @@ +package index + +import ( + "path/filepath" + "runtime" + "unicode/utf8" +) + +// match is filepath.Match with support to match fullpath and not only filenames +// code from: +// https://github.com/golang/go/blob/39852bf4cce6927e01d0136c7843f65a801738cb/src/path/filepath/match.go#L44-L224 +func match(pattern, name string) (matched bool, err error) { +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. + // Cannot skip /. + for i := 0; i < len(name); i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. +func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + if runtime.GOOS != "windows" { + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + for len(chunk) > 0 { + if len(s) == 0 { + return + } + switch chunk[0] { + case '[': + // character class + r, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + // We can't end right after '[', we're expecting at least + // a closing bracket and possibly a caret. 
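+			// For example, for the chunk "[a-c]x" this case consumes
+			// the 'a'-'c' range up to the closing ']'; matching then
+			// resumes with the literal 'x'.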
+ if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + // possibly negated + negated := chunk[0] == '^' + if negated { + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match == negated { + return + } + + case '?': + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + + case '\\': + if runtime.GOOS != "windows" { + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + } + fallthrough + + default: + if chunk[0] != s[0] { + return + } + s = s[1:] + chunk = chunk[1:] + } + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = filepath.ErrBadPattern + return + } + if chunk[0] == '\\' && runtime.GOOS != "windows" { + chunk = chunk[1:] + if len(chunk) == 0 { + err = filepath.ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = filepath.ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = filepath.ErrBadPattern + } + return +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go new file mode 100644 index 0000000000..a7145160ae --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/doc.go @@ -0,0 +1,2 @@ +// Package objfile implements encoding and decoding of object files. +package objfile diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go new file mode 100644 index 0000000000..b6b2ca06dd --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/reader.go @@ -0,0 +1,114 @@ +package objfile + +import ( + "compress/zlib" + "errors" + "io" + "strconv" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/format/packfile" +) + +var ( + ErrClosed = errors.New("objfile: already closed") + ErrHeader = errors.New("objfile: invalid header") + ErrNegativeSize = errors.New("objfile: negative object size") +) + +// Reader reads and decodes compressed objfile data from a provided io.Reader. +// Reader implements io.ReadCloser. Close should be called when finished with +// the Reader. Close will not close the underlying io.Reader. +type Reader struct { + multi io.Reader + zlib io.ReadCloser + hasher plumbing.Hasher +} + +// NewReader returns a new Reader reading from r. 
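+//
+// A hypothetical usage sketch (assuming `f` is an io.Reader positioned at
+// the start of a zlib-compressed loose object):
+//
+//	r, err := NewReader(f)
+//	if err != nil { /* handle */ }
+//	t, size, err := r.Header() // e.g. plumbing.BlobObject, 42
+//	if err != nil { /* handle */ }
+//	buf := make([]byte, size)
+//	_, err = io.ReadFull(r, buf) // inflated object content
+//	hash := r.Hash()             // hash of what has been read so far
+//	_ = r.Close()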
+func NewReader(r io.Reader) (*Reader, error) {
+	zlib, err := zlib.NewReader(r)
+	if err != nil {
+		return nil, packfile.ErrZLib.AddDetails(err.Error())
+	}
+
+	return &Reader{
+		zlib: zlib,
+	}, nil
+}
+
+// Header reads the type and the size of object, and prepares the reader for read
+func (r *Reader) Header() (t plumbing.ObjectType, size int64, err error) {
+	var raw []byte
+	raw, err = r.readUntil(' ')
+	if err != nil {
+		return
+	}
+
+	t, err = plumbing.ParseObjectType(string(raw))
+	if err != nil {
+		return
+	}
+
+	raw, err = r.readUntil(0)
+	if err != nil {
+		return
+	}
+
+	size, err = strconv.ParseInt(string(raw), 10, 64)
+	if err != nil {
+		err = ErrHeader
+		return
+	}
+
+	defer r.prepareForRead(t, size)
+	return
+}
+
+// readUntil reads one byte at a time from r until it encounters delim or an
+// error.
+func (r *Reader) readUntil(delim byte) ([]byte, error) {
+	var buf [1]byte
+	value := make([]byte, 0, 16)
+	for {
+		if n, err := r.zlib.Read(buf[:]); err != nil && (err != io.EOF || n == 0) {
+			if err == io.EOF {
+				return nil, ErrHeader
+			}
+			return nil, err
+		}
+
+		if buf[0] == delim {
+			return value, nil
+		}
+
+		value = append(value, buf[0])
+	}
+}
+
+func (r *Reader) prepareForRead(t plumbing.ObjectType, size int64) {
+	r.hasher = plumbing.NewHasher(t, size)
+	r.multi = io.TeeReader(r.zlib, r.hasher)
+}
+
+// Read reads len(p) bytes into p from the object data stream. It returns
+// the number of bytes read (0 <= n <= len(p)) and any error encountered. Even
+// if Read returns n < len(p), it may use all of p as scratch space during the
+// call.
+//
+// If Read encounters the end of the data stream it will return err == io.EOF,
+// either in the current call if n > 0 or in a subsequent call.
+func (r *Reader) Read(p []byte) (n int, err error) {
+	return r.multi.Read(p)
+}
+
+// Hash returns the hash of the object data stream that has been read so far.
+func (r *Reader) Hash() plumbing.Hash {
+	return r.hasher.Sum()
+}
+
+// Close releases any resources consumed by the Reader. Calling Close does not
+// close the wrapped io.Reader originally passed to NewReader.
+func (r *Reader) Close() error {
+	return r.zlib.Close()
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go
new file mode 100644
index 0000000000..2a96a4370b
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/objfile/writer.go
@@ -0,0 +1,109 @@
+package objfile
+
+import (
+	"compress/zlib"
+	"errors"
+	"io"
+	"strconv"
+
+	"github.com/go-git/go-git/v5/plumbing"
+)
+
+var (
+	ErrOverflow = errors.New("objfile: declared data length exceeded (overflow)")
+)
+
+// Writer writes and encodes data in compressed objfile format to a provided
+// io.Writer. Close should be called when finished with the Writer. Close will
+// not close the underlying io.Writer.
+type Writer struct {
+	raw    io.Writer
+	zlib   io.WriteCloser
+	hasher plumbing.Hasher
+	multi  io.Writer
+
+	closed  bool
+	pending int64 // number of unwritten bytes
+}
+
+// NewWriter returns a new Writer writing to w.
+//
+// The returned Writer implements io.WriteCloser. Close should be called when
+// finished with the Writer. Close will not close the underlying io.Writer.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		raw:  w,
+		zlib: zlib.NewWriter(w),
+	}
+}
+
+// WriteHeader writes the type and the size and prepares to accept the object's
+// contents. If an invalid t is provided, plumbing.ErrInvalidType is returned.
If a +// negative size is provided, ErrNegativeSize is returned. +func (w *Writer) WriteHeader(t plumbing.ObjectType, size int64) error { + if !t.Valid() { + return plumbing.ErrInvalidType + } + if size < 0 { + return ErrNegativeSize + } + + b := t.Bytes() + b = append(b, ' ') + b = append(b, []byte(strconv.FormatInt(size, 10))...) + b = append(b, 0) + + defer w.prepareForWrite(t, size) + _, err := w.zlib.Write(b) + + return err +} + +func (w *Writer) prepareForWrite(t plumbing.ObjectType, size int64) { + w.pending = size + + w.hasher = plumbing.NewHasher(t, size) + w.multi = io.MultiWriter(w.zlib, w.hasher) +} + +// Write writes the object's contents. Write returns the error ErrOverflow if +// more than size bytes are written after WriteHeader. +func (w *Writer) Write(p []byte) (n int, err error) { + if w.closed { + return 0, ErrClosed + } + + overwrite := false + if int64(len(p)) > w.pending { + p = p[0:w.pending] + overwrite = true + } + + n, err = w.multi.Write(p) + w.pending -= int64(n) + if err == nil && overwrite { + err = ErrOverflow + return + } + + return +} + +// Hash returns the hash of the object data stream that has been written so far. +// It can be called before or after Close. +func (w *Writer) Hash() plumbing.Hash { + return w.hasher.Sum() // Not yet closed, return hash of data written so far +} + +// Close releases any resources consumed by the Writer. +// +// Calling Close does not close the wrapped io.Writer originally passed to +// NewWriter. +func (w *Writer) Close() error { + if err := w.zlib.Close(); err != nil { + return err + } + + w.closed = true + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go new file mode 100644 index 0000000000..df423ad50c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/common.go @@ -0,0 +1,78 @@ +package packfile + +import ( + "bytes" + "compress/zlib" + "io" + "sync" + + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var signature = []byte{'P', 'A', 'C', 'K'} + +const ( + // VersionSupported is the packfile version supported by this package + VersionSupported uint32 = 2 + + firstLengthBits = uint8(4) // the first byte into object header has 4 bits to store the length + lengthBits = uint8(7) // subsequent bytes has 7 bits to store the length + maskFirstLength = 15 // 0000 1111 + maskContinue = 0x80 // 1000 0000 + maskLength = uint8(127) // 0111 1111 + maskType = uint8(112) // 0111 0000 +) + +// UpdateObjectStorage updates the storer with the objects in the given +// packfile. +func UpdateObjectStorage(s storer.Storer, packfile io.Reader) error { + if pw, ok := s.(storer.PackfileWriter); ok { + return WritePackfileToObjectStorage(pw, packfile) + } + + p, err := NewParserWithStorage(NewScanner(packfile), s) + if err != nil { + return err + } + + _, err = p.Parse() + return err +} + +// WritePackfileToObjectStorage writes all the packfile objects into the given +// object storage. 
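+//
+// This is the fast path taken by UpdateObjectStorage above when the storer
+// implements storer.PackfileWriter: the pack bytes are streamed through
+// io.Copy without being parsed object by object.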
+func WritePackfileToObjectStorage( + sw storer.PackfileWriter, + packfile io.Reader, +) (err error) { + w, err := sw.PackfileWriter() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + var n int64 + n, err = io.Copy(w, packfile) + if err == nil && n == 0 { + return ErrEmptyPackfile + } + + return err +} + +var bufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(nil) + }, +} + +var zlibInitBytes = []byte{0x78, 0x9c, 0x01, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x01} + +var zlibReaderPool = sync.Pool{ + New: func() interface{} { + r, _ := zlib.NewReader(bytes.NewReader(zlibInitBytes)) + return r + }, +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go new file mode 100644 index 0000000000..07a61120e5 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go @@ -0,0 +1,297 @@ +package packfile + +const blksz = 16 +const maxChainLength = 64 + +// deltaIndex is a modified version of JGit's DeltaIndex adapted to our current +// design. +type deltaIndex struct { + table []int + entries []int + mask int +} + +func (idx *deltaIndex) init(buf []byte) { + scanner := newDeltaIndexScanner(buf, len(buf)) + idx.mask = scanner.mask + idx.table = scanner.table + idx.entries = make([]int, countEntries(scanner)+1) + idx.copyEntries(scanner) +} + +// findMatch returns the offset of src where the block starting at tgtOffset +// is and the length of the match. A length of 0 means there was no match. A +// length of -1 means the src length is lower than the blksz and whatever +// other positive length is the length of the match in bytes. +func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l int) { + if len(tgt) < tgtOffset+s { + return 0, len(tgt) - tgtOffset + } + + if len(src) < blksz { + return 0, -1 + } + + if len(tgt) >= tgtOffset+s && len(src) >= blksz { + h := hashBlock(tgt, tgtOffset) + tIdx := h & idx.mask + eIdx := idx.table[tIdx] + if eIdx != 0 { + srcOffset = idx.entries[eIdx] + } else { + return + } + + l = matchLength(src, tgt, tgtOffset, srcOffset) + } + + return +} + +func matchLength(src, tgt []byte, otgt, osrc int) (l int) { + lensrc := len(src) + lentgt := len(tgt) + for (osrc < lensrc && otgt < lentgt) && src[osrc] == tgt[otgt] { + l++ + osrc++ + otgt++ + } + return +} + +func countEntries(scan *deltaIndexScanner) (cnt int) { + // Figure out exactly how many entries we need. As we do the + // enumeration truncate any delta chains longer than what we + // are willing to scan during encode. This keeps the encode + // logic linear in the size of the input rather than quadratic. + for i := 0; i < len(scan.table); i++ { + h := scan.table[i] + if h == 0 { + continue + } + + size := 0 + for { + size++ + if size == maxChainLength { + scan.next[h] = 0 + break + } + h = scan.next[h] + + if h == 0 { + break + } + } + cnt += size + } + + return +} + +func (idx *deltaIndex) copyEntries(scanner *deltaIndexScanner) { + // Rebuild the entries list from the scanner, positioning all + // blocks in the same hash chain next to each other. We can + // then later discard the next list, along with the scanner. 
+ // + next := 1 + for i := 0; i < len(idx.table); i++ { + h := idx.table[i] + if h == 0 { + continue + } + + idx.table[i] = next + for { + idx.entries[next] = scanner.entries[h] + next++ + h = scanner.next[h] + + if h == 0 { + break + } + } + } +} + +type deltaIndexScanner struct { + table []int + entries []int + next []int + mask int + count int +} + +func newDeltaIndexScanner(buf []byte, size int) *deltaIndexScanner { + size -= size % blksz + worstCaseBlockCnt := size / blksz + if worstCaseBlockCnt < 1 { + return new(deltaIndexScanner) + } + + tableSize := tableSize(worstCaseBlockCnt) + scanner := &deltaIndexScanner{ + table: make([]int, tableSize), + mask: tableSize - 1, + entries: make([]int, worstCaseBlockCnt+1), + next: make([]int, worstCaseBlockCnt+1), + } + + scanner.scan(buf, size) + return scanner +} + +// slightly modified version of JGit's DeltaIndexScanner. We store the offset on the entries +// instead of the entries and the key, so we avoid operations to retrieve the offset later, as +// we don't use the key. +// See: https://github.com/eclipse/jgit/blob/005e5feb4ecd08c4e4d141a38b9e7942accb3212/org.eclipse.jgit/src/org/eclipse/jgit/internal/storage/pack/DeltaIndexScanner.java +func (s *deltaIndexScanner) scan(buf []byte, end int) { + lastHash := 0 + ptr := end - blksz + + for { + key := hashBlock(buf, ptr) + tIdx := key & s.mask + head := s.table[tIdx] + if head != 0 && lastHash == key { + s.entries[head] = ptr + } else { + s.count++ + eIdx := s.count + s.entries[eIdx] = ptr + s.next[eIdx] = head + s.table[tIdx] = eIdx + } + + lastHash = key + ptr -= blksz + + if 0 > ptr { + break + } + } +} + +func tableSize(worstCaseBlockCnt int) int { + shift := 32 - leadingZeros(uint32(worstCaseBlockCnt)) + sz := 1 << uint(shift-1) + if sz < worstCaseBlockCnt { + sz <<= 1 + } + return sz +} + +// use https://golang.org/pkg/math/bits/#LeadingZeros32 in the future +func leadingZeros(x uint32) (n int) { + if x >= 1<<16 { + x >>= 16 + n = 16 + } + if x >= 1<<8 { + x >>= 8 + n += 8 + } + n += int(len8tab[x]) + return 32 - n +} + +var len8tab = [256]uint8{ + 0x00, 0x01, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 
0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, +} + +func hashBlock(raw []byte, ptr int) int { + // The first 4 steps collapse out into a 4 byte big-endian decode, + // with a larger right shift as we combined shift lefts together. + // + hash := ((uint32(raw[ptr]) & 0xff) << 24) | + ((uint32(raw[ptr+1]) & 0xff) << 16) | + ((uint32(raw[ptr+2]) & 0xff) << 8) | + (uint32(raw[ptr+3]) & 0xff) + hash ^= T[hash>>31] + + hash = ((hash << 8) | (uint32(raw[ptr+4]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+5]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+6]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+7]) & 0xff)) ^ T[hash>>23] + + hash = ((hash << 8) | (uint32(raw[ptr+8]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+9]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+10]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+11]) & 0xff)) ^ T[hash>>23] + + hash = ((hash << 8) | (uint32(raw[ptr+12]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+13]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+14]) & 0xff)) ^ T[hash>>23] + hash = ((hash << 8) | (uint32(raw[ptr+15]) & 0xff)) ^ T[hash>>23] + + return int(hash) +} + +var T = []uint32{0x00000000, 0xd4c6b32d, 0x7d4bd577, + 0xa98d665a, 0x2e5119c3, 0xfa97aaee, 0x531accb4, 0x87dc7f99, + 0x5ca23386, 0x886480ab, 0x21e9e6f1, 0xf52f55dc, 0x72f32a45, + 0xa6359968, 0x0fb8ff32, 0xdb7e4c1f, 0x6d82d421, 0xb944670c, + 0x10c90156, 0xc40fb27b, 0x43d3cde2, 0x97157ecf, 0x3e981895, + 0xea5eabb8, 0x3120e7a7, 0xe5e6548a, 0x4c6b32d0, 0x98ad81fd, + 0x1f71fe64, 0xcbb74d49, 0x623a2b13, 0xb6fc983e, 0x0fc31b6f, + 0xdb05a842, 0x7288ce18, 0xa64e7d35, 0x219202ac, 0xf554b181, + 0x5cd9d7db, 0x881f64f6, 0x536128e9, 0x87a79bc4, 0x2e2afd9e, + 0xfaec4eb3, 0x7d30312a, 0xa9f68207, 0x007be45d, 0xd4bd5770, + 0x6241cf4e, 0xb6877c63, 0x1f0a1a39, 0xcbcca914, 0x4c10d68d, + 0x98d665a0, 0x315b03fa, 0xe59db0d7, 0x3ee3fcc8, 0xea254fe5, + 0x43a829bf, 0x976e9a92, 0x10b2e50b, 0xc4745626, 0x6df9307c, + 0xb93f8351, 0x1f8636de, 0xcb4085f3, 0x62cde3a9, 0xb60b5084, + 0x31d72f1d, 0xe5119c30, 0x4c9cfa6a, 0x985a4947, 0x43240558, + 0x97e2b675, 0x3e6fd02f, 0xeaa96302, 0x6d751c9b, 0xb9b3afb6, + 0x103ec9ec, 0xc4f87ac1, 0x7204e2ff, 0xa6c251d2, 0x0f4f3788, + 0xdb8984a5, 0x5c55fb3c, 0x88934811, 0x211e2e4b, 0xf5d89d66, + 0x2ea6d179, 0xfa606254, 0x53ed040e, 0x872bb723, 0x00f7c8ba, + 0xd4317b97, 0x7dbc1dcd, 0xa97aaee0, 0x10452db1, 0xc4839e9c, + 0x6d0ef8c6, 0xb9c84beb, 0x3e143472, 0xead2875f, 0x435fe105, + 0x97995228, 0x4ce71e37, 0x9821ad1a, 0x31accb40, 0xe56a786d, + 0x62b607f4, 0xb670b4d9, 0x1ffdd283, 0xcb3b61ae, 0x7dc7f990, + 0xa9014abd, 0x008c2ce7, 0xd44a9fca, 0x5396e053, 0x8750537e, + 0x2edd3524, 0xfa1b8609, 0x2165ca16, 0xf5a3793b, 0x5c2e1f61, + 0x88e8ac4c, 0x0f34d3d5, 0xdbf260f8, 0x727f06a2, 0xa6b9b58f, + 0x3f0c6dbc, 0xebcade91, 0x4247b8cb, 0x96810be6, 0x115d747f, + 0xc59bc752, 0x6c16a108, 0xb8d01225, 0x63ae5e3a, 0xb768ed17, + 0x1ee58b4d, 0xca233860, 0x4dff47f9, 0x9939f4d4, 0x30b4928e, + 0xe47221a3, 0x528eb99d, 0x86480ab0, 0x2fc56cea, 0xfb03dfc7, + 0x7cdfa05e, 0xa8191373, 0x01947529, 0xd552c604, 0x0e2c8a1b, + 0xdaea3936, 0x73675f6c, 0xa7a1ec41, 0x207d93d8, 0xf4bb20f5, + 0x5d3646af, 0x89f0f582, 0x30cf76d3, 0xe409c5fe, 0x4d84a3a4, + 0x99421089, 0x1e9e6f10, 0xca58dc3d, 0x63d5ba67, 0xb713094a, + 0x6c6d4555, 0xb8abf678, 0x11269022, 0xc5e0230f, 0x423c5c96, + 0x96faefbb, 0x3f7789e1, 0xebb13acc, 0x5d4da2f2, 0x898b11df, + 0x20067785, 
0xf4c0c4a8, 0x731cbb31, 0xa7da081c, 0x0e576e46,
+	0xda91dd6b, 0x01ef9174, 0xd5292259, 0x7ca44403, 0xa862f72e,
+	0x2fbe88b7, 0xfb783b9a, 0x52f55dc0, 0x8633eeed, 0x208a5b62,
+	0xf44ce84f, 0x5dc18e15, 0x89073d38, 0x0edb42a1, 0xda1df18c,
+	0x739097d6, 0xa75624fb, 0x7c2868e4, 0xa8eedbc9, 0x0163bd93,
+	0xd5a50ebe, 0x52797127, 0x86bfc20a, 0x2f32a450, 0xfbf4177d,
+	0x4d088f43, 0x99ce3c6e, 0x30435a34, 0xe485e919, 0x63599680,
+	0xb79f25ad, 0x1e1243f7, 0xcad4f0da, 0x11aabcc5, 0xc56c0fe8,
+	0x6ce169b2, 0xb827da9f, 0x3ffba506, 0xeb3d162b, 0x42b07071,
+	0x9676c35c, 0x2f49400d, 0xfb8ff320, 0x5202957a, 0x86c42657,
+	0x011859ce, 0xd5deeae3, 0x7c538cb9, 0xa8953f94, 0x73eb738b,
+	0xa72dc0a6, 0x0ea0a6fc, 0xda6615d1, 0x5dba6a48, 0x897cd965,
+	0x20f1bf3f, 0xf4370c12, 0x42cb942c, 0x960d2701, 0x3f80415b,
+	0xeb46f276, 0x6c9a8def, 0xb85c3ec2, 0x11d15898, 0xc517ebb5,
+	0x1e69a7aa, 0xcaaf1487, 0x632272dd, 0xb7e4c1f0, 0x3038be69,
+	0xe4fe0d44, 0x4d736b1e, 0x99b5d833,
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go
new file mode 100644
index 0000000000..4b60ff3947
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_selector.go
@@ -0,0 +1,369 @@
+package packfile
+
+import (
+	"sort"
+	"sync"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+)
+
+const (
+	// maxDepth limits how many delta-on-delta steps we can do;
+	// 50 is the default value used in JGit
+	maxDepth = int64(50)
+)
+
+// applyDelta is the set of object types that we should apply deltas to
+var applyDelta = map[plumbing.ObjectType]bool{
+	plumbing.BlobObject: true,
+	plumbing.TreeObject: true,
+}
+
+type deltaSelector struct {
+	storer storer.EncodedObjectStorer
+}
+
+func newDeltaSelector(s storer.EncodedObjectStorer) *deltaSelector {
+	return &deltaSelector{s}
+}
+
+// ObjectsToPack creates a list of ObjectToPack from the hashes
+// provided, creating deltas when suitable, using a specific
+// internal logic. `packWindow` specifies the size of the sliding
+// window used to compare objects for delta compression; 0 turns off
+// delta compression entirely.
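+//
+// A hypothetical caller-side sketch (names assumed for illustration;
+// `s` is a storer.EncodedObjectStorer, `hashes` the objects to pack):
+//
+//	sel := newDeltaSelector(s)
+//	otp, err := sel.ObjectsToPack(hashes, 10) // sliding window of 10
+//	if err != nil { /* handle */ }
+//	// otp is now sorted by type and size, with suitable deltas assigned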
+func (dw *deltaSelector) ObjectsToPack( + hashes []plumbing.Hash, + packWindow uint, +) ([]*ObjectToPack, error) { + otp, err := dw.objectsToPack(hashes, packWindow) + if err != nil { + return nil, err + } + + if packWindow == 0 { + return otp, nil + } + + dw.sort(otp) + + var objectGroups [][]*ObjectToPack + var prev *ObjectToPack + i := -1 + for _, obj := range otp { + if prev == nil || prev.Type() != obj.Type() { + objectGroups = append(objectGroups, []*ObjectToPack{obj}) + i++ + prev = obj + } else { + objectGroups[i] = append(objectGroups[i], obj) + } + } + + var wg sync.WaitGroup + var once sync.Once + for _, objs := range objectGroups { + objs := objs + wg.Add(1) + go func() { + if walkErr := dw.walk(objs, packWindow); walkErr != nil { + once.Do(func() { + err = walkErr + }) + } + wg.Done() + }() + } + wg.Wait() + + if err != nil { + return nil, err + } + + return otp, nil +} + +func (dw *deltaSelector) objectsToPack( + hashes []plumbing.Hash, + packWindow uint, +) ([]*ObjectToPack, error) { + var objectsToPack []*ObjectToPack + for _, h := range hashes { + var o plumbing.EncodedObject + var err error + if packWindow == 0 { + o, err = dw.encodedObject(h) + } else { + o, err = dw.encodedDeltaObject(h) + } + if err != nil { + return nil, err + } + + otp := newObjectToPack(o) + if _, ok := o.(plumbing.DeltaObject); ok { + otp.CleanOriginal() + } + + objectsToPack = append(objectsToPack, otp) + } + + if packWindow == 0 { + return objectsToPack, nil + } + + if err := dw.fixAndBreakChains(objectsToPack); err != nil { + return nil, err + } + + return objectsToPack, nil +} + +func (dw *deltaSelector) encodedDeltaObject(h plumbing.Hash) (plumbing.EncodedObject, error) { + edos, ok := dw.storer.(storer.DeltaObjectStorer) + if !ok { + return dw.encodedObject(h) + } + + return edos.DeltaObject(plumbing.AnyObject, h) +} + +func (dw *deltaSelector) encodedObject(h plumbing.Hash) (plumbing.EncodedObject, error) { + return dw.storer.EncodedObject(plumbing.AnyObject, h) +} + +func (dw *deltaSelector) fixAndBreakChains(objectsToPack []*ObjectToPack) error { + m := make(map[plumbing.Hash]*ObjectToPack, len(objectsToPack)) + for _, otp := range objectsToPack { + m[otp.Hash()] = otp + } + + for _, otp := range objectsToPack { + if err := dw.fixAndBreakChainsOne(m, otp); err != nil { + return err + } + } + + return nil +} + +func (dw *deltaSelector) fixAndBreakChainsOne(objectsToPack map[plumbing.Hash]*ObjectToPack, otp *ObjectToPack) error { + if !otp.Object.Type().IsDelta() { + return nil + } + + // Initial ObjectToPack instances might have a delta assigned to Object + // but no actual base initially. Once Base is assigned to a delta, it means + // we already fixed it. + if otp.Base != nil { + return nil + } + + do, ok := otp.Object.(plumbing.DeltaObject) + if !ok { + // if this is not a DeltaObject, then we cannot retrieve its base, + // so we have to break the delta chain here. + return dw.undeltify(otp) + } + + base, ok := objectsToPack[do.BaseHash()] + if !ok { + // The base of the delta is not in our list of objects to pack, so + // we break the chain. 
+ return dw.undeltify(otp) + } + + if err := dw.fixAndBreakChainsOne(objectsToPack, base); err != nil { + return err + } + + otp.SetDelta(base, otp.Object) + return nil +} + +func (dw *deltaSelector) restoreOriginal(otp *ObjectToPack) error { + if otp.Original != nil { + return nil + } + + if !otp.Object.Type().IsDelta() { + return nil + } + + obj, err := dw.encodedObject(otp.Hash()) + if err != nil { + return err + } + + otp.SetOriginal(obj) + + return nil +} + +// undeltify undeltifies an *ObjectToPack by retrieving the original object from +// the storer and resetting it. +func (dw *deltaSelector) undeltify(otp *ObjectToPack) error { + if err := dw.restoreOriginal(otp); err != nil { + return err + } + + otp.Object = otp.Original + otp.Depth = 0 + return nil +} + +func (dw *deltaSelector) sort(objectsToPack []*ObjectToPack) { + sort.Sort(byTypeAndSize(objectsToPack)) +} + +func (dw *deltaSelector) walk( + objectsToPack []*ObjectToPack, + packWindow uint, +) error { + indexMap := make(map[plumbing.Hash]*deltaIndex) + for i := 0; i < len(objectsToPack); i++ { + // Clean up the index map and reconstructed delta objects for anything + // outside our pack window, to save memory. + if i > int(packWindow) { + obj := objectsToPack[i-int(packWindow)] + + delete(indexMap, obj.Hash()) + + if obj.IsDelta() { + obj.SaveOriginalMetadata() + obj.CleanOriginal() + } + } + + target := objectsToPack[i] + + // If we already have a delta, we don't try to find a new one for this + // object. This happens when a delta is set to be reused from an existing + // packfile. + if target.IsDelta() { + continue + } + + // We only want to create deltas from specific types. + if !applyDelta[target.Type()] { + continue + } + + for j := i - 1; j >= 0 && i-j < int(packWindow); j-- { + base := objectsToPack[j] + // Objects must use only the same type as their delta base. + // Since objectsToPack is sorted by type and size, once we find + // a different type, we know we won't find more of them. + if base.Type() != target.Type() { + break + } + + if err := dw.tryToDeltify(indexMap, base, target); err != nil { + return err + } + } + } + + return nil +} + +func (dw *deltaSelector) tryToDeltify(indexMap map[plumbing.Hash]*deltaIndex, base, target *ObjectToPack) error { + // Original object might not be present if we're reusing a delta, so we + // ensure it is restored. + if err := dw.restoreOriginal(target); err != nil { + return err + } + + if err := dw.restoreOriginal(base); err != nil { + return err + } + + // If the sizes are radically different, this is a bad pairing. + if target.Size() < base.Size()>>4 { + return nil + } + + msz := dw.deltaSizeLimit( + target.Object.Size(), + base.Depth, + target.Depth, + target.IsDelta(), + ) + + // Nearly impossible to fit useful delta. + if msz <= 8 { + return nil + } + + // If we have to insert a lot to make this work, find another. 
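+	// (Here "insert a lot" is approximated by the size gap between base
+	// and target exceeding the delta size budget msz.)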
+ if base.Size()-target.Size() > msz { + return nil + } + + if _, ok := indexMap[base.Hash()]; !ok { + indexMap[base.Hash()] = new(deltaIndex) + } + + // Now we can generate the delta using originals + delta, err := getDelta(indexMap[base.Hash()], base.Original, target.Original) + if err != nil { + return err + } + + // if delta better than target + if delta.Size() < msz { + target.SetDelta(base, delta) + } + + return nil +} + +func (dw *deltaSelector) deltaSizeLimit(targetSize int64, baseDepth int, + targetDepth int, targetDelta bool) int64 { + if !targetDelta { + // Any delta should be no more than 50% of the original size + // (for text files deflate of whole form should shrink 50%). + n := targetSize >> 1 + + // Evenly distribute delta size limits over allowed depth. + // If src is non-delta (depth = 0), delta <= 50% of original. + // If src is almost at limit (9/10), delta <= 10% of original. + return n * (maxDepth - int64(baseDepth)) / maxDepth + } + + // With a delta base chosen any new delta must be "better". + // Retain the distribution described above. + d := int64(targetDepth) + n := targetSize + + // If target depth is bigger than maxDepth, this delta is not suitable to be used. + if d >= maxDepth { + return 0 + } + + // If src is whole (depth=0) and base is near limit (depth=9/10) + // any delta using src can be 10x larger and still be better. + // + // If src is near limit (depth=9/10) and base is whole (depth=0) + // a new delta dependent on src must be 1/10th the size. + return n * (maxDepth - int64(baseDepth)) / (maxDepth - d) +} + +type byTypeAndSize []*ObjectToPack + +func (a byTypeAndSize) Len() int { return len(a) } + +func (a byTypeAndSize) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a byTypeAndSize) Less(i, j int) bool { + if a[i].Type() < a[j].Type() { + return false + } + + if a[i].Type() > a[j].Type() { + return true + } + + return a[i].Size() > a[j].Size() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go new file mode 100644 index 0000000000..1951b34ef1 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/diff_delta.go @@ -0,0 +1,204 @@ +package packfile + +import ( + "bytes" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// See https://github.com/jelmer/dulwich/blob/master/dulwich/pack.py and +// https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js +// for more info + +const ( + // Standard chunk size used to generate fingerprints + s = 16 + + // https://github.com/git/git/blob/f7466e94375b3be27f229c78873f0acf8301c0a5/diff-delta.c#L428 + // Max size of a copy operation (64KB) + maxCopySize = 64 * 1024 +) + +// GetDelta returns an EncodedObject of type OFSDeltaObject. Base and Target object, +// will be loaded into memory to be able to create the delta object. +// To generate target again, you will need the obtained object and "base" one. +// Error will be returned if base or target object cannot be read. 
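+//
+// A hypothetical sketch (assuming `base` and `target` are two
+// plumbing.EncodedObject values of the same type):
+//
+//	delta, err := GetDelta(base, target)
+//	if err != nil { /* handle */ }
+//	// Applying delta's payload to base's bytes (see PatchDelta in this
+//	// package) reproduces target.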
+func GetDelta(base, target plumbing.EncodedObject) (plumbing.EncodedObject, error) { + return getDelta(new(deltaIndex), base, target) +} + +func getDelta(index *deltaIndex, base, target plumbing.EncodedObject) (o plumbing.EncodedObject, err error) { + br, err := base.Reader() + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(br, &err) + + tr, err := target.Reader() + if err != nil { + return nil, err + } + + defer ioutil.CheckClose(tr, &err) + + bb := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(bb) + bb.Reset() + + _, err = bb.ReadFrom(br) + if err != nil { + return nil, err + } + + tb := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(tb) + tb.Reset() + + _, err = tb.ReadFrom(tr) + if err != nil { + return nil, err + } + + db := diffDelta(index, bb.Bytes(), tb.Bytes()) + delta := &plumbing.MemoryObject{} + _, err = delta.Write(db) + if err != nil { + return nil, err + } + + delta.SetSize(int64(len(db))) + delta.SetType(plumbing.OFSDeltaObject) + + return delta, nil +} + +// DiffDelta returns the delta that transforms src into tgt. +func DiffDelta(src, tgt []byte) []byte { + return diffDelta(new(deltaIndex), src, tgt) +} + +func diffDelta(index *deltaIndex, src []byte, tgt []byte) []byte { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + buf.Write(deltaEncodeSize(len(src))) + buf.Write(deltaEncodeSize(len(tgt))) + + if len(index.entries) == 0 { + index.init(src) + } + + ibuf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(ibuf) + ibuf.Reset() + for i := 0; i < len(tgt); i++ { + offset, l := index.findMatch(src, tgt, i) + + if l == 0 { + // couldn't find a match, just write the current byte and continue + ibuf.WriteByte(tgt[i]) + } else if l < 0 { + // src is less than blksz, copy the rest of the target to avoid + // calls to findMatch + for ; i < len(tgt); i++ { + ibuf.WriteByte(tgt[i]) + } + } else if l < s { + // remaining target is less than blksz, copy what's left of it + // and avoid calls to findMatch + for j := i; j < i+l; j++ { + ibuf.WriteByte(tgt[j]) + } + i += l - 1 + } else { + encodeInsertOperation(ibuf, buf) + + rl := l + aOffset := offset + for rl > 0 { + if rl < maxCopySize { + buf.Write(encodeCopyOperation(aOffset, rl)) + break + } + + buf.Write(encodeCopyOperation(aOffset, maxCopySize)) + rl -= maxCopySize + aOffset += maxCopySize + } + + i += l - 1 + } + } + + encodeInsertOperation(ibuf, buf) + + // buf.Bytes() is only valid until the next modifying operation on the buffer. Copy it. + return append([]byte{}, buf.Bytes()...) 
+} + +func encodeInsertOperation(ibuf, buf *bytes.Buffer) { + if ibuf.Len() == 0 { + return + } + + b := ibuf.Bytes() + s := ibuf.Len() + o := 0 + for { + if s <= 127 { + break + } + buf.WriteByte(byte(127)) + buf.Write(b[o : o+127]) + s -= 127 + o += 127 + } + buf.WriteByte(byte(s)) + buf.Write(b[o : o+s]) + + ibuf.Reset() +} + +func deltaEncodeSize(size int) []byte { + var ret []byte + c := size & 0x7f + size >>= 7 + for { + if size == 0 { + break + } + + ret = append(ret, byte(c|0x80)) + c = size & 0x7f + size >>= 7 + } + ret = append(ret, byte(c)) + + return ret +} + +func encodeCopyOperation(offset, length int) []byte { + code := 0x80 + var opcodes []byte + + var i uint + for i = 0; i < 4; i++ { + f := 0xff << (i * 8) + if offset&f != 0 { + opcodes = append(opcodes, byte(offset&f>>(i*8))) + code |= 0x01 << i + } + } + + for i = 0; i < 3; i++ { + f := 0xff << (i * 8) + if length&f != 0 { + opcodes = append(opcodes, byte(length&f>>(i*8))) + code |= 0x10 << i + } + } + + return append([]byte{byte(code)}, opcodes...) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go new file mode 100644 index 0000000000..2882a7f378 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/doc.go @@ -0,0 +1,39 @@ +// Package packfile implements encoding and decoding of packfile format. +// +// == pack-*.pack files have the following format: +// +// - A header appears at the beginning and consists of the following: +// +// 4-byte signature: +// The signature is: {'P', 'A', 'C', 'K'} +// +// 4-byte version number (network byte order): +// GIT currently accepts version number 2 or 3 but +// generates version 2 only. +// +// 4-byte number of objects contained in the pack (network byte order) +// +// Observation: we cannot have more than 4G versions ;-) and +// more than 4G objects in a pack. +// +// - The header is followed by number of object entries, each of +// which looks like this: +// +// (undeltified representation) +// n-byte type and length (3-bit type, (n-1)*7+4-bit length) +// compressed data +// +// (deltified representation) +// n-byte type and length (3-bit type, (n-1)*7+4-bit length) +// 20-byte base object name +// compressed delta data +// +// Observation: length of each object is encoded in a variable +// length format and is not constrained to 32-bit or anything. +// +// - The trailer records 20-byte SHA1 checksum of all of the above. +// +// +// Source: +// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt +package packfile diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go new file mode 100644 index 0000000000..5501f8861c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/encoder.go @@ -0,0 +1,225 @@ +package packfile + +import ( + "compress/zlib" + "crypto/sha1" + "fmt" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/binary" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// Encoder gets the data from the storage and write it into the writer in PACK +// format +type Encoder struct { + selector *deltaSelector + w *offsetWriter + zw *zlib.Writer + hasher plumbing.Hasher + + useRefDeltas bool +} + +// NewEncoder creates a new packfile encoder using a specific Writer and +// EncodedObjectStorer. 
By default deltas used to generate the packfile will be +// OFSDeltaObject. To use Reference deltas, set useRefDeltas to true. +func NewEncoder(w io.Writer, s storer.EncodedObjectStorer, useRefDeltas bool) *Encoder { + h := plumbing.Hasher{ + Hash: sha1.New(), + } + mw := io.MultiWriter(w, h) + ow := newOffsetWriter(mw) + zw := zlib.NewWriter(mw) + return &Encoder{ + selector: newDeltaSelector(s), + w: ow, + zw: zw, + hasher: h, + useRefDeltas: useRefDeltas, + } +} + +// Encode creates a packfile containing all the objects referenced in +// hashes and writes it to the writer in the Encoder. `packWindow` +// specifies the size of the sliding window used to compare objects +// for delta compression; 0 turns off delta compression entirely. +func (e *Encoder) Encode( + hashes []plumbing.Hash, + packWindow uint, +) (plumbing.Hash, error) { + objects, err := e.selector.ObjectsToPack(hashes, packWindow) + if err != nil { + return plumbing.ZeroHash, err + } + + return e.encode(objects) +} + +func (e *Encoder) encode(objects []*ObjectToPack) (plumbing.Hash, error) { + if err := e.head(len(objects)); err != nil { + return plumbing.ZeroHash, err + } + + for _, o := range objects { + if err := e.entry(o); err != nil { + return plumbing.ZeroHash, err + } + } + + return e.footer() +} + +func (e *Encoder) head(numEntries int) error { + return binary.Write( + e.w, + signature, + int32(VersionSupported), + int32(numEntries), + ) +} + +func (e *Encoder) entry(o *ObjectToPack) (err error) { + if o.WantWrite() { + // A cycle exists in this delta chain. This should only occur if a + // selected object representation disappeared during writing + // (for example due to a concurrent repack) and a different base + // was chosen, forcing a cycle. Select something other than a + // delta, and write this object. + e.selector.restoreOriginal(o) + o.BackToOriginal() + } + + if o.IsWritten() { + return nil + } + + o.MarkWantWrite() + + if err := e.writeBaseIfDelta(o); err != nil { + return err + } + + // We need to check if we already write that object due a cyclic delta chain + if o.IsWritten() { + return nil + } + + o.Offset = e.w.Offset() + + if o.IsDelta() { + if err := e.writeDeltaHeader(o); err != nil { + return err + } + } else { + if err := e.entryHead(o.Type(), o.Size()); err != nil { + return err + } + } + + e.zw.Reset(e.w) + + defer ioutil.CheckClose(e.zw, &err) + + or, err := o.Object.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(or, &err) + + _, err = io.Copy(e.zw, or) + if err != nil { + return err + } + + return nil +} + +func (e *Encoder) writeBaseIfDelta(o *ObjectToPack) error { + if o.IsDelta() && !o.Base.IsWritten() { + // We must write base first + return e.entry(o.Base) + } + + return nil +} + +func (e *Encoder) writeDeltaHeader(o *ObjectToPack) error { + // Write offset deltas by default + t := plumbing.OFSDeltaObject + if e.useRefDeltas { + t = plumbing.REFDeltaObject + } + + if err := e.entryHead(t, o.Object.Size()); err != nil { + return err + } + + if e.useRefDeltas { + return e.writeRefDeltaHeader(o.Base.Hash()) + } else { + return e.writeOfsDeltaHeader(o) + } +} + +func (e *Encoder) writeRefDeltaHeader(base plumbing.Hash) error { + return binary.Write(e.w, base) +} + +func (e *Encoder) writeOfsDeltaHeader(o *ObjectToPack) error { + // for OFS_DELTA, offset of the base is interpreted as negative offset + // relative to the type-byte of the header of the ofs-delta entry. 
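+	// For example, if the base's header was written at pack offset 1000 and
+	// this delta's header starts at offset 1500, we write 500; a reader
+	// subtracts that from the delta's own offset to locate the base.
+	//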
+ relativeOffset := o.Offset - o.Base.Offset + if relativeOffset <= 0 { + return fmt.Errorf("bad offset for OFS_DELTA entry: %d", relativeOffset) + } + + return binary.WriteVariableWidthInt(e.w, relativeOffset) +} + +func (e *Encoder) entryHead(typeNum plumbing.ObjectType, size int64) error { + t := int64(typeNum) + header := []byte{} + c := (t << firstLengthBits) | (size & maskFirstLength) + size >>= firstLengthBits + for { + if size == 0 { + break + } + header = append(header, byte(c|maskContinue)) + c = size & int64(maskLength) + size >>= lengthBits + } + + header = append(header, byte(c)) + _, err := e.w.Write(header) + + return err +} + +func (e *Encoder) footer() (plumbing.Hash, error) { + h := e.hasher.Sum() + return h, binary.Write(e.w, h) +} + +type offsetWriter struct { + w io.Writer + offset int64 +} + +func newOffsetWriter(w io.Writer) *offsetWriter { + return &offsetWriter{w: w} +} + +func (ow *offsetWriter) Write(p []byte) (n int, err error) { + n, err = ow.w.Write(p) + ow.offset += int64(n) + return n, err +} + +func (ow *offsetWriter) Offset() int64 { + return ow.offset +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go new file mode 100644 index 0000000000..c0b9163313 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/error.go @@ -0,0 +1,30 @@ +package packfile + +import "fmt" + +// Error specifies errors returned during packfile parsing. +type Error struct { + reason, details string +} + +// NewError returns a new error. +func NewError(reason string) *Error { + return &Error{reason: reason} +} + +// Error returns a text representation of the error. +func (e *Error) Error() string { + if e.details == "" { + return e.reason + } + + return fmt.Sprintf("%s: %s", e.reason, e.details) +} + +// AddDetails adds details to an error, with additional text. +func (e *Error) AddDetails(format string, args ...interface{}) *Error { + return &Error{ + reason: e.reason, + details: fmt.Sprintf(format, args...), + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go new file mode 100644 index 0000000000..c5edaf52ee --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/fsobject.go @@ -0,0 +1,116 @@ +package packfile + +import ( + "io" + + billy "github.com/go-git/go-billy/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/format/idxfile" +) + +// FSObject is an object from the packfile on the filesystem. +type FSObject struct { + hash plumbing.Hash + h *ObjectHeader + offset int64 + size int64 + typ plumbing.ObjectType + index idxfile.Index + fs billy.Filesystem + path string + cache cache.Object +} + +// NewFSObject creates a new filesystem object. +func NewFSObject( + hash plumbing.Hash, + finalType plumbing.ObjectType, + offset int64, + contentSize int64, + index idxfile.Index, + fs billy.Filesystem, + path string, + cache cache.Object, +) *FSObject { + return &FSObject{ + hash: hash, + offset: offset, + size: contentSize, + typ: finalType, + index: index, + fs: fs, + path: path, + cache: cache, + } +} + +// Reader implements the plumbing.EncodedObject interface. 
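+// It first consults the shared object cache; on a miss it opens the
+// packfile from the filesystem and inflates the object at the recorded
+// offset.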
+func (o *FSObject) Reader() (io.ReadCloser, error) { + obj, ok := o.cache.Get(o.hash) + if ok && obj != o { + reader, err := obj.Reader() + if err != nil { + return nil, err + } + + return reader, nil + } + + f, err := o.fs.Open(o.path) + if err != nil { + return nil, err + } + + p := NewPackfileWithCache(o.index, nil, f, o.cache) + r, err := p.getObjectContent(o.offset) + if err != nil { + _ = f.Close() + return nil, err + } + + if err := f.Close(); err != nil { + return nil, err + } + + return r, nil +} + +// SetSize implements the plumbing.EncodedObject interface. This method +// is a noop. +func (o *FSObject) SetSize(int64) {} + +// SetType implements the plumbing.EncodedObject interface. This method is +// a noop. +func (o *FSObject) SetType(plumbing.ObjectType) {} + +// Hash implements the plumbing.EncodedObject interface. +func (o *FSObject) Hash() plumbing.Hash { return o.hash } + +// Size implements the plumbing.EncodedObject interface. +func (o *FSObject) Size() int64 { return o.size } + +// Type implements the plumbing.EncodedObject interface. +func (o *FSObject) Type() plumbing.ObjectType { + return o.typ +} + +// Writer implements the plumbing.EncodedObject interface. This method always +// returns a nil writer. +func (o *FSObject) Writer() (io.WriteCloser, error) { + return nil, nil +} + +type objectReader struct { + io.ReadCloser + f billy.File +} + +func (r *objectReader) Close() error { + if err := r.ReadCloser.Close(); err != nil { + _ = r.f.Close() + return err + } + + return r.f.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go new file mode 100644 index 0000000000..8ce29ef8ba --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/object_pack.go @@ -0,0 +1,164 @@ +package packfile + +import ( + "github.com/go-git/go-git/v5/plumbing" +) + +// ObjectToPack is a representation of an object that is going to be into a +// pack file. +type ObjectToPack struct { + // The main object to pack, it could be any object, including deltas + Object plumbing.EncodedObject + // Base is the object that a delta is based on (it could be also another delta). + // If the main object is not a delta, Base will be null + Base *ObjectToPack + // Original is the object that we can generate applying the delta to + // Base, or the same object as Object in the case of a non-delta + // object. + Original plumbing.EncodedObject + // Depth is the amount of deltas needed to resolve to obtain Original + // (delta based on delta based on ...) 
+ Depth int + + // offset in pack when object has been already written, or 0 if it + // has not been written yet + Offset int64 + + // Information from the original object + resolvedOriginal bool + originalType plumbing.ObjectType + originalSize int64 + originalHash plumbing.Hash +} + +// newObjectToPack creates a correct ObjectToPack based on a non-delta object +func newObjectToPack(o plumbing.EncodedObject) *ObjectToPack { + return &ObjectToPack{ + Object: o, + Original: o, + } +} + +// newDeltaObjectToPack creates a correct ObjectToPack for a delta object, based on +// his base (could be another delta), the delta target (in this case called original), +// and the delta Object itself +func newDeltaObjectToPack(base *ObjectToPack, original, delta plumbing.EncodedObject) *ObjectToPack { + return &ObjectToPack{ + Object: delta, + Base: base, + Original: original, + Depth: base.Depth + 1, + } +} + +// BackToOriginal converts that ObjectToPack to a non-deltified object if it was one +func (o *ObjectToPack) BackToOriginal() { + if o.IsDelta() && o.Original != nil { + o.Object = o.Original + o.Base = nil + o.Depth = 0 + } +} + +// IsWritten returns if that ObjectToPack was +// already written into the packfile or not +func (o *ObjectToPack) IsWritten() bool { + return o.Offset > 1 +} + +// MarkWantWrite marks this ObjectToPack as WantWrite +// to avoid delta chain loops +func (o *ObjectToPack) MarkWantWrite() { + o.Offset = 1 +} + +// WantWrite checks if this ObjectToPack was marked as WantWrite before +func (o *ObjectToPack) WantWrite() bool { + return o.Offset == 1 +} + +// SetOriginal sets both Original and saves size, type and hash. If object +// is nil Original is set but previous resolved values are kept +func (o *ObjectToPack) SetOriginal(obj plumbing.EncodedObject) { + o.Original = obj + o.SaveOriginalMetadata() +} + +// SaveOriginalMetadata saves size, type and hash of Original object +func (o *ObjectToPack) SaveOriginalMetadata() { + if o.Original != nil { + o.originalSize = o.Original.Size() + o.originalType = o.Original.Type() + o.originalHash = o.Original.Hash() + o.resolvedOriginal = true + } +} + +// CleanOriginal sets Original to nil +func (o *ObjectToPack) CleanOriginal() { + o.Original = nil +} + +func (o *ObjectToPack) Type() plumbing.ObjectType { + if o.Original != nil { + return o.Original.Type() + } + + if o.resolvedOriginal { + return o.originalType + } + + if o.Base != nil { + return o.Base.Type() + } + + if o.Object != nil { + return o.Object.Type() + } + + panic("cannot get type") +} + +func (o *ObjectToPack) Hash() plumbing.Hash { + if o.Original != nil { + return o.Original.Hash() + } + + if o.resolvedOriginal { + return o.originalHash + } + + do, ok := o.Object.(plumbing.DeltaObject) + if ok { + return do.ActualHash() + } + + panic("cannot get hash") +} + +func (o *ObjectToPack) Size() int64 { + if o.Original != nil { + return o.Original.Size() + } + + if o.resolvedOriginal { + return o.originalSize + } + + do, ok := o.Object.(plumbing.DeltaObject) + if ok { + return do.ActualSize() + } + + panic("cannot get ObjectToPack size") +} + +func (o *ObjectToPack) IsDelta() bool { + return o.Base != nil +} + +func (o *ObjectToPack) SetDelta(base *ObjectToPack, delta plumbing.EncodedObject) { + o.Object = delta + o.Base = base + o.Depth = base.Depth + 1 +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go new file mode 100644 index 0000000000..ddd7f62fce --- /dev/null 
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/packfile.go @@ -0,0 +1,565 @@ +package packfile + +import ( + "bytes" + "io" + "os" + + billy "github.com/go-git/go-billy/v5" + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/format/idxfile" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var ( + // ErrInvalidObject is returned by Decode when an invalid object is + // found in the packfile. + ErrInvalidObject = NewError("invalid git object") + // ErrZLib is returned by Decode when there was an error unzipping + // the packfile contents. + ErrZLib = NewError("zlib reading error") +) + +// When reading small objects from packfile it is beneficial to do so at +// once to exploit the buffered I/O. In many cases the objects are so small +// that they were already loaded to memory when the object header was +// loaded from the packfile. Wrapping in FSObject would cause this buffered +// data to be thrown away and then re-read later, with the additional +// seeking causing reloads from disk. Objects smaller than this threshold +// are now always read into memory and stored in cache instead of being +// wrapped in FSObject. +const smallObjectThreshold = 16 * 1024 + +// Packfile allows retrieving information from inside a packfile. +type Packfile struct { + idxfile.Index + fs billy.Filesystem + file billy.File + s *Scanner + deltaBaseCache cache.Object + offsetToType map[int64]plumbing.ObjectType +} + +// NewPackfileWithCache creates a new Packfile with the given object cache. +// If the filesystem is provided, the packfile will return FSObjects, otherwise +// it will return MemoryObjects. +func NewPackfileWithCache( + index idxfile.Index, + fs billy.Filesystem, + file billy.File, + cache cache.Object, +) *Packfile { + s := NewScanner(file) + return &Packfile{ + index, + fs, + file, + s, + cache, + make(map[int64]plumbing.ObjectType), + } +} + +// NewPackfile returns a packfile representation for the given packfile file +// and packfile idx. +// If the filesystem is provided, the packfile will return FSObjects, otherwise +// it will return MemoryObjects. +func NewPackfile(index idxfile.Index, fs billy.Filesystem, file billy.File) *Packfile { + return NewPackfileWithCache(index, fs, file, cache.NewObjectLRUDefault()) +} + +// Get retrieves the encoded object in the packfile with the given hash. +func (p *Packfile) Get(h plumbing.Hash) (plumbing.EncodedObject, error) { + offset, err := p.FindOffset(h) + if err != nil { + return nil, err + } + + return p.objectAtOffset(offset, h) +} + +// GetByOffset retrieves the encoded object from the packfile at the given +// offset. +func (p *Packfile) GetByOffset(o int64) (plumbing.EncodedObject, error) { + hash, err := p.FindHash(o) + if err != nil { + return nil, err + } + + return p.objectAtOffset(o, hash) +} + +// GetSizeByOffset retrieves the size of the encoded object from the +// packfile with the given offset. 
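+//
+// Illustrative use only (p is a *Packfile; off would come from FindOffset):
+//
+//	size, err := p.GetSizeByOffset(off)
+//	if err != nil {
+//		return err
+//	}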
+func (p *Packfile) GetSizeByOffset(o int64) (size int64, err error) { + if _, err := p.s.SeekFromStart(o); err != nil { + if err == io.EOF || isInvalid(err) { + return 0, plumbing.ErrObjectNotFound + } + + return 0, err + } + + h, err := p.nextObjectHeader() + if err != nil { + return 0, err + } + return p.getObjectSize(h) +} + +func (p *Packfile) objectHeaderAtOffset(offset int64) (*ObjectHeader, error) { + h, err := p.s.SeekObjectHeader(offset) + p.s.pendingObject = nil + return h, err +} + +func (p *Packfile) nextObjectHeader() (*ObjectHeader, error) { + h, err := p.s.NextObjectHeader() + p.s.pendingObject = nil + return h, err +} + +func (p *Packfile) getDeltaObjectSize(buf *bytes.Buffer) int64 { + delta := buf.Bytes() + _, delta = decodeLEB128(delta) // skip src size + sz, _ := decodeLEB128(delta) + return int64(sz) +} + +func (p *Packfile) getObjectSize(h *ObjectHeader) (int64, error) { + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + return h.Length, nil + case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + + if _, _, err := p.s.NextObject(buf); err != nil { + return 0, err + } + + return p.getDeltaObjectSize(buf), nil + default: + return 0, ErrInvalidObject.AddDetails("type %q", h.Type) + } +} + +func (p *Packfile) getObjectType(h *ObjectHeader) (typ plumbing.ObjectType, err error) { + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + return h.Type, nil + case plumbing.REFDeltaObject, plumbing.OFSDeltaObject: + var offset int64 + if h.Type == plumbing.REFDeltaObject { + offset, err = p.FindOffset(h.Reference) + if err != nil { + return + } + } else { + offset = h.OffsetReference + } + + if baseType, ok := p.offsetToType[offset]; ok { + typ = baseType + } else { + h, err = p.objectHeaderAtOffset(offset) + if err != nil { + return + } + + typ, err = p.getObjectType(h) + if err != nil { + return + } + } + default: + err = ErrInvalidObject.AddDetails("type %q", h.Type) + } + + p.offsetToType[h.Offset] = typ + + return +} + +func (p *Packfile) objectAtOffset(offset int64, hash plumbing.Hash) (plumbing.EncodedObject, error) { + if obj, ok := p.cacheGet(hash); ok { + return obj, nil + } + + h, err := p.objectHeaderAtOffset(offset) + if err != nil { + if err == io.EOF || isInvalid(err) { + return nil, plumbing.ErrObjectNotFound + } + return nil, err + } + + return p.getNextObject(h, hash) +} + +func (p *Packfile) getNextObject(h *ObjectHeader, hash plumbing.Hash) (plumbing.EncodedObject, error) { + var err error + + // If we have no filesystem, we will return a MemoryObject instead + // of an FSObject. + if p.fs == nil { + return p.getNextMemoryObject(h) + } + + // If the object is small enough then read it completely into memory now since + // it is already read from disk into buffer anyway. For delta objects we want + // to perform the optimization too, but we have to be careful about applying + // small deltas on big objects. + var size int64 + if h.Length <= smallObjectThreshold { + if h.Type != plumbing.OFSDeltaObject && h.Type != plumbing.REFDeltaObject { + return p.getNextMemoryObject(h) + } + + // For delta objects we read the delta data and apply the small object + // optimization only if the expanded version of the object still meets + // the small object threshold condition. 
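+ //
+ // (Editorial note, illustrative only) the expanded size is taken from
+ // the delta stream itself, which starts with two LEB128-encoded
+ // integers:
+ //
+ //	_, rest := decodeLEB128(delta)    // source (base) size, skipped
+ //	targetSz, _ := decodeLEB128(rest) // inflated target size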
+ buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + if _, _, err := p.s.NextObject(buf); err != nil { + return nil, err + } + + size = p.getDeltaObjectSize(buf) + if size <= smallObjectThreshold { + var obj = new(plumbing.MemoryObject) + obj.SetSize(size) + if h.Type == plumbing.REFDeltaObject { + err = p.fillREFDeltaObjectContentWithBuffer(obj, h.Reference, buf) + } else { + err = p.fillOFSDeltaObjectContentWithBuffer(obj, h.OffsetReference, buf) + } + return obj, err + } + } else { + size, err = p.getObjectSize(h) + if err != nil { + return nil, err + } + } + + typ, err := p.getObjectType(h) + if err != nil { + return nil, err + } + + p.offsetToType[h.Offset] = typ + + return NewFSObject( + hash, + typ, + h.Offset, + size, + p.Index, + p.fs, + p.file.Name(), + p.deltaBaseCache, + ), nil +} + +func (p *Packfile) getObjectContent(offset int64) (io.ReadCloser, error) { + h, err := p.objectHeaderAtOffset(offset) + if err != nil { + return nil, err + } + + // getObjectContent is called from FSObject, so we have to explicitly + // get memory object here to avoid recursive cycle + obj, err := p.getNextMemoryObject(h) + if err != nil { + return nil, err + } + + return obj.Reader() +} + +func (p *Packfile) getNextMemoryObject(h *ObjectHeader) (plumbing.EncodedObject, error) { + var obj = new(plumbing.MemoryObject) + obj.SetSize(h.Length) + obj.SetType(h.Type) + + var err error + switch h.Type { + case plumbing.CommitObject, plumbing.TreeObject, plumbing.BlobObject, plumbing.TagObject: + err = p.fillRegularObjectContent(obj) + case plumbing.REFDeltaObject: + err = p.fillREFDeltaObjectContent(obj, h.Reference) + case plumbing.OFSDeltaObject: + err = p.fillOFSDeltaObjectContent(obj, h.OffsetReference) + default: + err = ErrInvalidObject.AddDetails("type %q", h.Type) + } + + if err != nil { + return nil, err + } + + p.offsetToType[h.Offset] = obj.Type() + + return obj, nil +} + +func (p *Packfile) fillRegularObjectContent(obj plumbing.EncodedObject) (err error) { + w, err := obj.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + _, _, err = p.s.NextObject(w) + p.cachePut(obj) + + return err +} + +func (p *Packfile) fillREFDeltaObjectContent(obj plumbing.EncodedObject, ref plumbing.Hash) error { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, _, err := p.s.NextObject(buf) + if err != nil { + return err + } + + return p.fillREFDeltaObjectContentWithBuffer(obj, ref, buf) +} + +func (p *Packfile) fillREFDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, ref plumbing.Hash, buf *bytes.Buffer) error { + var err error + + base, ok := p.cacheGet(ref) + if !ok { + base, err = p.Get(ref) + if err != nil { + return err + } + } + + obj.SetType(base.Type()) + err = ApplyDelta(obj, base, buf.Bytes()) + p.cachePut(obj) + + return err +} + +func (p *Packfile) fillOFSDeltaObjectContent(obj plumbing.EncodedObject, offset int64) error { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, _, err := p.s.NextObject(buf) + if err != nil { + return err + } + + return p.fillOFSDeltaObjectContentWithBuffer(obj, offset, buf) +} + +func (p *Packfile) fillOFSDeltaObjectContentWithBuffer(obj plumbing.EncodedObject, offset int64, buf *bytes.Buffer) error { + hash, err := p.FindHash(offset) + if err != nil { + return err + } + + base, err := p.objectAtOffset(offset, hash) + if err != nil { + return err + } + + obj.SetType(base.Type()) + err = ApplyDelta(obj, base, buf.Bytes()) + p.cachePut(obj) + + 
return err +} + +func (p *Packfile) cacheGet(h plumbing.Hash) (plumbing.EncodedObject, bool) { + if p.deltaBaseCache == nil { + return nil, false + } + + return p.deltaBaseCache.Get(h) +} + +func (p *Packfile) cachePut(obj plumbing.EncodedObject) { + if p.deltaBaseCache == nil { + return + } + + p.deltaBaseCache.Put(obj) +} + +// GetAll returns an iterator with all encoded objects in the packfile. +// The iterator returned is not thread-safe, it should be used in the same +// thread as the Packfile instance. +func (p *Packfile) GetAll() (storer.EncodedObjectIter, error) { + return p.GetByType(plumbing.AnyObject) +} + +// GetByType returns all the objects of the given type. +func (p *Packfile) GetByType(typ plumbing.ObjectType) (storer.EncodedObjectIter, error) { + switch typ { + case plumbing.AnyObject, + plumbing.BlobObject, + plumbing.TreeObject, + plumbing.CommitObject, + plumbing.TagObject: + entries, err := p.EntriesByOffset() + if err != nil { + return nil, err + } + + return &objectIter{ + // Easiest way to provide an object decoder is just to pass a Packfile + // instance. To not mess with the seeks, it's a new instance with a + // different scanner but the same cache and offset to hash map for + // reusing as much cache as possible. + p: p, + iter: entries, + typ: typ, + }, nil + default: + return nil, plumbing.ErrInvalidType + } +} + +// ID returns the ID of the packfile, which is the checksum at the end of it. +func (p *Packfile) ID() (plumbing.Hash, error) { + prev, err := p.file.Seek(-20, io.SeekEnd) + if err != nil { + return plumbing.ZeroHash, err + } + + var hash plumbing.Hash + if _, err := io.ReadFull(p.file, hash[:]); err != nil { + return plumbing.ZeroHash, err + } + + if _, err := p.file.Seek(prev, io.SeekStart); err != nil { + return plumbing.ZeroHash, err + } + + return hash, nil +} + +// Scanner returns the packfile's Scanner +func (p *Packfile) Scanner() *Scanner { + return p.s +} + +// Close the packfile and its resources. 
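+//
+// Typical lifecycle (editorial sketch; idx, fs, f and hash are placeholders
+// obtained elsewhere):
+//
+//	p := NewPackfile(idx, fs, f)
+//	defer p.Close()
+//	obj, err := p.Get(hash)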
+func (p *Packfile) Close() error { + closer, ok := p.file.(io.Closer) + if !ok { + return nil + } + + return closer.Close() +} + +type objectIter struct { + p *Packfile + typ plumbing.ObjectType + iter idxfile.EntryIter +} + +func (i *objectIter) Next() (plumbing.EncodedObject, error) { + for { + e, err := i.iter.Next() + if err != nil { + return nil, err + } + + if i.typ != plumbing.AnyObject { + if typ, ok := i.p.offsetToType[int64(e.Offset)]; ok { + if typ != i.typ { + continue + } + } else if obj, ok := i.p.cacheGet(e.Hash); ok { + if obj.Type() != i.typ { + i.p.offsetToType[int64(e.Offset)] = obj.Type() + continue + } + return obj, nil + } else { + h, err := i.p.objectHeaderAtOffset(int64(e.Offset)) + if err != nil { + return nil, err + } + + if h.Type == plumbing.REFDeltaObject || h.Type == plumbing.OFSDeltaObject { + typ, err := i.p.getObjectType(h) + if err != nil { + return nil, err + } + if typ != i.typ { + i.p.offsetToType[int64(e.Offset)] = typ + continue + } + // getObjectType will seek in the file so we cannot use getNextObject safely + return i.p.objectAtOffset(int64(e.Offset), e.Hash) + } else { + if h.Type != i.typ { + i.p.offsetToType[int64(e.Offset)] = h.Type + continue + } + return i.p.getNextObject(h, e.Hash) + } + } + } + + obj, err := i.p.objectAtOffset(int64(e.Offset), e.Hash) + if err != nil { + return nil, err + } + + return obj, nil + } +} + +func (i *objectIter) ForEach(f func(plumbing.EncodedObject) error) error { + for { + o, err := i.Next() + if err != nil { + if err == io.EOF { + return nil + } + return err + } + + if err := f(o); err != nil { + return err + } + } +} + +func (i *objectIter) Close() { + i.iter.Close() +} + +// isInvalid checks whether an error is an os.PathError with an os.ErrInvalid +// error inside. It also checks for the windows error, which is different from +// os.ErrInvalid. +func isInvalid(err error) bool { + pe, ok := err.(*os.PathError) + if !ok { + return false + } + + errstr := pe.Err.Error() + return errstr == errInvalidUnix || errstr == errInvalidWindows +} + +// errInvalidWindows is the Windows equivalent to os.ErrInvalid +const errInvalidWindows = "The parameter is incorrect." + +var errInvalidUnix = os.ErrInvalid.Error() diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go new file mode 100644 index 0000000000..4b5a5708cc --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/parser.go @@ -0,0 +1,495 @@ +package packfile + +import ( + "bytes" + "errors" + "io" + stdioutil "io/ioutil" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/cache" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +var ( + // ErrReferenceDeltaNotFound is returned when the reference delta is not + // found. + ErrReferenceDeltaNotFound = errors.New("reference delta not found") + + // ErrNotSeekableSource is returned when the source for the parser is not + // seekable and a storage was not provided, so it can't be parsed. + ErrNotSeekableSource = errors.New("parser source is not seekable and storage was not provided") + + // ErrDeltaNotCached is returned when the delta could not be found in cache. + ErrDeltaNotCached = errors.New("delta could not be found in cache") +) + +// Observer interface is implemented by index encoders. +type Observer interface { + // OnHeader is called when a new packfile is opened. 
+ OnHeader(count uint32) error + // OnInflatedObjectHeader is called for each object header read. + OnInflatedObjectHeader(t plumbing.ObjectType, objSize int64, pos int64) error + // OnInflatedObjectContent is called for each decoded object. + OnInflatedObjectContent(h plumbing.Hash, pos int64, crc uint32, content []byte) error + // OnFooter is called when decoding is done. + OnFooter(h plumbing.Hash) error +} + +// Parser decodes a packfile and calls any observer associated to it. Is used +// to generate indexes. +type Parser struct { + storage storer.EncodedObjectStorer + scanner *Scanner + count uint32 + oi []*objectInfo + oiByHash map[plumbing.Hash]*objectInfo + oiByOffset map[int64]*objectInfo + hashOffset map[plumbing.Hash]int64 + checksum plumbing.Hash + + cache *cache.BufferLRU + // delta content by offset, only used if source is not seekable + deltas map[int64][]byte + + ob []Observer +} + +// NewParser creates a new Parser. The Scanner source must be seekable. +// If it's not, NewParserWithStorage should be used instead. +func NewParser(scanner *Scanner, ob ...Observer) (*Parser, error) { + return NewParserWithStorage(scanner, nil, ob...) +} + +// NewParserWithStorage creates a new Parser. The scanner source must either +// be seekable or a storage must be provided. +func NewParserWithStorage( + scanner *Scanner, + storage storer.EncodedObjectStorer, + ob ...Observer, +) (*Parser, error) { + if !scanner.IsSeekable && storage == nil { + return nil, ErrNotSeekableSource + } + + var deltas map[int64][]byte + if !scanner.IsSeekable { + deltas = make(map[int64][]byte) + } + + return &Parser{ + storage: storage, + scanner: scanner, + ob: ob, + count: 0, + cache: cache.NewBufferLRUDefault(), + deltas: deltas, + }, nil +} + +func (p *Parser) forEachObserver(f func(o Observer) error) error { + for _, o := range p.ob { + if err := f(o); err != nil { + return err + } + } + return nil +} + +func (p *Parser) onHeader(count uint32) error { + return p.forEachObserver(func(o Observer) error { + return o.OnHeader(count) + }) +} + +func (p *Parser) onInflatedObjectHeader( + t plumbing.ObjectType, + objSize int64, + pos int64, +) error { + return p.forEachObserver(func(o Observer) error { + return o.OnInflatedObjectHeader(t, objSize, pos) + }) +} + +func (p *Parser) onInflatedObjectContent( + h plumbing.Hash, + pos int64, + crc uint32, + content []byte, +) error { + return p.forEachObserver(func(o Observer) error { + return o.OnInflatedObjectContent(h, pos, crc, content) + }) +} + +func (p *Parser) onFooter(h plumbing.Hash) error { + return p.forEachObserver(func(o Observer) error { + return o.OnFooter(h) + }) +} + +// Parse start decoding phase of the packfile. 
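+//
+// A minimal sketch (editorial, illustrative only; assumes scanner reads a
+// complete packfile and obs is any Observer implementation, such as an
+// index writer):
+//
+//	parser, err := NewParser(scanner, obs)
+//	if err != nil {
+//		return err
+//	}
+//	checksum, err := parser.Parse()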
+func (p *Parser) Parse() (plumbing.Hash, error) { + if err := p.init(); err != nil { + return plumbing.ZeroHash, err + } + + if err := p.indexObjects(); err != nil { + return plumbing.ZeroHash, err + } + + var err error + p.checksum, err = p.scanner.Checksum() + if err != nil && err != io.EOF { + return plumbing.ZeroHash, err + } + + if err := p.resolveDeltas(); err != nil { + return plumbing.ZeroHash, err + } + + if err := p.onFooter(p.checksum); err != nil { + return plumbing.ZeroHash, err + } + + return p.checksum, nil +} + +func (p *Parser) init() error { + _, c, err := p.scanner.Header() + if err != nil { + return err + } + + if err := p.onHeader(c); err != nil { + return err + } + + p.count = c + p.oiByHash = make(map[plumbing.Hash]*objectInfo, p.count) + p.oiByOffset = make(map[int64]*objectInfo, p.count) + p.oi = make([]*objectInfo, p.count) + + return nil +} + +func (p *Parser) indexObjects() error { + buf := new(bytes.Buffer) + + for i := uint32(0); i < p.count; i++ { + buf.Reset() + + oh, err := p.scanner.NextObjectHeader() + if err != nil { + return err + } + + delta := false + var ota *objectInfo + switch t := oh.Type; t { + case plumbing.OFSDeltaObject: + delta = true + + parent, ok := p.oiByOffset[oh.OffsetReference] + if !ok { + return plumbing.ErrObjectNotFound + } + + ota = newDeltaObject(oh.Offset, oh.Length, t, parent) + parent.Children = append(parent.Children, ota) + case plumbing.REFDeltaObject: + delta = true + parent, ok := p.oiByHash[oh.Reference] + if !ok { + // can't find referenced object in this pack file + // this must be a "thin" pack. + parent = &objectInfo{ //Placeholder parent + SHA1: oh.Reference, + ExternalRef: true, // mark as an external reference that must be resolved + Type: plumbing.AnyObject, + DiskType: plumbing.AnyObject, + } + p.oiByHash[oh.Reference] = parent + } + ota = newDeltaObject(oh.Offset, oh.Length, t, parent) + parent.Children = append(parent.Children, ota) + + default: + ota = newBaseObject(oh.Offset, oh.Length, t) + } + + _, crc, err := p.scanner.NextObject(buf) + if err != nil { + return err + } + + ota.Crc32 = crc + ota.Length = oh.Length + + data := buf.Bytes() + if !delta { + sha1, err := getSHA1(ota.Type, data) + if err != nil { + return err + } + + ota.SHA1 = sha1 + p.oiByHash[ota.SHA1] = ota + } + + if p.storage != nil && !delta { + obj := new(plumbing.MemoryObject) + obj.SetSize(oh.Length) + obj.SetType(oh.Type) + if _, err := obj.Write(data); err != nil { + return err + } + + if _, err := p.storage.SetEncodedObject(obj); err != nil { + return err + } + } + + if delta && !p.scanner.IsSeekable { + p.deltas[oh.Offset] = make([]byte, len(data)) + copy(p.deltas[oh.Offset], data) + } + + p.oiByOffset[oh.Offset] = ota + p.oi[i] = ota + } + + return nil +} + +func (p *Parser) resolveDeltas() error { + buf := &bytes.Buffer{} + for _, obj := range p.oi { + buf.Reset() + err := p.get(obj, buf) + if err != nil { + return err + } + content := buf.Bytes() + + if err := p.onInflatedObjectHeader(obj.Type, obj.Length, obj.Offset); err != nil { + return err + } + + if err := p.onInflatedObjectContent(obj.SHA1, obj.Offset, obj.Crc32, content); err != nil { + return err + } + + if !obj.IsDelta() && len(obj.Children) > 0 { + for _, child := range obj.Children { + if err := p.resolveObject(stdioutil.Discard, child, content); err != nil { + return err + } + } + + // Remove the delta from the cache. 
+ if obj.DiskType.IsDelta() && !p.scanner.IsSeekable { + delete(p.deltas, obj.Offset) + } + } + } + + return nil +} + +func (p *Parser) get(o *objectInfo, buf *bytes.Buffer) (err error) { + if !o.ExternalRef { // skip cache check for placeholder parents + b, ok := p.cache.Get(o.Offset) + if ok { + _, err := buf.Write(b) + return err + } + } + + // If it's not on the cache and is not a delta we can try to find it in the + // storage, if there's one. External refs must enter here. + if p.storage != nil && !o.Type.IsDelta() { + var e plumbing.EncodedObject + e, err = p.storage.EncodedObject(plumbing.AnyObject, o.SHA1) + if err != nil { + return err + } + o.Type = e.Type() + + var r io.ReadCloser + r, err = e.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(r, &err) + + _, err = buf.ReadFrom(io.LimitReader(r, e.Size())) + return err + } + + if o.ExternalRef { + // we were not able to resolve a ref in a thin pack + return ErrReferenceDeltaNotFound + } + + if o.DiskType.IsDelta() { + b := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(b) + b.Reset() + err := p.get(o.Parent, b) + if err != nil { + return err + } + base := b.Bytes() + + err = p.resolveObject(buf, o, base) + if err != nil { + return err + } + } else { + err := p.readData(buf, o) + if err != nil { + return err + } + } + + if len(o.Children) > 0 { + data := make([]byte, buf.Len()) + copy(data, buf.Bytes()) + p.cache.Put(o.Offset, data) + } + return nil +} + +func (p *Parser) resolveObject( + w io.Writer, + o *objectInfo, + base []byte, +) error { + if !o.DiskType.IsDelta() { + return nil + } + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + err := p.readData(buf, o) + if err != nil { + return err + } + data := buf.Bytes() + + data, err = applyPatchBase(o, data, base) + if err != nil { + return err + } + + if p.storage != nil { + obj := new(plumbing.MemoryObject) + obj.SetSize(o.Size()) + obj.SetType(o.Type) + if _, err := obj.Write(data); err != nil { + return err + } + + if _, err := p.storage.SetEncodedObject(obj); err != nil { + return err + } + } + _, err = w.Write(data) + return err +} + +func (p *Parser) readData(w io.Writer, o *objectInfo) error { + if !p.scanner.IsSeekable && o.DiskType.IsDelta() { + data, ok := p.deltas[o.Offset] + if !ok { + return ErrDeltaNotCached + } + _, err := w.Write(data) + return err + } + + if _, err := p.scanner.SeekObjectHeader(o.Offset); err != nil { + return err + } + + if _, _, err := p.scanner.NextObject(w); err != nil { + return err + } + return nil +} + +func applyPatchBase(ota *objectInfo, data, base []byte) ([]byte, error) { + patched, err := PatchDelta(base, data) + if err != nil { + return nil, err + } + + if ota.SHA1 == plumbing.ZeroHash { + ota.Type = ota.Parent.Type + sha1, err := getSHA1(ota.Type, patched) + if err != nil { + return nil, err + } + + ota.SHA1 = sha1 + ota.Length = int64(len(patched)) + } + + return patched, nil +} + +func getSHA1(t plumbing.ObjectType, data []byte) (plumbing.Hash, error) { + hasher := plumbing.NewHasher(t, int64(len(data))) + if _, err := hasher.Write(data); err != nil { + return plumbing.ZeroHash, err + } + + return hasher.Sum(), nil +} + +type objectInfo struct { + Offset int64 + Length int64 + Type plumbing.ObjectType + DiskType plumbing.ObjectType + ExternalRef bool // indicates this is an external reference in a thin pack file + + Crc32 uint32 + + Parent *objectInfo + Children []*objectInfo + SHA1 plumbing.Hash +} + +func newBaseObject(offset, length int64, t plumbing.ObjectType) 
*objectInfo { + return newDeltaObject(offset, length, t, nil) +} + +func newDeltaObject( + offset, length int64, + t plumbing.ObjectType, + parent *objectInfo, +) *objectInfo { + obj := &objectInfo{ + Offset: offset, + Length: length, + Type: t, + DiskType: t, + Crc32: 0, + Parent: parent, + } + + return obj +} + +func (o *objectInfo) IsDelta() bool { + return o.Type.IsDelta() +} + +func (o *objectInfo) Size() int64 { + return o.Length +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go new file mode 100644 index 0000000000..9e90f30a72 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go @@ -0,0 +1,252 @@ +package packfile + +import ( + "bytes" + "errors" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// See https://github.com/git/git/blob/49fa3dc76179e04b0833542fa52d0f287a4955ac/delta.h +// https://github.com/git/git/blob/c2c5f6b1e479f2c38e0e01345350620944e3527f/patch-delta.c, +// and https://github.com/tarruda/node-git-core/blob/master/src/js/delta.js +// for details about the delta format. + +const deltaSizeMin = 4 + +// ApplyDelta writes to target the result of applying the modification deltas in delta to base. +func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) { + r, err := base.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(r, &err) + + w, err := target.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + _, err = buf.ReadFrom(r) + if err != nil { + return err + } + src := buf.Bytes() + + dst := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(dst) + dst.Reset() + err = patchDelta(dst, src, delta) + if err != nil { + return err + } + + target.SetSize(int64(dst.Len())) + + b := byteSlicePool.Get().([]byte) + _, err = io.CopyBuffer(w, dst, b) + byteSlicePool.Put(b) + return err +} + +var ( + ErrInvalidDelta = errors.New("invalid delta") + ErrDeltaCmd = errors.New("wrong delta command") +) + +// PatchDelta returns the result of applying the modification deltas in delta to src. +// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command +// is not copy from source or copy from delta (ErrDeltaCmd). 
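+//
+// Illustrative use only (baseContent and deltaContent are placeholders):
+//
+//	patched, err := PatchDelta(baseContent, deltaContent)
+//	if err != nil {
+//		return nil, err
+//	}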
+func PatchDelta(src, delta []byte) ([]byte, error) { + b := &bytes.Buffer{} + if err := patchDelta(b, src, delta); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +func patchDelta(dst *bytes.Buffer, src, delta []byte) error { + if len(delta) < deltaSizeMin { + return ErrInvalidDelta + } + + srcSz, delta := decodeLEB128(delta) + if srcSz != uint(len(src)) { + return ErrInvalidDelta + } + + targetSz, delta := decodeLEB128(delta) + remainingTargetSz := targetSz + + var cmd byte + dst.Grow(int(targetSz)) + for { + if len(delta) == 0 { + return ErrInvalidDelta + } + + cmd = delta[0] + delta = delta[1:] + if isCopyFromSrc(cmd) { + var offset, sz uint + var err error + offset, delta, err = decodeOffset(cmd, delta) + if err != nil { + return err + } + + sz, delta, err = decodeSize(cmd, delta) + if err != nil { + return err + } + + if invalidSize(sz, targetSz) || + invalidOffsetSize(offset, sz, srcSz) { + break + } + dst.Write(src[offset : offset+sz]) + remainingTargetSz -= sz + } else if isCopyFromDelta(cmd) { + sz := uint(cmd) // cmd is the size itself + if invalidSize(sz, targetSz) { + return ErrInvalidDelta + } + + if uint(len(delta)) < sz { + return ErrInvalidDelta + } + + dst.Write(delta[0:sz]) + remainingTargetSz -= sz + delta = delta[sz:] + } else { + return ErrDeltaCmd + } + + if remainingTargetSz <= 0 { + break + } + } + + return nil +} + +// Decodes a number encoded as an unsigned LEB128 at the start of some +// binary data and returns the decoded number and the rest of the +// stream. +// +// This must be called twice on the delta data buffer, first to get the +// expected source buffer size, and again to get the target buffer size. +func decodeLEB128(input []byte) (uint, []byte) { + var num, sz uint + var b byte + for { + b = input[sz] + num |= (uint(b) & payload) << (sz * 7) // concats 7 bits chunks + sz++ + + if uint(b)&continuation == 0 || sz == uint(len(input)) { + break + } + } + + return num, input[sz:] +} + +const ( + payload = 0x7f // 0111 1111 + continuation = 0x80 // 1000 0000 +) + +func isCopyFromSrc(cmd byte) bool { + return (cmd & 0x80) != 0 +} + +func isCopyFromDelta(cmd byte) bool { + return (cmd&0x80) == 0 && cmd != 0 +} + +func decodeOffset(cmd byte, delta []byte) (uint, []byte, error) { + var offset uint + if (cmd & 0x01) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset = uint(delta[0]) + delta = delta[1:] + } + if (cmd & 0x02) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset |= uint(delta[0]) << 8 + delta = delta[1:] + } + if (cmd & 0x04) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset |= uint(delta[0]) << 16 + delta = delta[1:] + } + if (cmd & 0x08) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + offset |= uint(delta[0]) << 24 + delta = delta[1:] + } + + return offset, delta, nil +} + +func decodeSize(cmd byte, delta []byte) (uint, []byte, error) { + var sz uint + if (cmd & 0x10) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + sz = uint(delta[0]) + delta = delta[1:] + } + if (cmd & 0x20) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + sz |= uint(delta[0]) << 8 + delta = delta[1:] + } + if (cmd & 0x40) != 0 { + if len(delta) == 0 { + return 0, nil, ErrInvalidDelta + } + sz |= uint(delta[0]) << 16 + delta = delta[1:] + } + if sz == 0 { + sz = 0x10000 + } + + return sz, delta, nil +} + +func invalidSize(sz, targetSz uint) bool { + return sz > targetSz +} + +func invalidOffsetSize(offset, sz, srcSz uint) bool { 
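+ // (Editorial note) offset+sz can wrap around on overflow; e.g. with
+ // 64-bit uints, ^uint(0)+2 wraps to 1, so the wrap must be detected
+ // before comparing the sum against srcSz.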
+ return sumOverflows(offset, sz) ||
+ offset+sz > srcSz
+}
+
+func sumOverflows(a, b uint) bool {
+ return a+b < a
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
new file mode 100644
index 0000000000..6e6a687886
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/scanner.go
@@ -0,0 +1,466 @@
+package packfile
+
+import (
+ "bufio"
+ "bytes"
+ "compress/zlib"
+ "fmt"
+ "hash"
+ "hash/crc32"
+ "io"
+ stdioutil "io/ioutil"
+ "sync"
+
+ "github.com/go-git/go-git/v5/plumbing"
+ "github.com/go-git/go-git/v5/utils/binary"
+ "github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+var (
+ // ErrEmptyPackfile is returned by Header when no data is found in the packfile
+ ErrEmptyPackfile = NewError("empty packfile")
+ // ErrBadSignature is returned by Header when the signature in the packfile is incorrect.
+ ErrBadSignature = NewError("malformed pack file signature")
+ // ErrUnsupportedVersion is returned by Header when the packfile version is
+ // different than VersionSupported.
+ ErrUnsupportedVersion = NewError("unsupported packfile version")
+ // ErrSeekNotSupported is returned when seeking is not supported.
+ ErrSeekNotSupported = NewError("not seek support")
+)
+
+// ObjectHeader contains the information related to an object; it is
+// collected from the bytes that precede the object's content.
+type ObjectHeader struct {
+ Type plumbing.ObjectType
+ Offset int64
+ Length int64
+ Reference plumbing.Hash
+ OffsetReference int64
+}
+
+type Scanner struct {
+ r *scannerReader
+ crc hash.Hash32
+
+ // pendingObject is used to detect if an object has been read, or is
+ // still waiting to be read
+ pendingObject *ObjectHeader
+ version, objects uint32
+
+ // IsSeekable says whether this scanner can Seek or not; to have a
+ // seekable Scanner, an r implementing io.Seeker is required
+ IsSeekable bool
+}
+
+// NewScanner returns a new Scanner based on a reader; if the given reader
+// implements io.ReadSeeker, the Scanner will also be seekable
+func NewScanner(r io.Reader) *Scanner {
+ _, ok := r.(io.ReadSeeker)
+
+ crc := crc32.NewIEEE()
+ return &Scanner{
+ r: newScannerReader(r, crc),
+ crc: crc,
+ IsSeekable: ok,
+ }
+}
+
+func (s *Scanner) Reset(r io.Reader) {
+ _, ok := r.(io.ReadSeeker)
+
+ s.r.Reset(r)
+ s.crc.Reset()
+ s.IsSeekable = ok
+ s.pendingObject = nil
+ s.version = 0
+ s.objects = 0
+}
+
+// Header reads the whole packfile header (signature, version and object count).
+// It returns the version and the object count and performs checks on the
+// validity of the signature and the version fields.
+func (s *Scanner) Header() (version, objects uint32, err error) {
+ if s.version != 0 {
+ return s.version, s.objects, nil
+ }
+
+ sig, err := s.readSignature()
+ if err != nil {
+ if err == io.EOF {
+ err = ErrEmptyPackfile
+ }
+
+ return
+ }
+
+ if !s.isValidSignature(sig) {
+ err = ErrBadSignature
+ return
+ }
+
+ version, err = s.readVersion()
+ s.version = version
+ if err != nil {
+ return
+ }
+
+ if !s.isSupportedVersion(version) {
+ err = ErrUnsupportedVersion.AddDetails("%d", version)
+ return
+ }
+
+ objects, err = s.readCount()
+ s.objects = objects
+ return
+}
+
+// readSignature reads and returns the signature field in the packfile.
+func (s *Scanner) readSignature() ([]byte, error) { + var sig = make([]byte, 4) + if _, err := io.ReadFull(s.r, sig); err != nil { + return []byte{}, err + } + + return sig, nil +} + +// isValidSignature returns if sig is a valid packfile signature. +func (s *Scanner) isValidSignature(sig []byte) bool { + return bytes.Equal(sig, signature) +} + +// readVersion reads and returns the version field of a packfile. +func (s *Scanner) readVersion() (uint32, error) { + return binary.ReadUint32(s.r) +} + +// isSupportedVersion returns whether version v is supported by the parser. +// The current supported version is VersionSupported, defined above. +func (s *Scanner) isSupportedVersion(v uint32) bool { + return v == VersionSupported +} + +// readCount reads and returns the count of objects field of a packfile. +func (s *Scanner) readCount() (uint32, error) { + return binary.ReadUint32(s.r) +} + +// SeekObjectHeader seeks to specified offset and returns the ObjectHeader +// for the next object in the reader +func (s *Scanner) SeekObjectHeader(offset int64) (*ObjectHeader, error) { + // if seeking we assume that you are not interested in the header + if s.version == 0 { + s.version = VersionSupported + } + + if _, err := s.r.Seek(offset, io.SeekStart); err != nil { + return nil, err + } + + h, err := s.nextObjectHeader() + if err != nil { + return nil, err + } + + h.Offset = offset + return h, nil +} + +// NextObjectHeader returns the ObjectHeader for the next object in the reader +func (s *Scanner) NextObjectHeader() (*ObjectHeader, error) { + if err := s.doPending(); err != nil { + return nil, err + } + + offset, err := s.r.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + + h, err := s.nextObjectHeader() + if err != nil { + return nil, err + } + + h.Offset = offset + return h, nil +} + +// nextObjectHeader returns the ObjectHeader for the next object in the reader +// without the Offset field +func (s *Scanner) nextObjectHeader() (*ObjectHeader, error) { + s.r.Flush() + s.crc.Reset() + + h := &ObjectHeader{} + s.pendingObject = h + + var err error + h.Offset, err = s.r.Seek(0, io.SeekCurrent) + if err != nil { + return nil, err + } + + h.Type, h.Length, err = s.readObjectTypeAndLength() + if err != nil { + return nil, err + } + + switch h.Type { + case plumbing.OFSDeltaObject: + no, err := binary.ReadVariableWidthInt(s.r) + if err != nil { + return nil, err + } + + h.OffsetReference = h.Offset - no + case plumbing.REFDeltaObject: + var err error + h.Reference, err = binary.ReadHash(s.r) + if err != nil { + return nil, err + } + } + + return h, nil +} + +func (s *Scanner) doPending() error { + if s.version == 0 { + var err error + s.version, s.objects, err = s.Header() + if err != nil { + return err + } + } + + return s.discardObjectIfNeeded() +} + +func (s *Scanner) discardObjectIfNeeded() error { + if s.pendingObject == nil { + return nil + } + + h := s.pendingObject + n, _, err := s.NextObject(stdioutil.Discard) + if err != nil { + return err + } + + if n != h.Length { + return fmt.Errorf( + "error discarding object, discarded %d, expected %d", + n, h.Length, + ) + } + + return nil +} + +// ReadObjectTypeAndLength reads and returns the object type and the +// length field from an object entry in a packfile. 
+func (s *Scanner) readObjectTypeAndLength() (plumbing.ObjectType, int64, error) {
+ t, c, err := s.readType()
+ if err != nil {
+ return t, 0, err
+ }
+
+ l, err := s.readLength(c)
+
+ return t, l, err
+}
+
+func (s *Scanner) readType() (plumbing.ObjectType, byte, error) {
+ var c byte
+ var err error
+ if c, err = s.r.ReadByte(); err != nil {
+ return plumbing.ObjectType(0), 0, err
+ }
+
+ typ := parseType(c)
+
+ return typ, c, nil
+}
+
+func parseType(b byte) plumbing.ObjectType {
+ return plumbing.ObjectType((b & maskType) >> firstLengthBits)
+}
+
+// The length is encoded in the last 4 bits of the first byte and in
+// the last 7 bits of subsequent bytes. The last byte has a 0 MSB.
+func (s *Scanner) readLength(first byte) (int64, error) {
+ length := int64(first & maskFirstLength)
+
+ c := first
+ shift := firstLengthBits
+ var err error
+ for c&maskContinue > 0 {
+ if c, err = s.r.ReadByte(); err != nil {
+ return 0, err
+ }
+
+ length += int64(c&maskLength) << shift
+ shift += lengthBits
+ }
+
+ return length, nil
+}
+
+// NextObject writes the content of the next object into the given writer and
+// returns the number of bytes written, the CRC32 of the content and an error,
+// if any
+func (s *Scanner) NextObject(w io.Writer) (written int64, crc32 uint32, err error) {
+ s.pendingObject = nil
+ written, err = s.copyObject(w)
+
+ s.r.Flush()
+ crc32 = s.crc.Sum32()
+ s.crc.Reset()
+
+ return
+}
+
+// copyObject reads and writes a non-deltified object from its zlib stream
+// within an object entry in the packfile.
+func (s *Scanner) copyObject(w io.Writer) (n int64, err error) {
+ zr := zlibReaderPool.Get().(io.ReadCloser)
+ defer zlibReaderPool.Put(zr)
+
+ if err = zr.(zlib.Resetter).Reset(s.r, nil); err != nil {
+ return 0, fmt.Errorf("zlib reset error: %s", err)
+ }
+
+ defer ioutil.CheckClose(zr, &err)
+ buf := byteSlicePool.Get().([]byte)
+ n, err = io.CopyBuffer(w, zr, buf)
+ byteSlicePool.Put(buf)
+ return
+}
+
+var byteSlicePool = sync.Pool{
+ New: func() interface{} {
+ return make([]byte, 32*1024)
+ },
+}
+
+// SeekFromStart sets a new offset from start and returns the old position
+// before the change.
+func (s *Scanner) SeekFromStart(offset int64) (previous int64, err error) {
+ // if seeking we assume that you are not interested in the header
+ if s.version == 0 {
+ s.version = VersionSupported
+ }
+
+ previous, err = s.r.Seek(0, io.SeekCurrent)
+ if err != nil {
+ return -1, err
+ }
+
+ _, err = s.r.Seek(offset, io.SeekStart)
+ return previous, err
+}
+
+// Checksum returns the checksum of the packfile
+func (s *Scanner) Checksum() (plumbing.Hash, error) {
+ err := s.discardObjectIfNeeded()
+ if err != nil {
+ return plumbing.ZeroHash, err
+ }
+
+ return binary.ReadHash(s.r)
+}
+
+// Close reads the reader until io.EOF
+func (s *Scanner) Close() error {
+ buf := byteSlicePool.Get().([]byte)
+ _, err := io.CopyBuffer(stdioutil.Discard, s.r, buf)
+ byteSlicePool.Put(buf)
+ return err
+}
+
+// Flush is a no-op (deprecated)
+func (s *Scanner) Flush() error {
+ return nil
+}
+
+// scannerReader has the following characteristics:
+// - Provides an io.ReadSeeker implementation for bufio.Reader, when the
+// underlying reader supports it.
+// - Keeps track of the current read position, for when the underlying reader
+// isn't an io.ReadSeeker, but we still want to know the current offset.
+// - Writes to the hash writer what it reads, with the aid of a smaller buffer.
+// The buffer helps avoid a performance penalty for performing small writes
+// to the crc32 hash writer.
+type scannerReader struct { + reader io.Reader + crc io.Writer + rbuf *bufio.Reader + wbuf *bufio.Writer + offset int64 +} + +func newScannerReader(r io.Reader, h io.Writer) *scannerReader { + sr := &scannerReader{ + rbuf: bufio.NewReader(nil), + wbuf: bufio.NewWriterSize(nil, 64), + crc: h, + } + sr.Reset(r) + + return sr +} + +func (r *scannerReader) Reset(reader io.Reader) { + r.reader = reader + r.rbuf.Reset(r.reader) + r.wbuf.Reset(r.crc) + + r.offset = 0 + if seeker, ok := r.reader.(io.ReadSeeker); ok { + r.offset, _ = seeker.Seek(0, io.SeekCurrent) + } +} + +func (r *scannerReader) Read(p []byte) (n int, err error) { + n, err = r.rbuf.Read(p) + + r.offset += int64(n) + if _, err := r.wbuf.Write(p[:n]); err != nil { + return n, err + } + return +} + +func (r *scannerReader) ReadByte() (b byte, err error) { + b, err = r.rbuf.ReadByte() + if err == nil { + r.offset++ + return b, r.wbuf.WriteByte(b) + } + return +} + +func (r *scannerReader) Flush() error { + return r.wbuf.Flush() +} + +// Seek seeks to a location. If the underlying reader is not an io.ReadSeeker, +// then only whence=io.SeekCurrent is supported, any other operation fails. +func (r *scannerReader) Seek(offset int64, whence int) (int64, error) { + var err error + + if seeker, ok := r.reader.(io.ReadSeeker); !ok { + if whence != io.SeekCurrent || offset != 0 { + return -1, ErrSeekNotSupported + } + } else { + if whence == io.SeekCurrent && offset == 0 { + return r.offset, nil + } + + r.offset, err = seeker.Seek(offset, whence) + r.rbuf.Reset(r.reader) + } + + return r.offset, err +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go new file mode 100644 index 0000000000..6d409795b0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/encoder.go @@ -0,0 +1,122 @@ +// Package pktline implements reading payloads form pkt-lines and encoding +// pkt-lines from payloads. +package pktline + +import ( + "bytes" + "errors" + "fmt" + "io" +) + +// An Encoder writes pkt-lines to an output stream. +type Encoder struct { + w io.Writer +} + +const ( + // MaxPayloadSize is the maximum payload size of a pkt-line in bytes. + MaxPayloadSize = 65516 + + // For compatibility with canonical Git implementation, accept longer pkt-lines + OversizePayloadMax = 65520 +) + +var ( + // FlushPkt are the contents of a flush-pkt pkt-line. + FlushPkt = []byte{'0', '0', '0', '0'} + // Flush is the payload to use with the Encode method to encode a flush-pkt. + Flush = []byte{} + // FlushString is the payload to use with the EncodeString method to encode a flush-pkt. + FlushString = "" + // ErrPayloadTooLong is returned by the Encode methods when any of the + // provided payloads is bigger than MaxPayloadSize. + ErrPayloadTooLong = errors.New("payload is too long") +) + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + } +} + +// Flush encodes a flush-pkt to the output stream. +func (e *Encoder) Flush() error { + _, err := e.w.Write(FlushPkt) + return err +} + +// Encode encodes a pkt-line with the payload specified and write it to +// the output stream. If several payloads are specified, each of them +// will get streamed in their own pkt-lines. 
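+//
+// Illustrative use only (w is any io.Writer):
+//
+//	e := NewEncoder(w)
+//	err := e.Encode([]byte("hello\n"), []byte("world\n"))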
+func (e *Encoder) Encode(payloads ...[]byte) error { + for _, p := range payloads { + if err := e.encodeLine(p); err != nil { + return err + } + } + + return nil +} + +func (e *Encoder) encodeLine(p []byte) error { + if len(p) > MaxPayloadSize { + return ErrPayloadTooLong + } + + if bytes.Equal(p, Flush) { + return e.Flush() + } + + n := len(p) + 4 + if _, err := e.w.Write(asciiHex16(n)); err != nil { + return err + } + _, err := e.w.Write(p) + return err +} + +// Returns the hexadecimal ascii representation of the 16 less +// significant bits of n. The length of the returned slice will always +// be 4. Example: if n is 1234 (0x4d2), the return value will be +// []byte{'0', '4', 'd', '2'}. +func asciiHex16(n int) []byte { + var ret [4]byte + ret[0] = byteToASCIIHex(byte(n & 0xf000 >> 12)) + ret[1] = byteToASCIIHex(byte(n & 0x0f00 >> 8)) + ret[2] = byteToASCIIHex(byte(n & 0x00f0 >> 4)) + ret[3] = byteToASCIIHex(byte(n & 0x000f)) + + return ret[:] +} + +// turns a byte into its hexadecimal ascii representation. Example: +// from 11 (0xb) to 'b'. +func byteToASCIIHex(n byte) byte { + if n < 10 { + return '0' + n + } + + return 'a' - 10 + n +} + +// EncodeString works similarly as Encode but payloads are specified as strings. +func (e *Encoder) EncodeString(payloads ...string) error { + for _, p := range payloads { + if err := e.Encode([]byte(p)); err != nil { + return err + } + } + + return nil +} + +// Encodef encodes a single pkt-line with the payload formatted as +// the format specifier. The rest of the arguments will be used in +// the format string. +func (e *Encoder) Encodef(format string, a ...interface{}) error { + return e.EncodeString( + fmt.Sprintf(format, a...), + ) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go new file mode 100644 index 0000000000..99aab46e88 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go @@ -0,0 +1,134 @@ +package pktline + +import ( + "errors" + "io" +) + +const ( + lenSize = 4 +) + +// ErrInvalidPktLen is returned by Err() when an invalid pkt-len is found. +var ErrInvalidPktLen = errors.New("invalid pkt-len found") + +// Scanner provides a convenient interface for reading the payloads of a +// series of pkt-lines. It takes an io.Reader providing the source, +// which then can be tokenized through repeated calls to the Scan +// method. +// +// After each Scan call, the Bytes method will return the payload of the +// corresponding pkt-line on a shared buffer, which will be 65516 bytes +// or smaller. Flush pkt-lines are represented by empty byte slices. +// +// Scanning stops at EOF or the first I/O error. +type Scanner struct { + r io.Reader // The reader provided by the client + err error // Sticky error + payload []byte // Last pkt-payload + len [lenSize]byte // Last pkt-len +} + +// NewScanner returns a new Scanner to read from r. +func NewScanner(r io.Reader) *Scanner { + return &Scanner{ + r: r, + } +} + +// Err returns the first error encountered by the Scanner. +func (s *Scanner) Err() error { + return s.err +} + +// Scan advances the Scanner to the next pkt-line, whose payload will +// then be available through the Bytes method. Scanning stops at EOF +// or the first I/O error. After Scan returns false, the Err method +// will return any error that occurred during scanning, except that if +// it was io.EOF, Err will return nil. 
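+//
+// A typical read loop (illustrative only; r is any io.Reader):
+//
+//	s := NewScanner(r)
+//	for s.Scan() {
+//		payload := s.Bytes()
+//		// consume payload before the next Scan overwrites it
+//	}
+//	if err := s.Err(); err != nil {
+//		return err
+//	}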
+func (s *Scanner) Scan() bool { + var l int + l, s.err = s.readPayloadLen() + if s.err == io.EOF { + s.err = nil + return false + } + if s.err != nil { + return false + } + + if cap(s.payload) < l { + s.payload = make([]byte, 0, l) + } + + if _, s.err = io.ReadFull(s.r, s.payload[:l]); s.err != nil { + return false + } + s.payload = s.payload[:l] + + return true +} + +// Bytes returns the most recent payload generated by a call to Scan. +// The underlying array may point to data that will be overwritten by a +// subsequent call to Scan. It does no allocation. +func (s *Scanner) Bytes() []byte { + return s.payload +} + +// Method readPayloadLen returns the payload length by reading the +// pkt-len and subtracting the pkt-len size. +func (s *Scanner) readPayloadLen() (int, error) { + if _, err := io.ReadFull(s.r, s.len[:]); err != nil { + if err == io.ErrUnexpectedEOF { + return 0, ErrInvalidPktLen + } + + return 0, err + } + + n, err := hexDecode(s.len) + if err != nil { + return 0, err + } + + switch { + case n == 0: + return 0, nil + case n <= lenSize: + return 0, ErrInvalidPktLen + case n > OversizePayloadMax+lenSize: + return 0, ErrInvalidPktLen + default: + return n - lenSize, nil + } +} + +// Turns the hexadecimal representation of a number in a byte slice into +// a number. This function substitute strconv.ParseUint(string(buf), 16, +// 16) and/or hex.Decode, to avoid generating new strings, thus helping the +// GC. +func hexDecode(buf [lenSize]byte) (int, error) { + var ret int + for i := 0; i < lenSize; i++ { + n, err := asciiHexToByte(buf[i]) + if err != nil { + return 0, ErrInvalidPktLen + } + ret = 16*ret + int(n) + } + return ret, nil +} + +// turns the hexadecimal ascii representation of a byte into its +// numerical value. Example: from 'b' to 11 (0xb). +func asciiHexToByte(b byte) (byte, error) { + switch { + case b >= '0' && b <= '9': + return b - '0', nil + case b >= 'a' && b <= 'f': + return b - 'a' + 10, nil + default: + return 0, ErrInvalidPktLen + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/hash.go b/vendor/github.com/go-git/go-git/v5/plumbing/hash.go new file mode 100644 index 0000000000..afc602a9ec --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/hash.go @@ -0,0 +1,83 @@ +package plumbing + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "hash" + "sort" + "strconv" +) + +// Hash SHA1 hashed content +type Hash [20]byte + +// ZeroHash is Hash with value zero +var ZeroHash Hash + +// ComputeHash compute the hash for a given ObjectType and content +func ComputeHash(t ObjectType, content []byte) Hash { + h := NewHasher(t, int64(len(content))) + h.Write(content) + return h.Sum() +} + +// NewHash return a new Hash from a hexadecimal hash representation +func NewHash(s string) Hash { + b, _ := hex.DecodeString(s) + + var h Hash + copy(h[:], b) + + return h +} + +func (h Hash) IsZero() bool { + var empty Hash + return h == empty +} + +func (h Hash) String() string { + return hex.EncodeToString(h[:]) +} + +type Hasher struct { + hash.Hash +} + +func NewHasher(t ObjectType, size int64) Hasher { + h := Hasher{sha1.New()} + h.Write(t.Bytes()) + h.Write([]byte(" ")) + h.Write([]byte(strconv.FormatInt(size, 10))) + h.Write([]byte{0}) + return h +} + +func (h Hasher) Sum() (hash Hash) { + copy(hash[:], h.Hash.Sum(nil)) + return +} + +// HashesSort sorts a slice of Hashes in increasing order. 
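+//
+// Illustrative use only (hashes is a []Hash in arbitrary order):
+//
+//	HashesSort(hashes)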
+func HashesSort(a []Hash) {
+	sort.Sort(HashSlice(a))
+}
+
+// HashSlice attaches the methods of sort.Interface to []Hash, sorting in
+// increasing order.
+type HashSlice []Hash
+
+func (p HashSlice) Len() int           { return len(p) }
+func (p HashSlice) Less(i, j int) bool { return bytes.Compare(p[i][:], p[j][:]) < 0 }
+func (p HashSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+// IsHash returns true if the given string is a valid hash.
+func IsHash(s string) bool {
+	if len(s) != 40 {
+		return false
+	}
+
+	_, err := hex.DecodeString(s)
+	return err == nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/memory.go b/vendor/github.com/go-git/go-git/v5/plumbing/memory.go
new file mode 100644
index 0000000000..21337cc0dd
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/memory.go
@@ -0,0 +1,72 @@
+package plumbing
+
+import (
+	"bytes"
+	"io"
+)
+
+// MemoryObject is an in-memory Object implementation.
+type MemoryObject struct {
+	t    ObjectType
+	h    Hash
+	cont []byte
+	sz   int64
+}
+
+// Hash returns the object Hash. The hash is calculated on-the-fly the first
+// time it's called; in all subsequent calls the same Hash is returned even
+// if the type or the content have changed. The Hash is only generated if the
+// size of the content is exactly the object size.
+func (o *MemoryObject) Hash() Hash {
+	if o.h == ZeroHash && int64(len(o.cont)) == o.sz {
+		o.h = ComputeHash(o.t, o.cont)
+	}
+
+	return o.h
+}
+
+// Type returns the ObjectType.
+func (o *MemoryObject) Type() ObjectType { return o.t }
+
+// SetType sets the ObjectType.
+func (o *MemoryObject) SetType(t ObjectType) { o.t = t }
+
+// Size returns the size of the object.
+func (o *MemoryObject) Size() int64 { return o.sz }
+
+// SetSize sets the object size; content of the given size should be written
+// afterwards.
+func (o *MemoryObject) SetSize(s int64) { o.sz = s }
+
+// Reader returns an io.ReadCloser used to read the object's content.
+//
+// For a MemoryObject, this reader is seekable.
+func (o *MemoryObject) Reader() (io.ReadCloser, error) {
+	return nopCloser{bytes.NewReader(o.cont)}, nil
+}
+
+// Writer returns an ObjectWriter used to write the object's content.
+func (o *MemoryObject) Writer() (io.WriteCloser, error) {
+	return o, nil
+}
+
+func (o *MemoryObject) Write(p []byte) (n int, err error) {
+	o.cont = append(o.cont, p...)
+	o.sz = int64(len(o.cont))
+
+	return len(p), nil
+}
+
+// Close releases any resources consumed by the object when it is acting as an
+// ObjectWriter.
+func (o *MemoryObject) Close() error { return nil }
+
+// nopCloser exposes the extra methods of bytes.Reader while nopping Close().
+//
+// This allows clients to attempt seeking in a cached Blob's Reader.
+type nopCloser struct {
+	*bytes.Reader
+}
+
+// Close does nothing.
+func (nc nopCloser) Close() error { return nil }
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object.go
new file mode 100644
index 0000000000..2655dee43e
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object.go
@@ -0,0 +1,111 @@
+// Package plumbing implements the core interfaces and structs used by go-git.
+package plumbing
+
+import (
+	"errors"
+	"io"
+)
+
+var (
+	ErrObjectNotFound = errors.New("object not found")
+	// ErrInvalidType is returned when an invalid object type is provided.
+	ErrInvalidType = errors.New("invalid object type")
+)
+
+// EncodedObject is a generic representation of any git object in its
+// encoded form.
+type EncodedObject interface {
+	Hash() Hash
+	Type() ObjectType
+	SetType(ObjectType)
+	Size() int64
+	SetSize(int64)
+	Reader() (io.ReadCloser, error)
+	Writer() (io.WriteCloser, error)
+}
+
+// DeltaObject is an EncodedObject representing a delta.
+type DeltaObject interface {
+	EncodedObject
+	// BaseHash returns the hash of the object used as base for this delta.
+	BaseHash() Hash
+	// ActualHash returns the hash of the object after applying the delta.
+	ActualHash() Hash
+	// ActualSize returns the size of the object after applying the delta.
+	ActualSize() int64
+}
+
+// ObjectType is an internal object type.
+// Integer values from 0 to 7 map to those exposed by git.
+// AnyObject is used to represent any from 0 to 7.
+type ObjectType int8
+
+const (
+	InvalidObject ObjectType = 0
+	CommitObject  ObjectType = 1
+	TreeObject    ObjectType = 2
+	BlobObject    ObjectType = 3
+	TagObject     ObjectType = 4
+	// 5 reserved for future expansion
+	OFSDeltaObject ObjectType = 6
+	REFDeltaObject ObjectType = 7
+
+	AnyObject ObjectType = -127
+)
+
+func (t ObjectType) String() string {
+	switch t {
+	case CommitObject:
+		return "commit"
+	case TreeObject:
+		return "tree"
+	case BlobObject:
+		return "blob"
+	case TagObject:
+		return "tag"
+	case OFSDeltaObject:
+		return "ofs-delta"
+	case REFDeltaObject:
+		return "ref-delta"
+	case AnyObject:
+		return "any"
+	default:
+		return "unknown"
+	}
+}
+
+func (t ObjectType) Bytes() []byte {
+	return []byte(t.String())
+}
+
+// Valid returns true if t is a valid ObjectType.
+func (t ObjectType) Valid() bool {
+	return t >= CommitObject && t <= REFDeltaObject
+}
+
+// IsDelta returns true for any ObjectType that represents a delta (i.e.
+// REFDeltaObject or OFSDeltaObject).
+func (t ObjectType) IsDelta() bool {
+	return t == REFDeltaObject || t == OFSDeltaObject
+}
+
+// ParseObjectType parses a string representation of ObjectType. It returns an
+// error on parse failure.
+func ParseObjectType(value string) (typ ObjectType, err error) {
+	switch value {
+	case "commit":
+		typ = CommitObject
+	case "tree":
+		typ = TreeObject
+	case "blob":
+		typ = BlobObject
+	case "tag":
+		typ = TagObject
+	case "ofs-delta":
+		typ = OFSDeltaObject
+	case "ref-delta":
+		typ = REFDeltaObject
+	default:
+		err = ErrInvalidType
+	}
+	return
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go
new file mode 100644
index 0000000000..8fb7576fa3
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/blob.go
@@ -0,0 +1,144 @@
+package object
+
+import (
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// Blob is used to store arbitrary data - it is generally a file.
+type Blob struct {
+	// Hash of the blob.
+	Hash plumbing.Hash
+	// Size of the (uncompressed) blob.
+	Size int64
+
+	obj plumbing.EncodedObject
+}
+
+// GetBlob gets a blob from an object storer and decodes it.
+func GetBlob(s storer.EncodedObjectStorer, h plumbing.Hash) (*Blob, error) {
+	o, err := s.EncodedObject(plumbing.BlobObject, h)
+	if err != nil {
+		return nil, err
+	}
+
+	return DecodeBlob(o)
+}
+
+// DecodeBlob decodes an encoded object into a *Blob.
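// Illustrative sketch (editor's example, not part of the vendored file):
// building an encoded object in memory (MemoryObject, memory.go above) and
// round-tripping an ObjectType through its string form.
//
//	obj := &plumbing.MemoryObject{}
//	obj.SetType(plumbing.BlobObject)
//	obj.SetSize(5)
//	w, _ := obj.Writer()
//	_, _ = w.Write([]byte("hello")) // Hash() is computed once len == size
//
//	t, _ := plumbing.ParseObjectType("ofs-delta")
//	_ = t.IsDelta() // true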
+func DecodeBlob(o plumbing.EncodedObject) (*Blob, error) {
+	b := &Blob{}
+	if err := b.Decode(o); err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+// ID returns the object ID of the blob. The returned value will always match
+// the current value of Blob.Hash.
+//
+// ID is present to fulfill the Object interface.
+func (b *Blob) ID() plumbing.Hash {
+	return b.Hash
+}
+
+// Type returns the type of object. It always returns plumbing.BlobObject.
+//
+// Type is present to fulfill the Object interface.
+func (b *Blob) Type() plumbing.ObjectType {
+	return plumbing.BlobObject
+}
+
+// Decode transforms a plumbing.EncodedObject into a Blob struct.
+func (b *Blob) Decode(o plumbing.EncodedObject) error {
+	if o.Type() != plumbing.BlobObject {
+		return ErrUnsupportedObject
+	}
+
+	b.Hash = o.Hash()
+	b.Size = o.Size()
+	b.obj = o
+
+	return nil
+}
+
+// Encode transforms a Blob into a plumbing.EncodedObject.
+func (b *Blob) Encode(o plumbing.EncodedObject) (err error) {
+	o.SetType(plumbing.BlobObject)
+
+	w, err := o.Writer()
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(w, &err)
+
+	r, err := b.Reader()
+	if err != nil {
+		return err
+	}
+
+	defer ioutil.CheckClose(r, &err)
+
+	_, err = io.Copy(w, r)
+	return err
+}
+
+// Reader returns a reader allowing access to the content of the blob.
+func (b *Blob) Reader() (io.ReadCloser, error) {
+	return b.obj.Reader()
+}
+
+// BlobIter provides an iterator for a set of blobs.
+type BlobIter struct {
+	storer.EncodedObjectIter
+	s storer.EncodedObjectStorer
+}
+
+// NewBlobIter takes a storer.EncodedObjectStorer and a
+// storer.EncodedObjectIter and returns a *BlobIter that iterates over all
+// blobs contained in the storer.EncodedObjectIter.
+//
+// Any non-blob object returned by the storer.EncodedObjectIter is skipped.
+func NewBlobIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *BlobIter {
+	return &BlobIter{iter, s}
+}
+
+// Next moves the iterator to the next blob and returns a pointer to it. If
+// there are no more blobs, it returns io.EOF.
+func (iter *BlobIter) Next() (*Blob, error) {
+	for {
+		obj, err := iter.EncodedObjectIter.Next()
+		if err != nil {
+			return nil, err
+		}
+
+		if obj.Type() != plumbing.BlobObject {
+			continue
+		}
+
+		return DecodeBlob(obj)
+	}
+}
+
+// ForEach calls the cb function for each blob contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration is stopped but no error is returned. The iterator is closed.
+func (iter *BlobIter) ForEach(cb func(*Blob) error) error {
+	return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error {
+		if obj.Type() != plumbing.BlobObject {
+			return nil
+		}
+
+		b, err := DecodeBlob(obj)
+		if err != nil {
+			return err
+		}
+
+		return cb(b)
+	})
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go
new file mode 100644
index 0000000000..8b119bc9c9
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/change.go
@@ -0,0 +1,159 @@
+package object
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/go-git/go-git/v5/utils/merkletrie"
+)
+
+// Change values represent a detected change between two git trees. For
+// modifications, From is the original status of the node and To is its
+// final status. For insertions, From is the zero value and for
+// deletions To is the zero value.
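// Illustrative sketch (editor's example, not part of the vendored file):
// reading a blob's bytes through the storer-backed API above; s and h are
// assumed to be an object storer and a blob hash from an open repository.
//
//	blob, err := object.GetBlob(s, h)
//	if err == nil {
//		r, _ := blob.Reader()
//		data, _ := io.ReadAll(r)
//		_ = r.Close()
//		_ = data
//	}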
+type Change struct {
+	From ChangeEntry
+	To   ChangeEntry
+}
+
+var empty ChangeEntry
+
+// Action returns the kind of action represented by the change, an
+// insertion, a deletion or a modification.
+func (c *Change) Action() (merkletrie.Action, error) {
+	if c.From == empty && c.To == empty {
+		return merkletrie.Action(0),
+			fmt.Errorf("malformed change: empty from and to")
+	}
+
+	if c.From == empty {
+		return merkletrie.Insert, nil
+	}
+
+	if c.To == empty {
+		return merkletrie.Delete, nil
+	}
+
+	return merkletrie.Modify, nil
+}
+
+// Files returns the files before and after a change.
+// For insertions from will be nil; for deletions to will be nil.
+func (c *Change) Files() (from, to *File, err error) {
+	action, err := c.Action()
+	if err != nil {
+		return
+	}
+
+	if action == merkletrie.Insert || action == merkletrie.Modify {
+		to, err = c.To.Tree.TreeEntryFile(&c.To.TreeEntry)
+		if !c.To.TreeEntry.Mode.IsFile() {
+			return nil, nil, nil
+		}
+
+		if err != nil {
+			return
+		}
+	}
+
+	if action == merkletrie.Delete || action == merkletrie.Modify {
+		from, err = c.From.Tree.TreeEntryFile(&c.From.TreeEntry)
+		if !c.From.TreeEntry.Mode.IsFile() {
+			return nil, nil, nil
+		}
+
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func (c *Change) String() string {
+	action, err := c.Action()
+	if err != nil {
+		return "malformed change"
+	}
+
+	return fmt.Sprintf("<Action: %s, Path: %s>", action, c.name())
+}
+
+// Patch returns a Patch with all the file changes in chunks. This
+// representation can be used to create several diff outputs.
+func (c *Change) Patch() (*Patch, error) {
+	return c.PatchContext(context.Background())
+}
+
+// PatchContext returns a Patch with all the file changes in chunks. This
+// representation can be used to create several diff outputs.
+// If the context expires, a non-nil error will be returned.
+// The provided context must be non-nil.
+func (c *Change) PatchContext(ctx context.Context) (*Patch, error) {
+	return getPatchContext(ctx, "", c)
+}
+
+func (c *Change) name() string {
+	if c.From != empty {
+		return c.From.Name
+	}
+
+	return c.To.Name
+}
+
+// ChangeEntry values represent a node that has suffered a change.
+type ChangeEntry struct {
+	// Full path of the node using "/" as separator.
+	Name string
+	// Parent tree of the node that has changed.
+	Tree *Tree
+	// The entry of the node.
+	TreeEntry TreeEntry
+}
+
+// Changes represents a collection of changes between two git trees.
+// Implements sort.Interface lexicographically over the path of the
+// changed files.
+type Changes []*Change
+
+func (c Changes) Len() int {
+	return len(c)
+}
+
+func (c Changes) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+func (c Changes) Less(i, j int) bool {
+	return strings.Compare(c[i].name(), c[j].name()) < 0
+}
+
+func (c Changes) String() string {
+	var buffer bytes.Buffer
+	buffer.WriteString("[")
+	comma := ""
+	for _, v := range c {
+		buffer.WriteString(comma)
+		buffer.WriteString(v.String())
+		comma = ", "
+	}
+	buffer.WriteString("]")
+
+	return buffer.String()
+}
+
+// Patch returns a Patch with all the changes in chunks. This
+// representation can be used to create several diff outputs.
+func (c Changes) Patch() (*Patch, error) {
+	return c.PatchContext(context.Background())
+}
+
+// PatchContext returns a Patch with all the changes in chunks. This
+// representation can be used to create several diff outputs.
+// If context expires, an non-nil error will be returned +// Provided context must be non-nil +func (c Changes) PatchContext(ctx context.Context) (*Patch, error) { + return getPatchContext(ctx, "", c...) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go new file mode 100644 index 0000000000..f701188288 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/change_adaptor.go @@ -0,0 +1,61 @@ +package object + +import ( + "errors" + "fmt" + + "github.com/go-git/go-git/v5/utils/merkletrie" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +// The following functions transform changes types form the merkletrie +// package to changes types from this package. + +func newChange(c merkletrie.Change) (*Change, error) { + ret := &Change{} + + var err error + if ret.From, err = newChangeEntry(c.From); err != nil { + return nil, fmt.Errorf("From field: %s", err) + } + + if ret.To, err = newChangeEntry(c.To); err != nil { + return nil, fmt.Errorf("To field: %s", err) + } + + return ret, nil +} + +func newChangeEntry(p noder.Path) (ChangeEntry, error) { + if p == nil { + return empty, nil + } + + asTreeNoder, ok := p.Last().(*treeNoder) + if !ok { + return ChangeEntry{}, errors.New("cannot transform non-TreeNoders") + } + + return ChangeEntry{ + Name: p.String(), + Tree: asTreeNoder.parent, + TreeEntry: TreeEntry{ + Name: asTreeNoder.name, + Mode: asTreeNoder.mode, + Hash: asTreeNoder.hash, + }, + }, nil +} + +func newChanges(src merkletrie.Changes) (Changes, error) { + ret := make(Changes, len(src)) + var err error + for i, e := range src { + ret[i], err = newChange(e) + if err != nil { + return nil, fmt.Errorf("change #%d: %s", i, err) + } + } + + return ret, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go new file mode 100644 index 0000000000..7a1b8e5ae0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit.go @@ -0,0 +1,442 @@ +package object + +import ( + "bufio" + "bytes" + "context" + "errors" + "fmt" + "io" + "strings" + + "github.com/ProtonMail/go-crypto/openpgp" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +const ( + beginpgp string = "-----BEGIN PGP SIGNATURE-----" + endpgp string = "-----END PGP SIGNATURE-----" + headerpgp string = "gpgsig" +) + +// Hash represents the hash of an object +type Hash plumbing.Hash + +// Commit points to a single tree, marking it as what the project looked like +// at a certain point in time. It contains meta-information about that point +// in time, such as a timestamp, the author of the changes since the last +// commit, a pointer to the previous commit(s), etc. +// http://shafiulazam.com/gitbook/1_the_git_object_model.html +type Commit struct { + // Hash of the commit object. + Hash plumbing.Hash + // Author is the original author of the commit. + Author Signature + // Committer is the one performing the commit, might be different from + // Author. + Committer Signature + // PGPSignature is the PGP signature of the commit. + PGPSignature string + // Message is the commit message, contains arbitrary text. + Message string + // TreeHash is the hash of the root tree of the commit. + TreeHash plumbing.Hash + // ParentHashes are the hashes of the parent commits of the commit. 
+ ParentHashes []plumbing.Hash + + s storer.EncodedObjectStorer +} + +// GetCommit gets a commit from an object storer and decodes it. +func GetCommit(s storer.EncodedObjectStorer, h plumbing.Hash) (*Commit, error) { + o, err := s.EncodedObject(plumbing.CommitObject, h) + if err != nil { + return nil, err + } + + return DecodeCommit(s, o) +} + +// DecodeCommit decodes an encoded object into a *Commit and associates it to +// the given object storer. +func DecodeCommit(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Commit, error) { + c := &Commit{s: s} + if err := c.Decode(o); err != nil { + return nil, err + } + + return c, nil +} + +// Tree returns the Tree from the commit. +func (c *Commit) Tree() (*Tree, error) { + return GetTree(c.s, c.TreeHash) +} + +// PatchContext returns the Patch between the actual commit and the provided one. +// Error will be return if context expires. Provided context must be non-nil. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (c *Commit) PatchContext(ctx context.Context, to *Commit) (*Patch, error) { + fromTree, err := c.Tree() + if err != nil { + return nil, err + } + + var toTree *Tree + if to != nil { + toTree, err = to.Tree() + if err != nil { + return nil, err + } + } + + return fromTree.PatchContext(ctx, toTree) +} + +// Patch returns the Patch between the actual commit and the provided one. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (c *Commit) Patch(to *Commit) (*Patch, error) { + return c.PatchContext(context.Background(), to) +} + +// Parents return a CommitIter to the parent Commits. +func (c *Commit) Parents() CommitIter { + return NewCommitIter(c.s, + storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, c.ParentHashes), + ) +} + +// NumParents returns the number of parents in a commit. +func (c *Commit) NumParents() int { + return len(c.ParentHashes) +} + +var ErrParentNotFound = errors.New("commit parent not found") + +// Parent returns the ith parent of a commit. +func (c *Commit) Parent(i int) (*Commit, error) { + if len(c.ParentHashes) == 0 || i > len(c.ParentHashes)-1 { + return nil, ErrParentNotFound + } + + return GetCommit(c.s, c.ParentHashes[i]) +} + +// File returns the file with the specified "path" in the commit and a +// nil error if the file exists. If the file does not exist, it returns +// a nil file and the ErrFileNotFound error. +func (c *Commit) File(path string) (*File, error) { + tree, err := c.Tree() + if err != nil { + return nil, err + } + + return tree.File(path) +} + +// Files returns a FileIter allowing to iterate over the Tree +func (c *Commit) Files() (*FileIter, error) { + tree, err := c.Tree() + if err != nil { + return nil, err + } + + return tree.Files(), nil +} + +// ID returns the object ID of the commit. The returned value will always match +// the current value of Commit.Hash. +// +// ID is present to fulfill the Object interface. +func (c *Commit) ID() plumbing.Hash { + return c.Hash +} + +// Type returns the type of object. It always returns plumbing.CommitObject. +// +// Type is present to fulfill the Object interface. +func (c *Commit) Type() plumbing.ObjectType { + return plumbing.CommitObject +} + +// Decode transforms a plumbing.EncodedObject into a Commit struct. 
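// Illustrative sketch (editor's example, not part of the vendored file):
// navigating from a commit to its tree and first parent, then diffing the
// two; s and h are assumed to be an object storer and a commit hash.
//
//	c, _ := object.GetCommit(s, h)
//	tree, _ := c.Tree()
//	if c.NumParents() > 0 {
//		parent, _ := c.Parent(0)
//		patch, _ := parent.Patch(c) // rename-aware since v5.1.0
//		_, _ = tree, patch
//	}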
+func (c *Commit) Decode(o plumbing.EncodedObject) (err error) { + if o.Type() != plumbing.CommitObject { + return ErrUnsupportedObject + } + + c.Hash = o.Hash() + + reader, err := o.Reader() + if err != nil { + return err + } + defer ioutil.CheckClose(reader, &err) + + r := bufPool.Get().(*bufio.Reader) + defer bufPool.Put(r) + r.Reset(reader) + + var message bool + var pgpsig bool + var msgbuf bytes.Buffer + for { + line, err := r.ReadBytes('\n') + if err != nil && err != io.EOF { + return err + } + + if pgpsig { + if len(line) > 0 && line[0] == ' ' { + line = bytes.TrimLeft(line, " ") + c.PGPSignature += string(line) + continue + } else { + pgpsig = false + } + } + + if !message { + line = bytes.TrimSpace(line) + if len(line) == 0 { + message = true + continue + } + + split := bytes.SplitN(line, []byte{' '}, 2) + + var data []byte + if len(split) == 2 { + data = split[1] + } + + switch string(split[0]) { + case "tree": + c.TreeHash = plumbing.NewHash(string(data)) + case "parent": + c.ParentHashes = append(c.ParentHashes, plumbing.NewHash(string(data))) + case "author": + c.Author.Decode(data) + case "committer": + c.Committer.Decode(data) + case headerpgp: + c.PGPSignature += string(data) + "\n" + pgpsig = true + } + } else { + msgbuf.Write(line) + } + + if err == io.EOF { + break + } + } + c.Message = msgbuf.String() + return nil +} + +// Encode transforms a Commit into a plumbing.EncodedObject. +func (c *Commit) Encode(o plumbing.EncodedObject) error { + return c.encode(o, true) +} + +// EncodeWithoutSignature export a Commit into a plumbing.EncodedObject without the signature (correspond to the payload of the PGP signature). +func (c *Commit) EncodeWithoutSignature(o plumbing.EncodedObject) error { + return c.encode(o, false) +} + +func (c *Commit) encode(o plumbing.EncodedObject, includeSig bool) (err error) { + o.SetType(plumbing.CommitObject) + w, err := o.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + + if _, err = fmt.Fprintf(w, "tree %s\n", c.TreeHash.String()); err != nil { + return err + } + + for _, parent := range c.ParentHashes { + if _, err = fmt.Fprintf(w, "parent %s\n", parent.String()); err != nil { + return err + } + } + + if _, err = fmt.Fprint(w, "author "); err != nil { + return err + } + + if err = c.Author.Encode(w); err != nil { + return err + } + + if _, err = fmt.Fprint(w, "\ncommitter "); err != nil { + return err + } + + if err = c.Committer.Encode(w); err != nil { + return err + } + + if c.PGPSignature != "" && includeSig { + if _, err = fmt.Fprint(w, "\n"+headerpgp+" "); err != nil { + return err + } + + // Split all the signature lines and re-write with a left padding and + // newline. Use join for this so it's clear that a newline should not be + // added after this section, as it will be added when the message is + // printed. + signature := strings.TrimSuffix(c.PGPSignature, "\n") + lines := strings.Split(signature, "\n") + if _, err = fmt.Fprint(w, strings.Join(lines, "\n ")); err != nil { + return err + } + } + + if _, err = fmt.Fprintf(w, "\n\n%s", c.Message); err != nil { + return err + } + + return err +} + +// Stats returns the stats of a commit. +func (c *Commit) Stats() (FileStats, error) { + return c.StatsContext(context.Background()) +} + +// StatsContext returns the stats of a commit. Error will be return if context +// expires. Provided context must be non-nil. 
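// Illustrative sketch (editor's example, not part of the vendored file):
// the wire layout that Decode above parses and encode below writes; gpgsig
// continuation lines are indented by a single space.
//
//	tree <hash>
//	parent <hash>        (zero or more)
//	author Name <email> <unix-ts> <tz>
//	committer Name <email> <unix-ts> <tz>
//	gpgsig -----BEGIN PGP SIGNATURE-----
//	 ...
//	 -----END PGP SIGNATURE-----
//
//	<commit message>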
+func (c *Commit) StatsContext(ctx context.Context) (FileStats, error) { + fromTree, err := c.Tree() + if err != nil { + return nil, err + } + + toTree := &Tree{} + if c.NumParents() != 0 { + firstParent, err := c.Parents().Next() + if err != nil { + return nil, err + } + + toTree, err = firstParent.Tree() + if err != nil { + return nil, err + } + } + + patch, err := toTree.PatchContext(ctx, fromTree) + if err != nil { + return nil, err + } + + return getFileStatsFromFilePatches(patch.FilePatches()), nil +} + +func (c *Commit) String() string { + return fmt.Sprintf( + "%s %s\nAuthor: %s\nDate: %s\n\n%s\n", + plumbing.CommitObject, c.Hash, c.Author.String(), + c.Author.When.Format(DateFormat), indent(c.Message), + ) +} + +// Verify performs PGP verification of the commit with a provided armored +// keyring and returns openpgp.Entity associated with verifying key on success. +func (c *Commit) Verify(armoredKeyRing string) (*openpgp.Entity, error) { + keyRingReader := strings.NewReader(armoredKeyRing) + keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader) + if err != nil { + return nil, err + } + + // Extract signature. + signature := strings.NewReader(c.PGPSignature) + + encoded := &plumbing.MemoryObject{} + // Encode commit components, excluding signature and get a reader object. + if err := c.EncodeWithoutSignature(encoded); err != nil { + return nil, err + } + er, err := encoded.Reader() + if err != nil { + return nil, err + } + + return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil) +} + +func indent(t string) string { + var output []string + for _, line := range strings.Split(t, "\n") { + if len(line) != 0 { + line = " " + line + } + + output = append(output, line) + } + + return strings.Join(output, "\n") +} + +// CommitIter is a generic closable interface for iterating over commits. +type CommitIter interface { + Next() (*Commit, error) + ForEach(func(*Commit) error) error + Close() +} + +// storerCommitIter provides an iterator from commits in an EncodedObjectStorer. +type storerCommitIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewCommitIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a CommitIter that iterates over all +// commits contained in the storer.EncodedObjectIter. +// +// Any non-commit object returned by the storer.EncodedObjectIter is skipped. +func NewCommitIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) CommitIter { + return &storerCommitIter{iter, s} +} + +// Next moves the iterator to the next commit and returns a pointer to it. If +// there are no more commits, it returns io.EOF. +func (iter *storerCommitIter) Next() (*Commit, error) { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + return DecodeCommit(iter.s, obj) +} + +// ForEach call the cb function for each commit contained on this iter until +// an error appends or the end of the iter is reached. If ErrStop is sent +// the iteration is stopped but no error is returned. The iterator is closed. 
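// Illustrative sketch (editor's example, not part of the vendored file):
// verifying a signed commit with Verify above; armoredKeyRing is assumed to
// hold the signer's ASCII-armored public key.
//
//	entity, err := commit.Verify(armoredKeyRing)
//	if err == nil {
//		_ = entity // the openpgp.Entity that produced the signature
//	}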
+func (iter *storerCommitIter) ForEach(cb func(*Commit) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + c, err := DecodeCommit(iter.s, obj) + if err != nil { + return err + } + + return cb(c) + }) +} + +func (iter *storerCommitIter) Close() { + iter.EncodedObjectIter.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go new file mode 100644 index 0000000000..a96b6a4cf0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker.go @@ -0,0 +1,327 @@ +package object + +import ( + "container/list" + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/storage" +) + +type commitPreIterator struct { + seenExternal map[plumbing.Hash]bool + seen map[plumbing.Hash]bool + stack []CommitIter + start *Commit +} + +// NewCommitPreorderIter returns a CommitIter that walks the commit history, +// starting at the given commit and visiting its parents in pre-order. +// The given callback will be called for each visited commit. Each commit will +// be visited only once. If the callback returns an error, walking will stop +// and will return the error. Other errors might be returned if the history +// cannot be traversed (e.g. missing objects). Ignore allows to skip some +// commits from being iterated. +func NewCommitPreorderIter( + c *Commit, + seenExternal map[plumbing.Hash]bool, + ignore []plumbing.Hash, +) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + return &commitPreIterator{ + seenExternal: seenExternal, + seen: seen, + stack: make([]CommitIter, 0), + start: c, + } +} + +func (w *commitPreIterator) Next() (*Commit, error) { + var c *Commit + for { + if w.start != nil { + c = w.start + w.start = nil + } else { + current := len(w.stack) - 1 + if current < 0 { + return nil, io.EOF + } + + var err error + c, err = w.stack[current].Next() + if err == io.EOF { + w.stack = w.stack[:current] + continue + } + + if err != nil { + return nil, err + } + } + + if w.seen[c.Hash] || w.seenExternal[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + if c.NumParents() > 0 { + w.stack = append(w.stack, filteredParentIter(c, w.seen)) + } + + return c, nil + } +} + +func filteredParentIter(c *Commit, seen map[plumbing.Hash]bool) CommitIter { + var hashes []plumbing.Hash + for _, h := range c.ParentHashes { + if !seen[h] { + hashes = append(hashes, h) + } + } + + return NewCommitIter(c.s, + storer.NewEncodedObjectLookupIter(c.s, plumbing.CommitObject, hashes), + ) +} + +func (w *commitPreIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *commitPreIterator) Close() {} + +type commitPostIterator struct { + stack []*Commit + seen map[plumbing.Hash]bool +} + +// NewCommitPostorderIter returns a CommitIter that walks the commit +// history like WalkCommitHistory but in post-order. This means that after +// walking a merge commit, the merged commit will be walked before the base +// it was merged on. This can be useful if you wish to see the history in +// chronological order. Ignore allows to skip some commits from being iterated. 
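// Illustrative sketch (editor's example, not part of the vendored file):
// walking history pre-order from a head commit, skipping one known hash.
//
//	iter := object.NewCommitPreorderIter(head, nil, []plumbing.Hash{ignored})
//	_ = iter.ForEach(func(c *object.Commit) error {
//		fmt.Println(c.Hash)
//		return nil // return storer.ErrStop to end the walk early
//	})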
+func NewCommitPostorderIter(c *Commit, ignore []plumbing.Hash) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + return &commitPostIterator{ + stack: []*Commit{c}, + seen: seen, + } +} + +func (w *commitPostIterator) Next() (*Commit, error) { + for { + if len(w.stack) == 0 { + return nil, io.EOF + } + + c := w.stack[len(w.stack)-1] + w.stack = w.stack[:len(w.stack)-1] + + if w.seen[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + return c, c.Parents().ForEach(func(p *Commit) error { + w.stack = append(w.stack, p) + return nil + }) + } +} + +func (w *commitPostIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *commitPostIterator) Close() {} + +// commitAllIterator stands for commit iterator for all refs. +type commitAllIterator struct { + // currCommit points to the current commit. + currCommit *list.Element +} + +// NewCommitAllIter returns a new commit iterator for all refs. +// repoStorer is a repo Storer used to get commits and references. +// commitIterFunc is a commit iterator function, used to iterate through ref commits in chosen order +func NewCommitAllIter(repoStorer storage.Storer, commitIterFunc func(*Commit) CommitIter) (CommitIter, error) { + commitsPath := list.New() + commitsLookup := make(map[plumbing.Hash]*list.Element) + head, err := storer.ResolveReference(repoStorer, plumbing.HEAD) + if err == nil { + err = addReference(repoStorer, commitIterFunc, head, commitsPath, commitsLookup) + } + + if err != nil && err != plumbing.ErrReferenceNotFound { + return nil, err + } + + // add all references along with the HEAD + refIter, err := repoStorer.IterReferences() + if err != nil { + return nil, err + } + defer refIter.Close() + + for { + ref, err := refIter.Next() + if err == io.EOF { + break + } + + if err == plumbing.ErrReferenceNotFound { + continue + } + + if err != nil { + return nil, err + } + + if err = addReference(repoStorer, commitIterFunc, ref, commitsPath, commitsLookup); err != nil { + return nil, err + } + } + + return &commitAllIterator{commitsPath.Front()}, nil +} + +func addReference( + repoStorer storage.Storer, + commitIterFunc func(*Commit) CommitIter, + ref *plumbing.Reference, + commitsPath *list.List, + commitsLookup map[plumbing.Hash]*list.Element) error { + + _, exists := commitsLookup[ref.Hash()] + if exists { + // we already have it - skip the reference. + return nil + } + + refCommit, _ := GetCommit(repoStorer, ref.Hash()) + if refCommit == nil { + // if it's not a commit - skip it. 
+ return nil + } + + var ( + refCommits []*Commit + parent *list.Element + ) + // collect all ref commits to add + commitIter := commitIterFunc(refCommit) + for c, e := commitIter.Next(); e == nil; { + parent, exists = commitsLookup[c.Hash] + if exists { + break + } + refCommits = append(refCommits, c) + c, e = commitIter.Next() + } + commitIter.Close() + + if parent == nil { + // common parent - not found + // add all commits to the path from this ref (maybe it's a HEAD and we don't have anything, yet) + for _, c := range refCommits { + parent = commitsPath.PushBack(c) + commitsLookup[c.Hash] = parent + } + } else { + // add ref's commits to the path in reverse order (from the latest) + for i := len(refCommits) - 1; i >= 0; i-- { + c := refCommits[i] + // insert before found common parent + parent = commitsPath.InsertBefore(c, parent) + commitsLookup[c.Hash] = parent + } + } + + return nil +} + +func (it *commitAllIterator) Next() (*Commit, error) { + if it.currCommit == nil { + return nil, io.EOF + } + + c := it.currCommit.Value.(*Commit) + it.currCommit = it.currCommit.Next() + + return c, nil +} + +func (it *commitAllIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := it.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (it *commitAllIterator) Close() { + it.currCommit = nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go new file mode 100644 index 0000000000..8047fa9bc0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs.go @@ -0,0 +1,100 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type bfsCommitIterator struct { + seenExternal map[plumbing.Hash]bool + seen map[plumbing.Hash]bool + queue []*Commit +} + +// NewCommitIterBSF returns a CommitIter that walks the commit history, +// starting at the given commit and visiting its parents in pre-order. +// The given callback will be called for each visited commit. Each commit will +// be visited only once. If the callback returns an error, walking will stop +// and will return the error. Other errors might be returned if the history +// cannot be traversed (e.g. missing objects). Ignore allows to skip some +// commits from being iterated. 
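// Illustrative sketch (editor's example, not part of the vendored file):
// iterating commits reachable from every ref with NewCommitAllIter above,
// ordering each ref's history by committer time (NewCommitIterCTime below).
//
//	iter, err := object.NewCommitAllIter(repoStorer, func(c *object.Commit) object.CommitIter {
//		return object.NewCommitIterCTime(c, nil, nil)
//	})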
+func NewCommitIterBSF( + c *Commit, + seenExternal map[plumbing.Hash]bool, + ignore []plumbing.Hash, +) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + return &bfsCommitIterator{ + seenExternal: seenExternal, + seen: seen, + queue: []*Commit{c}, + } +} + +func (w *bfsCommitIterator) appendHash(store storer.EncodedObjectStorer, h plumbing.Hash) error { + if w.seen[h] || w.seenExternal[h] { + return nil + } + c, err := GetCommit(store, h) + if err != nil { + return err + } + w.queue = append(w.queue, c) + return nil +} + +func (w *bfsCommitIterator) Next() (*Commit, error) { + var c *Commit + for { + if len(w.queue) == 0 { + return nil, io.EOF + } + c = w.queue[0] + w.queue = w.queue[1:] + + if w.seen[c.Hash] || w.seenExternal[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + for _, h := range c.ParentHashes { + err := w.appendHash(c.s, h) + if err != nil { + return nil, err + } + } + + return c, nil + } +} + +func (w *bfsCommitIterator) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *bfsCommitIterator) Close() {} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go new file mode 100644 index 0000000000..9d518133e2 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_bfs_filtered.go @@ -0,0 +1,175 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +// NewFilterCommitIter returns a CommitIter that walks the commit history, +// starting at the passed commit and visiting its parents in Breadth-first order. +// The commits returned by the CommitIter will validate the passed CommitFilter. +// The history won't be transversed beyond a commit if isLimit is true for it. +// Each commit will be visited only once. +// If the commit history can not be traversed, or the Close() method is called, +// the CommitIter won't return more commits. +// If no isValid is passed, all ancestors of from commit will be valid. +// If no isLimit is limit, all ancestors of all commits will be visited. +func NewFilterCommitIter( + from *Commit, + isValid *CommitFilter, + isLimit *CommitFilter, +) CommitIter { + var validFilter CommitFilter + if isValid == nil { + validFilter = func(_ *Commit) bool { + return true + } + } else { + validFilter = *isValid + } + + var limitFilter CommitFilter + if isLimit == nil { + limitFilter = func(_ *Commit) bool { + return false + } + } else { + limitFilter = *isLimit + } + + return &filterCommitIter{ + isValid: validFilter, + isLimit: limitFilter, + visited: map[plumbing.Hash]struct{}{}, + queue: []*Commit{from}, + } +} + +// CommitFilter returns a boolean for the passed Commit +type CommitFilter func(*Commit) bool + +// filterCommitIter implements CommitIter +type filterCommitIter struct { + isValid CommitFilter + isLimit CommitFilter + visited map[plumbing.Hash]struct{} + queue []*Commit + lastErr error +} + +// Next returns the next commit of the CommitIter. +// It will return io.EOF if there are no more commits to visit, +// or an error if the history could not be traversed. 
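// Illustrative sketch (editor's example, not part of the vendored file):
// yielding only merge commits while walking the whole history (nil limit).
//
//	var onlyMerges object.CommitFilter = func(c *object.Commit) bool {
//		return c.NumParents() > 1
//	}
//	iter := object.NewFilterCommitIter(head, &onlyMerges, nil)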
+func (w *filterCommitIter) Next() (*Commit, error) { + var commit *Commit + var err error + for { + commit, err = w.popNewFromQueue() + if err != nil { + return nil, w.close(err) + } + + w.visited[commit.Hash] = struct{}{} + + if !w.isLimit(commit) { + err = w.addToQueue(commit.s, commit.ParentHashes...) + if err != nil { + return nil, w.close(err) + } + } + + if w.isValid(commit) { + return commit, nil + } + } +} + +// ForEach runs the passed callback over each Commit returned by the CommitIter +// until the callback returns an error or there is no more commits to traverse. +func (w *filterCommitIter) ForEach(cb func(*Commit) error) error { + for { + commit, err := w.Next() + if err == io.EOF { + break + } + + if err != nil { + return err + } + + if err := cb(commit); err == storer.ErrStop { + break + } else if err != nil { + return err + } + } + + return nil +} + +// Error returns the error that caused that the CommitIter is no longer returning commits +func (w *filterCommitIter) Error() error { + return w.lastErr +} + +// Close closes the CommitIter +func (w *filterCommitIter) Close() { + w.visited = map[plumbing.Hash]struct{}{} + w.queue = []*Commit{} + w.isLimit = nil + w.isValid = nil +} + +// close closes the CommitIter with an error +func (w *filterCommitIter) close(err error) error { + w.Close() + w.lastErr = err + return err +} + +// popNewFromQueue returns the first new commit from the internal fifo queue, +// or an io.EOF error if the queue is empty +func (w *filterCommitIter) popNewFromQueue() (*Commit, error) { + var first *Commit + for { + if len(w.queue) == 0 { + if w.lastErr != nil { + return nil, w.lastErr + } + + return nil, io.EOF + } + + first = w.queue[0] + w.queue = w.queue[1:] + if _, ok := w.visited[first.Hash]; ok { + continue + } + + return first, nil + } +} + +// addToQueue adds the passed commits to the internal fifo queue if they weren't seen +// or returns an error if the passed hashes could not be used to get valid commits +func (w *filterCommitIter) addToQueue( + store storer.EncodedObjectStorer, + hashes ...plumbing.Hash, +) error { + for _, hash := range hashes { + if _, ok := w.visited[hash]; ok { + continue + } + + commit, err := GetCommit(store, hash) + if err != nil { + return err + } + + w.queue = append(w.queue, commit) + } + + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go new file mode 100644 index 0000000000..fbddf1d238 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_ctime.go @@ -0,0 +1,103 @@ +package object + +import ( + "io" + + "github.com/emirpasic/gods/trees/binaryheap" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type commitIteratorByCTime struct { + seenExternal map[plumbing.Hash]bool + seen map[plumbing.Hash]bool + heap *binaryheap.Heap +} + +// NewCommitIterCTime returns a CommitIter that walks the commit history, +// starting at the given commit and visiting its parents while preserving Committer Time order. +// this appears to be the closest order to `git log` +// The given callback will be called for each visited commit. Each commit will +// be visited only once. If the callback returns an error, walking will stop +// and will return the error. Other errors might be returned if the history +// cannot be traversed (e.g. missing objects). Ignore allows to skip some +// commits from being iterated. 
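// Illustrative sketch (editor's example, not part of the vendored file):
// committer-time ordering combined with the Since/Until limit iterator from
// commit_walker_limit.go further below; since and until are *time.Time.
//
//	iter := object.NewCommitLimitIterFromIter(
//		object.NewCommitIterCTime(head, nil, nil),
//		object.LogLimitOptions{Since: since, Until: until},
//	)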
+func NewCommitIterCTime( + c *Commit, + seenExternal map[plumbing.Hash]bool, + ignore []plumbing.Hash, +) CommitIter { + seen := make(map[plumbing.Hash]bool) + for _, h := range ignore { + seen[h] = true + } + + heap := binaryheap.NewWith(func(a, b interface{}) int { + if a.(*Commit).Committer.When.Before(b.(*Commit).Committer.When) { + return 1 + } + return -1 + }) + heap.Push(c) + + return &commitIteratorByCTime{ + seenExternal: seenExternal, + seen: seen, + heap: heap, + } +} + +func (w *commitIteratorByCTime) Next() (*Commit, error) { + var c *Commit + for { + cIn, ok := w.heap.Pop() + if !ok { + return nil, io.EOF + } + c = cIn.(*Commit) + + if w.seen[c.Hash] || w.seenExternal[c.Hash] { + continue + } + + w.seen[c.Hash] = true + + for _, h := range c.ParentHashes { + if w.seen[h] || w.seenExternal[h] { + continue + } + pc, err := GetCommit(c.s, h) + if err != nil { + return nil, err + } + w.heap.Push(pc) + } + + return c, nil + } +} + +func (w *commitIteratorByCTime) ForEach(cb func(*Commit) error) error { + for { + c, err := w.Next() + if err == io.EOF { + break + } + if err != nil { + return err + } + + err = cb(c) + if err == storer.ErrStop { + break + } + if err != nil { + return err + } + } + + return nil +} + +func (w *commitIteratorByCTime) Close() {} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go new file mode 100644 index 0000000000..ac56a71c41 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_limit.go @@ -0,0 +1,65 @@ +package object + +import ( + "io" + "time" + + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type commitLimitIter struct { + sourceIter CommitIter + limitOptions LogLimitOptions +} + +type LogLimitOptions struct { + Since *time.Time + Until *time.Time +} + +func NewCommitLimitIterFromIter(commitIter CommitIter, limitOptions LogLimitOptions) CommitIter { + iterator := new(commitLimitIter) + iterator.sourceIter = commitIter + iterator.limitOptions = limitOptions + return iterator +} + +func (c *commitLimitIter) Next() (*Commit, error) { + for { + commit, err := c.sourceIter.Next() + if err != nil { + return nil, err + } + + if c.limitOptions.Since != nil && commit.Committer.When.Before(*c.limitOptions.Since) { + continue + } + if c.limitOptions.Until != nil && commit.Committer.When.After(*c.limitOptions.Until) { + continue + } + return commit, nil + } +} + +func (c *commitLimitIter) ForEach(cb func(*Commit) error) error { + for { + commit, nextErr := c.Next() + if nextErr == io.EOF { + break + } + if nextErr != nil { + return nextErr + } + err := cb(commit) + if err == storer.ErrStop { + return nil + } else if err != nil { + return err + } + } + return nil +} + +func (c *commitLimitIter) Close() { + c.sourceIter.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go new file mode 100644 index 0000000000..aa0ca15fd0 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/commit_walker_path.go @@ -0,0 +1,160 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +type commitPathIter struct { + pathFilter func(string) bool + sourceIter CommitIter + currentCommit *Commit + checkParent bool +} + +// NewCommitPathIterFromIter returns a commit iterator which performs diffTree between +// successive trees 
returned from the commit iterator from the argument. The purpose of this is +// to find the commits that explain how the files that match the path came to be. +// If checkParent is true then the function double checks if potential parent (next commit in a path) +// is one of the parents in the tree (it's used by `git log --all`). +// pathFilter is a function that takes path of file as argument and returns true if we want it +func NewCommitPathIterFromIter(pathFilter func(string) bool, commitIter CommitIter, checkParent bool) CommitIter { + iterator := new(commitPathIter) + iterator.sourceIter = commitIter + iterator.pathFilter = pathFilter + iterator.checkParent = checkParent + return iterator +} + +// NewCommitFileIterFromIter is kept for compatibility, can be replaced with NewCommitPathIterFromIter +func NewCommitFileIterFromIter(fileName string, commitIter CommitIter, checkParent bool) CommitIter { + return NewCommitPathIterFromIter( + func(path string) bool { + return path == fileName + }, + commitIter, + checkParent, + ) +} + +func (c *commitPathIter) Next() (*Commit, error) { + if c.currentCommit == nil { + var err error + c.currentCommit, err = c.sourceIter.Next() + if err != nil { + return nil, err + } + } + commit, commitErr := c.getNextFileCommit() + + // Setting current-commit to nil to prevent unwanted states when errors are raised + if commitErr != nil { + c.currentCommit = nil + } + return commit, commitErr +} + +func (c *commitPathIter) getNextFileCommit() (*Commit, error) { + for { + // Parent-commit can be nil if the current-commit is the initial commit + parentCommit, parentCommitErr := c.sourceIter.Next() + if parentCommitErr != nil { + // If the parent-commit is beyond the initial commit, keep it nil + if parentCommitErr != io.EOF { + return nil, parentCommitErr + } + parentCommit = nil + } + + // Fetch the trees of the current and parent commits + currentTree, currTreeErr := c.currentCommit.Tree() + if currTreeErr != nil { + return nil, currTreeErr + } + + var parentTree *Tree + if parentCommit != nil { + var parentTreeErr error + parentTree, parentTreeErr = parentCommit.Tree() + if parentTreeErr != nil { + return nil, parentTreeErr + } + } + + // Find diff between current and parent trees + changes, diffErr := DiffTree(currentTree, parentTree) + if diffErr != nil { + return nil, diffErr + } + + found := c.hasFileChange(changes, parentCommit) + + // Storing the current-commit in-case a change is found, and + // Updating the current-commit for the next-iteration + prevCommit := c.currentCommit + c.currentCommit = parentCommit + + if found { + return prevCommit, nil + } + + // If not matches found and if parent-commit is beyond the initial commit, then return with EOF + if parentCommit == nil { + return nil, io.EOF + } + } +} + +func (c *commitPathIter) hasFileChange(changes Changes, parent *Commit) bool { + for _, change := range changes { + if !c.pathFilter(change.name()) { + continue + } + + // filename matches, now check if source iterator contains all commits (from all refs) + if c.checkParent { + if parent != nil && isParentHash(parent.Hash, c.currentCommit) { + return true + } + continue + } + + return true + } + + return false +} + +func isParentHash(hash plumbing.Hash, commit *Commit) bool { + for _, h := range commit.ParentHashes { + if h == hash { + return true + } + } + return false +} + +func (c *commitPathIter) ForEach(cb func(*Commit) error) error { + for { + commit, nextErr := c.Next() + if nextErr == io.EOF { + break + } + if nextErr != nil { + return 
nextErr + } + err := cb(commit) + if err == storer.ErrStop { + return nil + } else if err != nil { + return err + } + } + return nil +} + +func (c *commitPathIter) Close() { + c.sourceIter.Close() +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go new file mode 100644 index 0000000000..3591f5f0a6 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/common.go @@ -0,0 +1,12 @@ +package object + +import ( + "bufio" + "sync" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + return bufio.NewReader(nil) + }, +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go new file mode 100644 index 0000000000..7c2222702c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/difftree.go @@ -0,0 +1,98 @@ +package object + +import ( + "bytes" + "context" + + "github.com/go-git/go-git/v5/utils/merkletrie" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +// DiffTree compares the content and mode of the blobs found via two +// tree objects. +// DiffTree does not perform rename detection, use DiffTreeWithOptions +// instead to detect renames. +func DiffTree(a, b *Tree) (Changes, error) { + return DiffTreeContext(context.Background(), a, b) +} + +// DiffTreeContext compares the content and mode of the blobs found via two +// tree objects. Provided context must be non-nil. +// An error will be returned if context expires. +func DiffTreeContext(ctx context.Context, a, b *Tree) (Changes, error) { + return DiffTreeWithOptions(ctx, a, b, nil) +} + +// DiffTreeOptions are the configurable options when performing a diff tree. +type DiffTreeOptions struct { + // DetectRenames is whether the diff tree will use rename detection. + DetectRenames bool + // RenameScore is the threshold to of similarity between files to consider + // that a pair of delete and insert are a rename. The number must be + // exactly between 0 and 100. + RenameScore uint + // RenameLimit is the maximum amount of files that can be compared when + // detecting renames. The number of comparisons that have to be performed + // is equal to the number of deleted files * the number of added files. + // That means, that if 100 files were deleted and 50 files were added, 5000 + // file comparisons may be needed. So, if the rename limit is 50, the number + // of both deleted and added needs to be equal or less than 50. + // A value of 0 means no limit. + RenameLimit uint + // OnlyExactRenames performs only detection of exact renames and will not perform + // any detection of renames based on file similarity. + OnlyExactRenames bool +} + +// DefaultDiffTreeOptions are the default and recommended options for the +// diff tree. +var DefaultDiffTreeOptions = &DiffTreeOptions{ + DetectRenames: true, + RenameScore: 60, + RenameLimit: 0, + OnlyExactRenames: false, +} + +// DiffTreeWithOptions compares the content and mode of the blobs found +// via two tree objects with the given options. The provided context +// must be non-nil. +// If no options are passed, no rename detection will be performed. The +// recommended options are DefaultDiffTreeOptions. +// An error will be returned if the context expires. +// This function will be deprecated and removed in v6 so the default +// behaviour of DiffTree is to detect renames. 
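// Illustrative sketch (editor's example, not part of the vendored file):
// diffing two trees with rename detection via the recommended options.
//
//	changes, err := object.DiffTreeWithOptions(ctx, fromTree, toTree, object.DefaultDiffTreeOptions)
//	if err == nil {
//		for _, ch := range changes {
//			action, _ := ch.Action() // merkletrie.Insert, Delete or Modify
//			_ = action
//		}
//	}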
+func DiffTreeWithOptions( + ctx context.Context, + a, b *Tree, + opts *DiffTreeOptions, +) (Changes, error) { + from := NewTreeRootNode(a) + to := NewTreeRootNode(b) + + hashEqual := func(a, b noder.Hasher) bool { + return bytes.Equal(a.Hash(), b.Hash()) + } + + merkletrieChanges, err := merkletrie.DiffTreeContext(ctx, from, to, hashEqual) + if err != nil { + if err == merkletrie.ErrCanceled { + return nil, ErrCanceled + } + return nil, err + } + + changes, err := newChanges(merkletrieChanges) + if err != nil { + return nil, err + } + + if opts == nil { + opts = new(DiffTreeOptions) + } + + if opts.DetectRenames { + return DetectRenames(changes, opts) + } + + return changes, nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go new file mode 100644 index 0000000000..6cc5367d8d --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/file.go @@ -0,0 +1,137 @@ +package object + +import ( + "bytes" + "io" + "strings" + + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/binary" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +// File represents git file objects. +type File struct { + // Name is the path of the file. It might be relative to a tree, + // depending of the function that generates it. + Name string + // Mode is the file mode. + Mode filemode.FileMode + // Blob with the contents of the file. + Blob +} + +// NewFile returns a File based on the given blob object +func NewFile(name string, m filemode.FileMode, b *Blob) *File { + return &File{Name: name, Mode: m, Blob: *b} +} + +// Contents returns the contents of a file as a string. +func (f *File) Contents() (content string, err error) { + reader, err := f.Reader() + if err != nil { + return "", err + } + defer ioutil.CheckClose(reader, &err) + + buf := new(bytes.Buffer) + if _, err := buf.ReadFrom(reader); err != nil { + return "", err + } + + return buf.String(), nil +} + +// IsBinary returns if the file is binary or not +func (f *File) IsBinary() (bin bool, err error) { + reader, err := f.Reader() + if err != nil { + return false, err + } + defer ioutil.CheckClose(reader, &err) + + return binary.IsBinary(reader) +} + +// Lines returns a slice of lines from the contents of a file, stripping +// all end of line characters. If the last line is empty (does not end +// in an end of line), it is also stripped. +func (f *File) Lines() ([]string, error) { + content, err := f.Contents() + if err != nil { + return nil, err + } + + splits := strings.Split(content, "\n") + // remove the last line if it is empty + if splits[len(splits)-1] == "" { + return splits[:len(splits)-1], nil + } + + return splits, nil +} + +// FileIter provides an iterator for the files in a tree. +type FileIter struct { + s storer.EncodedObjectStorer + w TreeWalker +} + +// NewFileIter takes a storer.EncodedObjectStorer and a Tree and returns a +// *FileIter that iterates over all files contained in the tree, recursively. +func NewFileIter(s storer.EncodedObjectStorer, t *Tree) *FileIter { + return &FileIter{s: s, w: *NewTreeWalker(t, true, nil)} +} + +// Next moves the iterator to the next file and returns a pointer to it. If +// there are no more files, it returns io.EOF. 
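// Illustrative sketch (editor's example, not part of the vendored file):
// the File helpers above applied to a file taken from a commit.
//
//	f, _ := commit.File("README.md")
//	if bin, _ := f.IsBinary(); !bin {
//		lines, _ := f.Lines() // end-of-line characters stripped
//		_ = lines
//	}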
+func (iter *FileIter) Next() (*File, error) {
+	for {
+		name, entry, err := iter.w.Next()
+		if err != nil {
+			return nil, err
+		}
+
+		if entry.Mode == filemode.Dir || entry.Mode == filemode.Submodule {
+			continue
+		}
+
+		blob, err := GetBlob(iter.s, entry.Hash)
+		if err != nil {
+			return nil, err
+		}
+
+		return NewFile(name, entry.Mode, blob), nil
+	}
+}
+
+// ForEach calls the cb function for each file contained in this iter until
+// an error happens or the end of the iter is reached. If storer.ErrStop is
+// sent, the iteration is stopped but no error is returned. The iterator is
+// closed.
+func (iter *FileIter) ForEach(cb func(*File) error) error {
+	defer iter.Close()
+
+	for {
+		f, err := iter.Next()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return err
+		}
+
+		if err := cb(f); err != nil {
+			if err == storer.ErrStop {
+				return nil
+			}
+
+			return err
+		}
+	}
+}
+
+func (iter *FileIter) Close() {
+	iter.w.Close()
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go
new file mode 100644
index 0000000000..b412361d02
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/merge_base.go
@@ -0,0 +1,210 @@
+package object
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+)
+
+// errIsReachable is returned when the first commit is an ancestor of the second.
+var errIsReachable = fmt.Errorf("first is reachable from second")
+
+// MergeBase mimics the behavior of `git merge-base actual other`, returning the
+// best common ancestor between the actual and the passed one.
+// The best common ancestors cannot be reached from other common ancestors.
+func (c *Commit) MergeBase(other *Commit) ([]*Commit, error) {
+	// use sortedByCommitDateDesc strategy
+	sorted := sortByCommitDateDesc(c, other)
+	newer := sorted[0]
+	older := sorted[1]
+
+	newerHistory, err := ancestorsIndex(older, newer)
+	if err == errIsReachable {
+		return []*Commit{older}, nil
+	}
+
+	if err != nil {
+		return nil, err
+	}
+
+	var res []*Commit
+	inNewerHistory := isInIndexCommitFilter(newerHistory)
+	resIter := NewFilterCommitIter(older, &inNewerHistory, &inNewerHistory)
+	_ = resIter.ForEach(func(commit *Commit) error {
+		res = append(res, commit)
+		return nil
+	})
+
+	return Independents(res)
+}
+
+// IsAncestor returns true if the actual commit is an ancestor of the passed one.
+// It returns an error if the history is not traversable.
+// It mimics the behavior of `git merge-base --is-ancestor actual other`.
+func (c *Commit) IsAncestor(other *Commit) (bool, error) {
+	found := false
+	iter := NewCommitPreorderIter(other, nil, nil)
+	err := iter.ForEach(func(comm *Commit) error {
+		if comm.Hash != c.Hash {
+			return nil
+		}
+
+		found = true
+		return storer.ErrStop
+	})
+
+	return found, err
+}
+
+// ancestorsIndex returns a map with the ancestors of the starting commit if the
+// excluded one is not one of them. It returns errIsReachable if the excluded commit
+// is an ancestor of the starting one, or another error if the history is not
+// traversable.
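// Illustrative sketch (editor's example, not part of the vendored file):
// finding the best common ancestor(s) of two commits, as `git merge-base`.
//
//	bases, err := a.MergeBase(b)
//	if err == nil && len(bases) > 0 {
//		_ = bases[0] // several bases are possible on criss-cross merges
//	}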
+func ancestorsIndex(excluded, starting *Commit) (map[plumbing.Hash]struct{}, error) {
+	if excluded.Hash.String() == starting.Hash.String() {
+		return nil, errIsReachable
+	}
+
+	startingHistory := map[plumbing.Hash]struct{}{}
+	startingIter := NewCommitIterBSF(starting, nil, nil)
+	err := startingIter.ForEach(func(commit *Commit) error {
+		if commit.Hash == excluded.Hash {
+			return errIsReachable
+		}
+
+		startingHistory[commit.Hash] = struct{}{}
+		return nil
+	})
+
+	if err != nil {
+		return nil, err
+	}
+
+	return startingHistory, nil
+}
+
+// Independents returns a subset of the passed commits that are not reachable
+// from the others.
+// It mimics the behavior of `git merge-base --independent commit...`.
+func Independents(commits []*Commit) ([]*Commit, error) {
+	// use sortedByCommitDateDesc strategy
+	candidates := sortByCommitDateDesc(commits...)
+	candidates = removeDuplicated(candidates)
+
+	seen := map[plumbing.Hash]struct{}{}
+	var isLimit CommitFilter = func(commit *Commit) bool {
+		_, ok := seen[commit.Hash]
+		return ok
+	}
+
+	if len(candidates) < 2 {
+		return candidates, nil
+	}
+
+	pos := 0
+	for {
+		from := candidates[pos]
+		others := remove(candidates, from)
+		fromHistoryIter := NewFilterCommitIter(from, nil, &isLimit)
+		err := fromHistoryIter.ForEach(func(fromAncestor *Commit) error {
+			for _, other := range others {
+				if fromAncestor.Hash == other.Hash {
+					candidates = remove(candidates, other)
+					others = remove(others, other)
+				}
+			}
+
+			if len(candidates) == 1 {
+				return storer.ErrStop
+			}
+
+			seen[fromAncestor.Hash] = struct{}{}
+			return nil
+		})
+
+		if err != nil {
+			return nil, err
+		}
+
+		nextPos := indexOf(candidates, from) + 1
+		if nextPos >= len(candidates) {
+			break
+		}
+
+		pos = nextPos
+	}
+
+	return candidates, nil
+}
+
+// sortByCommitDateDesc returns the passed commits, sorted by `committer.When desc`
+//
+// This strategy tries to reduce the time needed to walk the history from one
+// commit until it reaches the others. It relies on ancestors usually being
+// committed before their descendants; that way `Independents(A^, A)` is
+// processed as if it were `Independents(A, A^)`, so starting from `A` reaches
+// `A^` much sooner than walking from `A^` down to the initial commit and then
+// from `A` to `A^`.
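+// For example (editor's illustration): if commit B is a descendant of A,
+// and therefore has a later commit date, sortByCommitDateDesc(A, B)
+// returns [B, A], so the walk starts from the newest commit.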
+func sortByCommitDateDesc(commits ...*Commit) []*Commit { + sorted := make([]*Commit, len(commits)) + copy(sorted, commits) + sort.Slice(sorted, func(i, j int) bool { + return sorted[i].Committer.When.After(sorted[j].Committer.When) + }) + + return sorted +} + +// indexOf returns the first position where target was found in the passed commits +func indexOf(commits []*Commit, target *Commit) int { + for i, commit := range commits { + if target.Hash == commit.Hash { + return i + } + } + + return -1 +} + +// remove returns the passed commits excluding the commit toDelete +func remove(commits []*Commit, toDelete *Commit) []*Commit { + res := make([]*Commit, len(commits)) + j := 0 + for _, commit := range commits { + if commit.Hash == toDelete.Hash { + continue + } + + res[j] = commit + j++ + } + + return res[:j] +} + +// removeDuplicated removes duplicated commits from the passed slice of commits +func removeDuplicated(commits []*Commit) []*Commit { + seen := make(map[plumbing.Hash]struct{}, len(commits)) + res := make([]*Commit, len(commits)) + j := 0 + for _, commit := range commits { + if _, ok := seen[commit.Hash]; ok { + continue + } + + seen[commit.Hash] = struct{}{} + res[j] = commit + j++ + } + + return res[:j] +} + +// isInIndexCommitFilter returns a commitFilter that returns true +// if the commit is in the passed index. +func isInIndexCommitFilter(index map[plumbing.Hash]struct{}) CommitFilter { + return func(c *Commit) bool { + _, ok := index[c.Hash] + return ok + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go new file mode 100644 index 0000000000..13b1e91c9c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/object.go @@ -0,0 +1,239 @@ +// Package object contains implementations of all Git objects and utility +// functions to work with them. +package object + +import ( + "bytes" + "errors" + "fmt" + "io" + "strconv" + "time" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/storer" +) + +// ErrUnsupportedObject trigger when a non-supported object is being decoded. +var ErrUnsupportedObject = errors.New("unsupported object type") + +// Object is a generic representation of any git object. It is implemented by +// Commit, Tree, Blob, and Tag, and includes the functions that are common to +// them. +// +// Object is returned when an object can be of any type. It is frequently used +// with a type cast to acquire the specific type of object: +// +// func process(obj Object) { +// switch o := obj.(type) { +// case *Commit: +// // o is a Commit +// case *Tree: +// // o is a Tree +// case *Blob: +// // o is a Blob +// case *Tag: +// // o is a Tag +// } +// } +// +// This interface is intentionally different from plumbing.EncodedObject, which +// is a lower level interface used by storage implementations to read and write +// objects in its encoded form. +type Object interface { + ID() plumbing.Hash + Type() plumbing.ObjectType + Decode(plumbing.EncodedObject) error + Encode(plumbing.EncodedObject) error +} + +// GetObject gets an object from an object storer and decodes it. +func GetObject(s storer.EncodedObjectStorer, h plumbing.Hash) (Object, error) { + o, err := s.EncodedObject(plumbing.AnyObject, h) + if err != nil { + return nil, err + } + + return DecodeObject(s, o) +} + +// DecodeObject decodes an encoded object into an Object and associates it to +// the given object storer. 
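+// Editor's note: the concrete decoder is selected from the encoded
+// object's type (CommitObject, TreeObject, BlobObject or TagObject);
+// any other type yields plumbing.ErrInvalidType, as the switch below
+// shows.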
+func DecodeObject(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (Object, error) { + switch o.Type() { + case plumbing.CommitObject: + return DecodeCommit(s, o) + case plumbing.TreeObject: + return DecodeTree(s, o) + case plumbing.BlobObject: + return DecodeBlob(o) + case plumbing.TagObject: + return DecodeTag(s, o) + default: + return nil, plumbing.ErrInvalidType + } +} + +// DateFormat is the format being used in the original git implementation +const DateFormat = "Mon Jan 02 15:04:05 2006 -0700" + +// Signature is used to identify who and when created a commit or tag. +type Signature struct { + // Name represents a person name. It is an arbitrary string. + Name string + // Email is an email, but it cannot be assumed to be well-formed. + Email string + // When is the timestamp of the signature. + When time.Time +} + +// Decode decodes a byte slice into a signature +func (s *Signature) Decode(b []byte) { + open := bytes.LastIndexByte(b, '<') + close := bytes.LastIndexByte(b, '>') + if open == -1 || close == -1 { + return + } + + if close < open { + return + } + + s.Name = string(bytes.Trim(b[:open], " ")) + s.Email = string(b[open+1 : close]) + + hasTime := close+2 < len(b) + if hasTime { + s.decodeTimeAndTimeZone(b[close+2:]) + } +} + +// Encode encodes a Signature into a writer. +func (s *Signature) Encode(w io.Writer) error { + if _, err := fmt.Fprintf(w, "%s <%s> ", s.Name, s.Email); err != nil { + return err + } + if err := s.encodeTimeAndTimeZone(w); err != nil { + return err + } + return nil +} + +var timeZoneLength = 5 + +func (s *Signature) decodeTimeAndTimeZone(b []byte) { + space := bytes.IndexByte(b, ' ') + if space == -1 { + space = len(b) + } + + ts, err := strconv.ParseInt(string(b[:space]), 10, 64) + if err != nil { + return + } + + s.When = time.Unix(ts, 0).In(time.UTC) + var tzStart = space + 1 + if tzStart >= len(b) || tzStart+timeZoneLength > len(b) { + return + } + + timezone := string(b[tzStart : tzStart+timeZoneLength]) + tzhours, err1 := strconv.ParseInt(timezone[0:3], 10, 64) + tzmins, err2 := strconv.ParseInt(timezone[3:], 10, 64) + if err1 != nil || err2 != nil { + return + } + if tzhours < 0 { + tzmins *= -1 + } + + tz := time.FixedZone("", int(tzhours*60*60+tzmins*60)) + + s.When = s.When.In(tz) +} + +func (s *Signature) encodeTimeAndTimeZone(w io.Writer) error { + u := s.When.Unix() + if u < 0 { + u = 0 + } + _, err := fmt.Fprintf(w, "%d %s", u, s.When.Format("-0700")) + return err +} + +func (s *Signature) String() string { + return fmt.Sprintf("%s <%s>", s.Name, s.Email) +} + +// ObjectIter provides an iterator for a set of objects. +type ObjectIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewObjectIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns an *ObjectIter that iterates over all +// objects contained in the storer.EncodedObjectIter. +func NewObjectIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *ObjectIter { + return &ObjectIter{iter, s} +} + +// Next moves the iterator to the next object and returns a pointer to it. If +// there are no more objects, it returns io.EOF. 
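+//
+// Editor's sketch (hypothetical iter over a repository's objects):
+// dispatching on the concrete type while iterating:
+//
+//	err := iter.ForEach(func(obj Object) error {
+//		if c, ok := obj.(*Commit); ok {
+//			fmt.Println(c.Hash)
+//		}
+//		return nil
+//	})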
+func (iter *ObjectIter) Next() (Object, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + o, err := iter.toObject(obj) + if err == plumbing.ErrInvalidType { + continue + } + + if err != nil { + return nil, err + } + + return o, nil + } +} + +// ForEach call the cb function for each object contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *ObjectIter) ForEach(cb func(Object) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + o, err := iter.toObject(obj) + if err == plumbing.ErrInvalidType { + return nil + } + + if err != nil { + return err + } + + return cb(o) + }) +} + +func (iter *ObjectIter) toObject(obj plumbing.EncodedObject) (Object, error) { + switch obj.Type() { + case plumbing.BlobObject: + blob := &Blob{} + return blob, blob.Decode(obj) + case plumbing.TreeObject: + tree := &Tree{s: iter.s} + return tree, tree.Decode(obj) + case plumbing.CommitObject: + commit := &Commit{} + return commit, commit.Decode(obj) + case plumbing.TagObject: + tag := &Tag{} + return tag, tag.Decode(obj) + default: + return nil, plumbing.ErrInvalidType + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go new file mode 100644 index 0000000000..56b62c191f --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/patch.go @@ -0,0 +1,354 @@ +package object + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "math" + "strings" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + fdiff "github.com/go-git/go-git/v5/plumbing/format/diff" + "github.com/go-git/go-git/v5/utils/diff" + + dmp "github.com/sergi/go-diff/diffmatchpatch" +) + +var ( + ErrCanceled = errors.New("operation canceled") +) + +func getPatch(message string, changes ...*Change) (*Patch, error) { + ctx := context.Background() + return getPatchContext(ctx, message, changes...) 
+} + +func getPatchContext(ctx context.Context, message string, changes ...*Change) (*Patch, error) { + var filePatches []fdiff.FilePatch + for _, c := range changes { + select { + case <-ctx.Done(): + return nil, ErrCanceled + default: + } + + fp, err := filePatchWithContext(ctx, c) + if err != nil { + return nil, err + } + + filePatches = append(filePatches, fp) + } + + return &Patch{message, filePatches}, nil +} + +func filePatchWithContext(ctx context.Context, c *Change) (fdiff.FilePatch, error) { + from, to, err := c.Files() + if err != nil { + return nil, err + } + fromContent, fIsBinary, err := fileContent(from) + if err != nil { + return nil, err + } + + toContent, tIsBinary, err := fileContent(to) + if err != nil { + return nil, err + } + + if fIsBinary || tIsBinary { + return &textFilePatch{from: c.From, to: c.To}, nil + } + + diffs := diff.Do(fromContent, toContent) + + var chunks []fdiff.Chunk + for _, d := range diffs { + select { + case <-ctx.Done(): + return nil, ErrCanceled + default: + } + + var op fdiff.Operation + switch d.Type { + case dmp.DiffEqual: + op = fdiff.Equal + case dmp.DiffDelete: + op = fdiff.Delete + case dmp.DiffInsert: + op = fdiff.Add + } + + chunks = append(chunks, &textChunk{d.Text, op}) + } + + return &textFilePatch{ + chunks: chunks, + from: c.From, + to: c.To, + }, nil + +} + +func filePatch(c *Change) (fdiff.FilePatch, error) { + return filePatchWithContext(context.Background(), c) +} + +func fileContent(f *File) (content string, isBinary bool, err error) { + if f == nil { + return + } + + isBinary, err = f.IsBinary() + if err != nil || isBinary { + return + } + + content, err = f.Contents() + + return +} + +// Patch is an implementation of fdiff.Patch interface +type Patch struct { + message string + filePatches []fdiff.FilePatch +} + +func (p *Patch) FilePatches() []fdiff.FilePatch { + return p.filePatches +} + +func (p *Patch) Message() string { + return p.message +} + +func (p *Patch) Encode(w io.Writer) error { + ue := fdiff.NewUnifiedEncoder(w, fdiff.DefaultContextLines) + + return ue.Encode(p) +} + +func (p *Patch) Stats() FileStats { + return getFileStatsFromFilePatches(p.FilePatches()) +} + +func (p *Patch) String() string { + buf := bytes.NewBuffer(nil) + err := p.Encode(buf) + if err != nil { + return fmt.Sprintf("malformed patch: %s", err.Error()) + } + + return buf.String() +} + +// changeEntryWrapper is an implementation of fdiff.File interface +type changeEntryWrapper struct { + ce ChangeEntry +} + +func (f *changeEntryWrapper) Hash() plumbing.Hash { + if !f.ce.TreeEntry.Mode.IsFile() { + return plumbing.ZeroHash + } + + return f.ce.TreeEntry.Hash +} + +func (f *changeEntryWrapper) Mode() filemode.FileMode { + return f.ce.TreeEntry.Mode +} +func (f *changeEntryWrapper) Path() string { + if !f.ce.TreeEntry.Mode.IsFile() { + return "" + } + + return f.ce.Name +} + +func (f *changeEntryWrapper) Empty() bool { + return !f.ce.TreeEntry.Mode.IsFile() +} + +// textFilePatch is an implementation of fdiff.FilePatch interface +type textFilePatch struct { + chunks []fdiff.Chunk + from, to ChangeEntry +} + +func (tf *textFilePatch) Files() (from fdiff.File, to fdiff.File) { + f := &changeEntryWrapper{tf.from} + t := &changeEntryWrapper{tf.to} + + if !f.Empty() { + from = f + } + + if !t.Empty() { + to = t + } + + return +} + +func (tf *textFilePatch) IsBinary() bool { + return len(tf.chunks) == 0 +} + +func (tf *textFilePatch) Chunks() []fdiff.Chunk { + return tf.chunks +} + +// textChunk is an implementation of fdiff.Chunk interface +type 
textChunk struct { + content string + op fdiff.Operation +} + +func (t *textChunk) Content() string { + return t.content +} + +func (t *textChunk) Type() fdiff.Operation { + return t.op +} + +// FileStat stores the status of changes in content of a file. +type FileStat struct { + Name string + Addition int + Deletion int +} + +func (fs FileStat) String() string { + return printStat([]FileStat{fs}) +} + +// FileStats is a collection of FileStat. +type FileStats []FileStat + +func (fileStats FileStats) String() string { + return printStat(fileStats) +} + +func printStat(fileStats []FileStat) string { + padLength := float64(len(" ")) + newlineLength := float64(len("\n")) + separatorLength := float64(len("|")) + // Soft line length limit. The text length calculation below excludes + // length of the change number. Adding that would take it closer to 80, + // but probably not more than 80, until it's a huge number. + lineLength := 72.0 + + // Get the longest filename and longest total change. + var longestLength float64 + var longestTotalChange float64 + for _, fs := range fileStats { + if int(longestLength) < len(fs.Name) { + longestLength = float64(len(fs.Name)) + } + totalChange := fs.Addition + fs.Deletion + if int(longestTotalChange) < totalChange { + longestTotalChange = float64(totalChange) + } + } + + // Parts of the output: + // |<+++/---> + // example: " main.go | 10 +++++++--- " + + // + leftTextLength := padLength + longestLength + padLength + + // <+++++/-----> + // Excluding number length here. + rightTextLength := padLength + padLength + newlineLength + + totalTextArea := leftTextLength + separatorLength + rightTextLength + heightOfHistogram := lineLength - totalTextArea + + // Scale the histogram. + var scaleFactor float64 + if longestTotalChange > heightOfHistogram { + // Scale down to heightOfHistogram. + scaleFactor = longestTotalChange / heightOfHistogram + } else { + scaleFactor = 1.0 + } + + finalOutput := "" + for _, fs := range fileStats { + addn := float64(fs.Addition) + deln := float64(fs.Deletion) + addc := int(math.Floor(addn/scaleFactor)) + delc := int(math.Floor(deln/scaleFactor)) + if addc < 0 { + addc = 0 + } + if delc < 0 { + delc = 0 + } + adds := strings.Repeat("+", addc) + dels := strings.Repeat("-", delc) + finalOutput += fmt.Sprintf(" %s | %d %s%s\n", fs.Name, (fs.Addition + fs.Deletion), adds, dels) + } + + return finalOutput +} + +func getFileStatsFromFilePatches(filePatches []fdiff.FilePatch) FileStats { + var fileStats FileStats + + for _, fp := range filePatches { + // ignore empty patches (binary files, submodule refs updates) + if len(fp.Chunks()) == 0 { + continue + } + + cs := FileStat{} + from, to := fp.Files() + if from == nil { + // New File is created. + cs.Name = to.Path() + } else if to == nil { + // File is deleted. + cs.Name = from.Path() + } else if from.Path() != to.Path() { + // File is renamed. Not supported. 
+			// cs.Name = fmt.Sprintf("%s => %s", from.Path(), to.Path())
+		} else {
+			cs.Name = from.Path()
+		}
+
+		for _, chunk := range fp.Chunks() {
+			s := chunk.Content()
+			if len(s) == 0 {
+				continue
+			}
+
+			switch chunk.Type() {
+			case fdiff.Add:
+				cs.Addition += strings.Count(s, "\n")
+				if s[len(s)-1] != '\n' {
+					cs.Addition++
+				}
+			case fdiff.Delete:
+				cs.Deletion += strings.Count(s, "\n")
+				if s[len(s)-1] != '\n' {
+					cs.Deletion++
+				}
+			}
+		}
+
+		fileStats = append(fileStats, cs)
+	}
+
+	return fileStats
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go
new file mode 100644
index 0000000000..7fed72c2f5
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/rename.go
@@ -0,0 +1,813 @@
+package object
+
+import (
+	"errors"
+	"io"
+	"sort"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/filemode"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+	"github.com/go-git/go-git/v5/utils/merkletrie"
+)
+
+// DetectRenames detects the renames in the given changes on two trees with
+// the given options. It will return the given changes, grouping additions and
+// deletions into modifications when possible.
+// If options is nil, the default diff tree options will be used.
+func DetectRenames(
+	changes Changes,
+	opts *DiffTreeOptions,
+) (Changes, error) {
+	if opts == nil {
+		opts = DefaultDiffTreeOptions
+	}
+
+	detector := &renameDetector{
+		renameScore: int(opts.RenameScore),
+		renameLimit: int(opts.RenameLimit),
+		onlyExact:   opts.OnlyExactRenames,
+	}
+
+	for _, c := range changes {
+		action, err := c.Action()
+		if err != nil {
+			return nil, err
+		}
+
+		switch action {
+		case merkletrie.Insert:
+			detector.added = append(detector.added, c)
+		case merkletrie.Delete:
+			detector.deleted = append(detector.deleted, c)
+		default:
+			detector.modified = append(detector.modified, c)
+		}
+	}
+
+	return detector.detect()
+}
+
+// renameDetector will detect and resolve renames in a set of changes.
+// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/RenameDetector.java
+type renameDetector struct {
+	added    []*Change
+	deleted  []*Change
+	modified []*Change
+
+	renameScore int
+	renameLimit int
+	onlyExact   bool
+}
+
+// detectExactRenames matches files that were deleted with files that were
+// added where the hash is the same on both. If there are multiple targets,
+// the one with the most similar path will be chosen as the rename and the
+// rest as either deletions or additions.
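+//
+// Editor's sketch (hypothetical ctx and trees a, b): rename detection is
+// normally requested through the options of a tree diff rather than by
+// calling DetectRenames directly:
+//
+//	changes, err := DiffTreeWithOptions(ctx, a, b, &DiffTreeOptions{
+//		DetectRenames: true,
+//		RenameScore:   60, // minimum similarity, as in git's -M60%
+//	})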
+func (d *renameDetector) detectExactRenames() { + added := groupChangesByHash(d.added) + deletes := groupChangesByHash(d.deleted) + var uniqueAdds []*Change + var nonUniqueAdds [][]*Change + var addedLeft []*Change + + for _, cs := range added { + if len(cs) == 1 { + uniqueAdds = append(uniqueAdds, cs[0]) + } else { + nonUniqueAdds = append(nonUniqueAdds, cs) + } + } + + for _, c := range uniqueAdds { + hash := changeHash(c) + deleted := deletes[hash] + + if len(deleted) == 1 { + if sameMode(c, deleted[0]) { + d.modified = append(d.modified, &Change{From: deleted[0].From, To: c.To}) + delete(deletes, hash) + } else { + addedLeft = append(addedLeft, c) + } + } else if len(deleted) > 1 { + bestMatch := bestNameMatch(c, deleted) + if bestMatch != nil && sameMode(c, bestMatch) { + d.modified = append(d.modified, &Change{From: bestMatch.From, To: c.To}) + delete(deletes, hash) + + var newDeletes = make([]*Change, 0, len(deleted)-1) + for _, d := range deleted { + if d != bestMatch { + newDeletes = append(newDeletes, d) + } + } + deletes[hash] = newDeletes + } + } else { + addedLeft = append(addedLeft, c) + } + } + + for _, added := range nonUniqueAdds { + hash := changeHash(added[0]) + deleted := deletes[hash] + + if len(deleted) == 1 { + deleted := deleted[0] + bestMatch := bestNameMatch(deleted, added) + if bestMatch != nil && sameMode(deleted, bestMatch) { + d.modified = append(d.modified, &Change{From: deleted.From, To: bestMatch.To}) + delete(deletes, hash) + + for _, c := range added { + if c != bestMatch { + addedLeft = append(addedLeft, c) + } + } + } else { + addedLeft = append(addedLeft, added...) + } + } else if len(deleted) > 1 { + maxSize := len(deleted) * len(added) + if d.renameLimit > 0 && d.renameLimit < maxSize { + maxSize = d.renameLimit + } + + matrix := make(similarityMatrix, 0, maxSize) + + for delIdx, del := range deleted { + deletedName := changeName(del) + + for addIdx, add := range added { + addedName := changeName(add) + + score := nameSimilarityScore(addedName, deletedName) + matrix = append(matrix, similarityPair{added: addIdx, deleted: delIdx, score: score}) + + if len(matrix) >= maxSize { + break + } + } + + if len(matrix) >= maxSize { + break + } + } + + sort.Stable(matrix) + + usedAdds := make(map[*Change]struct{}) + usedDeletes := make(map[*Change]struct{}) + for i := len(matrix) - 1; i >= 0; i-- { + del := deleted[matrix[i].deleted] + add := added[matrix[i].added] + + if add == nil || del == nil { + // it was already matched + continue + } + + usedAdds[add] = struct{}{} + usedDeletes[del] = struct{}{} + d.modified = append(d.modified, &Change{From: del.From, To: add.To}) + added[matrix[i].added] = nil + deleted[matrix[i].deleted] = nil + } + + for _, c := range added { + if _, ok := usedAdds[c]; !ok && c != nil { + addedLeft = append(addedLeft, c) + } + } + + var newDeletes = make([]*Change, 0, len(deleted)-len(usedDeletes)) + for _, c := range deleted { + if _, ok := usedDeletes[c]; !ok && c != nil { + newDeletes = append(newDeletes, c) + } + } + deletes[hash] = newDeletes + } else { + addedLeft = append(addedLeft, added...) + } + } + + d.added = addedLeft + d.deleted = nil + for _, dels := range deletes { + d.deleted = append(d.deleted, dels...) + } +} + +// detectContentRenames detects renames based on the similarity of the content +// in the files by building a matrix of pairs between sources and destinations +// and matching by the highest score. 
+// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityRenameDetector.java +func (d *renameDetector) detectContentRenames() error { + cnt := max(len(d.added), len(d.deleted)) + if d.renameLimit > 0 && cnt > d.renameLimit { + return nil + } + + srcs, dsts := d.deleted, d.added + matrix, err := buildSimilarityMatrix(srcs, dsts, d.renameScore) + if err != nil { + return err + } + renames := make([]*Change, 0, min(len(matrix), len(dsts))) + + // Match rename pairs on a first come, first serve basis until + // we have looked at everything that is above the minimum score. + for i := len(matrix) - 1; i >= 0; i-- { + pair := matrix[i] + src := srcs[pair.deleted] + dst := dsts[pair.added] + + if dst == nil || src == nil { + // It was already matched before + continue + } + + renames = append(renames, &Change{From: src.From, To: dst.To}) + + // Claim destination and source as matched + dsts[pair.added] = nil + srcs[pair.deleted] = nil + } + + d.modified = append(d.modified, renames...) + d.added = compactChanges(dsts) + d.deleted = compactChanges(srcs) + + return nil +} + +func (d *renameDetector) detect() (Changes, error) { + if len(d.added) > 0 && len(d.deleted) > 0 { + d.detectExactRenames() + + if !d.onlyExact { + if err := d.detectContentRenames(); err != nil { + return nil, err + } + } + } + + result := make(Changes, 0, len(d.added)+len(d.deleted)+len(d.modified)) + result = append(result, d.added...) + result = append(result, d.deleted...) + result = append(result, d.modified...) + + sort.Stable(result) + + return result, nil +} + +func bestNameMatch(change *Change, changes []*Change) *Change { + var best *Change + var bestScore int + + cname := changeName(change) + + for _, c := range changes { + score := nameSimilarityScore(cname, changeName(c)) + if score > bestScore { + bestScore = score + best = c + } + } + + return best +} + +func nameSimilarityScore(a, b string) int { + aDirLen := strings.LastIndexByte(a, '/') + 1 + bDirLen := strings.LastIndexByte(b, '/') + 1 + + dirMin := min(aDirLen, bDirLen) + dirMax := max(aDirLen, bDirLen) + + var dirScoreLtr, dirScoreRtl int + if dirMax == 0 { + dirScoreLtr = 100 + dirScoreRtl = 100 + } else { + var dirSim int + + for ; dirSim < dirMin; dirSim++ { + if a[dirSim] != b[dirSim] { + break + } + } + + dirScoreLtr = dirSim * 100 / dirMax + + if dirScoreLtr == 100 { + dirScoreRtl = 100 + } else { + for dirSim = 0; dirSim < dirMin; dirSim++ { + if a[aDirLen-1-dirSim] != b[bDirLen-1-dirSim] { + break + } + } + dirScoreRtl = dirSim * 100 / dirMax + } + } + + fileMin := min(len(a)-aDirLen, len(b)-bDirLen) + fileMax := max(len(a)-aDirLen, len(b)-bDirLen) + + fileSim := 0 + for ; fileSim < fileMin; fileSim++ { + if a[len(a)-1-fileSim] != b[len(b)-1-fileSim] { + break + } + } + fileScore := fileSim * 100 / fileMax + + return (((dirScoreLtr + dirScoreRtl) * 25) + (fileScore * 50)) / 100 +} + +func changeName(c *Change) string { + if c.To != empty { + return c.To.Name + } + return c.From.Name +} + +func changeHash(c *Change) plumbing.Hash { + if c.To != empty { + return c.To.TreeEntry.Hash + } + + return c.From.TreeEntry.Hash +} + +func changeMode(c *Change) filemode.FileMode { + if c.To != empty { + return c.To.TreeEntry.Mode + } + + return c.From.TreeEntry.Mode +} + +func sameMode(a, b *Change) bool { + return changeMode(a) == changeMode(b) +} + +func groupChangesByHash(changes []*Change) map[plumbing.Hash][]*Change { + var result = make(map[plumbing.Hash][]*Change) + for _, c := range changes { + hash := 
changeHash(c) + result[hash] = append(result[hash], c) + } + return result +} + +type similarityMatrix []similarityPair + +func (m similarityMatrix) Len() int { return len(m) } +func (m similarityMatrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } +func (m similarityMatrix) Less(i, j int) bool { + if m[i].score == m[j].score { + if m[i].added == m[j].added { + return m[i].deleted < m[j].deleted + } + return m[i].added < m[j].added + } + return m[i].score < m[j].score +} + +type similarityPair struct { + // index of the added file + added int + // index of the deleted file + deleted int + // similarity score + score int +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func buildSimilarityMatrix(srcs, dsts []*Change, renameScore int) (similarityMatrix, error) { + // Allocate for the worst-case scenario where every pair has a score + // that we need to consider. We might not need that many. + matrix := make(similarityMatrix, 0, len(srcs)*len(dsts)) + srcSizes := make([]int64, len(srcs)) + dstSizes := make([]int64, len(dsts)) + dstTooLarge := make(map[int]bool) + + // Consider each pair of files, if the score is above the minimum + // threshold we need to record that scoring in the matrix so we can + // later find the best matches. +outerLoop: + for srcIdx, src := range srcs { + if changeMode(src) != filemode.Regular { + continue + } + + // Declare the from file and the similarity index here to be able to + // reuse it inside the inner loop. The reason to not initialize them + // here is so we can skip the initialization in case they happen to + // not be needed later. They will be initialized inside the inner + // loop if and only if they're needed and reused in subsequent passes. + var from *File + var s *similarityIndex + var err error + for dstIdx, dst := range dsts { + if changeMode(dst) != filemode.Regular { + continue + } + + if dstTooLarge[dstIdx] { + continue + } + + var to *File + srcSize := srcSizes[srcIdx] + if srcSize == 0 { + from, _, err = src.Files() + if err != nil { + return nil, err + } + srcSize = from.Size + 1 + srcSizes[srcIdx] = srcSize + } + + dstSize := dstSizes[dstIdx] + if dstSize == 0 { + _, to, err = dst.Files() + if err != nil { + return nil, err + } + dstSize = to.Size + 1 + dstSizes[dstIdx] = dstSize + } + + min, max := srcSize, dstSize + if dstSize < srcSize { + min = dstSize + max = srcSize + } + + if int(min*100/max) < renameScore { + // File sizes are too different to be a match + continue + } + + if s == nil { + s, err = fileSimilarityIndex(from) + if err != nil { + if err == errIndexFull { + continue outerLoop + } + return nil, err + } + } + + if to == nil { + _, to, err = dst.Files() + if err != nil { + return nil, err + } + } + + di, err := fileSimilarityIndex(to) + if err != nil { + if err == errIndexFull { + dstTooLarge[dstIdx] = true + } + + return nil, err + } + + contentScore := s.score(di, 10000) + // The name score returns a value between 0 and 100, so we need to + // convert it to the same range as the content score. 
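+			// Editor's note: contentScore lies in [0, 10000] (score() is
+			// called with maxScore=10000) and nameScore is rescaled to that
+			// same range, so the 99:1 weighted sum divided by 10000 yields
+			// a value in [0, 100], directly comparable with renameScore.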
+ nameScore := nameSimilarityScore(src.From.Name, dst.To.Name) * 100 + score := (contentScore*99 + nameScore*1) / 10000 + + if score < renameScore { + continue + } + + matrix = append(matrix, similarityPair{added: dstIdx, deleted: srcIdx, score: score}) + } + } + + sort.Stable(matrix) + + return matrix, nil +} + +func compactChanges(changes []*Change) []*Change { + var result []*Change + for _, c := range changes { + if c != nil { + result = append(result, c) + } + } + return result +} + +const ( + keyShift = 32 + maxCountValue = (1 << keyShift) - 1 +) + +var errIndexFull = errors.New("index is full") + +// similarityIndex is an index structure of lines/blocks in one file. +// This structure can be used to compute an approximation of the similarity +// between two files. +// To save space in memory, this index uses a space efficient encoding which +// will not exceed 1MiB per instance. The index starts out at a smaller size +// (closer to 2KiB), but may grow as more distinct blocks within the scanned +// file are discovered. +// see: https://github.com/eclipse/jgit/blob/master/org.eclipse.jgit/src/org/eclipse/jgit/diff/SimilarityIndex.java +type similarityIndex struct { + hashed uint64 + // number of non-zero entries in hashes + numHashes int + growAt int + hashes []keyCountPair + hashBits int +} + +func fileSimilarityIndex(f *File) (*similarityIndex, error) { + idx := newSimilarityIndex() + if err := idx.hash(f); err != nil { + return nil, err + } + + sort.Stable(keyCountPairs(idx.hashes)) + + return idx, nil +} + +func newSimilarityIndex() *similarityIndex { + return &similarityIndex{ + hashBits: 8, + hashes: make([]keyCountPair, 1<<8), + growAt: shouldGrowAt(8), + } +} + +func (i *similarityIndex) hash(f *File) error { + isBin, err := f.IsBinary() + if err != nil { + return err + } + + r, err := f.Reader() + if err != nil { + return err + } + + defer ioutil.CheckClose(r, &err) + + return i.hashContent(r, f.Size, isBin) +} + +func (i *similarityIndex) hashContent(r io.Reader, size int64, isBin bool) error { + var buf = make([]byte, 4096) + var ptr, cnt int + remaining := size + + for 0 < remaining { + hash := 5381 + var blockHashedCnt uint64 + + // Hash one line or block, whatever happens first + n := int64(0) + for { + if ptr == cnt { + ptr = 0 + var err error + cnt, err = io.ReadFull(r, buf) + if err != nil && err != io.ErrUnexpectedEOF { + return err + } + + if cnt == 0 { + return io.EOF + } + } + n++ + c := buf[ptr] & 0xff + ptr++ + + // Ignore CR in CRLF sequence if it's text + if !isBin && c == '\r' && ptr < cnt && buf[ptr] == '\n' { + continue + } + blockHashedCnt++ + + if c == '\n' { + break + } + + hash = (hash << 5) + hash + int(c) + + if n >= 64 || n >= remaining { + break + } + } + i.hashed += blockHashedCnt + if err := i.add(hash, blockHashedCnt); err != nil { + return err + } + remaining -= n + } + + return nil +} + +// score computes the similarity score between this index and another one. +// A region of a file is defined as a line in a text file or a fixed-size +// block in a binary file. To prepare an index, each region in the file is +// hashed; the values and counts of hashes are retained in a sorted table. +// Define the similarity fraction F as the count of matching regions between +// the two files divided between the maximum count of regions in either file. +// The similarity score is F multiplied by the maxScore constant, yielding a +// range [0, maxScore]. It is defined as maxScore for the degenerate case of +// two empty files. 
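+// In symbols (editor's note): score = floor(maxScore * common(a, b) /
+// max(a.hashed, b.hashed)).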
+// The similarity score is symmetrical; i.e. a.score(b) == b.score(a). +func (i *similarityIndex) score(other *similarityIndex, maxScore int) int { + var maxHashed = i.hashed + if maxHashed < other.hashed { + maxHashed = other.hashed + } + if maxHashed == 0 { + return maxScore + } + + return int(i.common(other) * uint64(maxScore) / maxHashed) +} + +func (i *similarityIndex) common(dst *similarityIndex) uint64 { + srcIdx, dstIdx := 0, 0 + if i.numHashes == 0 || dst.numHashes == 0 { + return 0 + } + + var common uint64 + srcKey, dstKey := i.hashes[srcIdx].key(), dst.hashes[dstIdx].key() + + for { + if srcKey == dstKey { + srcCnt, dstCnt := i.hashes[srcIdx].count(), dst.hashes[dstIdx].count() + if srcCnt < dstCnt { + common += srcCnt + } else { + common += dstCnt + } + + srcIdx++ + if srcIdx == len(i.hashes) { + break + } + srcKey = i.hashes[srcIdx].key() + + dstIdx++ + if dstIdx == len(dst.hashes) { + break + } + dstKey = dst.hashes[dstIdx].key() + } else if srcKey < dstKey { + // Region of src that is not in dst + srcIdx++ + if srcIdx == len(i.hashes) { + break + } + srcKey = i.hashes[srcIdx].key() + } else { + // Region of dst that is not in src + dstIdx++ + if dstIdx == len(dst.hashes) { + break + } + dstKey = dst.hashes[dstIdx].key() + } + } + + return common +} + +func (i *similarityIndex) add(key int, cnt uint64) error { + key = int(uint32(key) * 0x9e370001 >> 1) + + j := i.slot(key) + for { + v := i.hashes[j] + if v == 0 { + // It's an empty slot, so we can store it here. + if i.growAt <= i.numHashes { + if err := i.grow(); err != nil { + return err + } + j = i.slot(key) + continue + } + + var err error + i.hashes[j], err = newKeyCountPair(key, cnt) + if err != nil { + return err + } + i.numHashes++ + return nil + } else if v.key() == key { + // It's the same key, so increment the counter. + var err error + i.hashes[j], err = newKeyCountPair(key, v.count()+cnt) + if err != nil { + return err + } + return nil + } else if j+1 >= len(i.hashes) { + j = 0 + } else { + j++ + } + } +} + +type keyCountPair uint64 + +func newKeyCountPair(key int, cnt uint64) (keyCountPair, error) { + if cnt > maxCountValue { + return 0, errIndexFull + } + + return keyCountPair((uint64(key) << keyShift) | cnt), nil +} + +func (p keyCountPair) key() int { + return int(p >> keyShift) +} + +func (p keyCountPair) count() uint64 { + return uint64(p) & maxCountValue +} + +func (i *similarityIndex) slot(key int) int { + // We use 31 - hashBits because the upper bit was already forced + // to be 0 and we want the remaining high bits to be used as the + // table slot. + return int(uint32(key) >> uint(31-i.hashBits)) +} + +func shouldGrowAt(hashBits int) int { + return (1 << uint(hashBits)) * (hashBits - 3) / hashBits +} + +func (i *similarityIndex) grow() error { + if i.hashBits == 30 { + return errIndexFull + } + + old := i.hashes + + i.hashBits++ + i.growAt = shouldGrowAt(i.hashBits) + + // TODO(erizocosmico): find a way to check if it will OOM and return + // errIndexFull instead. 
+	i.hashes = make([]keyCountPair, 1<<uint(i.hashBits))
+
+	for _, v := range old {
+		if v != 0 {
+			j := i.slot(v.key())
+			for i.hashes[j] != 0 {
+				j++
+				if j >= len(i.hashes) {
+					j = 0
+				}
+			}
+			i.hashes[j] = v
+		}
+	}
+
+	return nil
+}
+
+type keyCountPairs []keyCountPair
+
+func (p keyCountPairs) Len() int           { return len(p) }
+func (p keyCountPairs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+func (p keyCountPairs) Less(i, j int) bool { return p[i] < p[j] }
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
new file mode 100644
index 0000000000..216010d913
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tag.go
@@ -0,0 +1,357 @@
+package object
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	stdioutil "io/ioutil"
+	"strings"
+
+	"github.com/ProtonMail/go-crypto/openpgp"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/utils/ioutil"
+)
+
+// Tag represents an annotated tag object. It points to a single git object of
+// any type, but tags typically are applied to commit or blob objects. It
+// provides a reference that associates the target with a tag name. It also
+// contains meta-information about the tag, including the tagger, tag date and
+// message.
+//
+// Note that this is not used for lightweight tags.
+//
+// https://git-scm.com/book/en/v2/Git-Internals-Git-References#Tags
+type Tag struct {
+	// Hash of the tag.
+	Hash plumbing.Hash
+	// Name of the tag.
+	Name string
+	// Tagger is the one who created the tag.
+	Tagger Signature
+	// Message is an arbitrary text message.
+	Message string
+	// PGPSignature is the PGP signature of the tag.
+	PGPSignature string
+	// TargetType is the object type of the target.
+	TargetType plumbing.ObjectType
+	// Target is the hash of the target object.
+	Target plumbing.Hash
+
+	s storer.EncodedObjectStorer
+}
+
+// GetTag gets a tag from an object storer and decodes it.
+func GetTag(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tag, error) {
+	o, err := s.EncodedObject(plumbing.TagObject, h)
+	if err != nil {
+		return nil, err
+	}
+
+	return DecodeTag(s, o)
+}
+
+// DecodeTag decodes an encoded object into a *Tag and associates it to the
+// given object storer.
+func DecodeTag(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tag, error) {
+	t := &Tag{s: s}
+	if err := t.Decode(o); err != nil {
+		return nil, err
+	}
+
+	return t, nil
+}
+
+// ID returns the object ID of the tag, not the object that the tag references.
+// The returned value will always match the current value of Tag.Hash.
+//
+// ID is present to fulfill the Object interface.
+func (t *Tag) ID() plumbing.Hash {
+	return t.Hash
+}
+
+// Type returns the type of object. It always returns plumbing.TagObject.
+//
+// Type is present to fulfill the Object interface.
+func (t *Tag) Type() plumbing.ObjectType {
+	return plumbing.TagObject
+}
+
+// Decode transforms a plumbing.EncodedObject into a Tag struct.
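+//
+// Editor's note: the payload parsed below follows git's annotated tag
+// format; with hypothetical values it looks like:
+//
+//	object 9fceb02d0ae598e95dc970b74767f19372d61af8
+//	type commit
+//	tag v1.0.0
+//	tagger John Doe <john@example.com> 1257894000 +0100
+//
+//	tag message, optionally followed by a PGP signature block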
+func (t *Tag) Decode(o plumbing.EncodedObject) (err error) {
+	if o.Type() != plumbing.TagObject {
+		return ErrUnsupportedObject
+	}
+
+	t.Hash = o.Hash()
+
+	reader, err := o.Reader()
+	if err != nil {
+		return err
+	}
+	defer ioutil.CheckClose(reader, &err)
+
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
+	for {
+		var line []byte
+		line, err = r.ReadBytes('\n')
+		if err != nil && err != io.EOF {
+			return err
+		}
+
+		line = bytes.TrimSpace(line)
+		if len(line) == 0 {
+			break // Start of message
+		}
+
+		split := bytes.SplitN(line, []byte{' '}, 2)
+		switch string(split[0]) {
+		case "object":
+			t.Target = plumbing.NewHash(string(split[1]))
+		case "type":
+			t.TargetType, err = plumbing.ParseObjectType(string(split[1]))
+			if err != nil {
+				return err
+			}
+		case "tag":
+			t.Name = string(split[1])
+		case "tagger":
+			t.Tagger.Decode(split[1])
+		}
+
+		if err == io.EOF {
+			return nil
+		}
+	}
+
+	data, err := stdioutil.ReadAll(r)
+	if err != nil {
+		return err
+	}
+
+	var pgpsig bool
+	// Check if data contains PGP signature.
+	if bytes.Contains(data, []byte(beginpgp)) {
+		// Split the lines at newline.
+		messageAndSig := bytes.Split(data, []byte("\n"))
+
+		for _, l := range messageAndSig {
+			if pgpsig {
+				if bytes.Contains(l, []byte(endpgp)) {
+					t.PGPSignature += endpgp + "\n"
+					break
+				} else {
+					t.PGPSignature += string(l) + "\n"
+				}
+				continue
+			}
+
+			// Check if it's the beginning of a PGP signature.
+			if bytes.Contains(l, []byte(beginpgp)) {
+				t.PGPSignature += beginpgp + "\n"
+				pgpsig = true
+				continue
+			}
+
+			t.Message += string(l) + "\n"
+		}
+	} else {
+		t.Message = string(data)
+	}
+
+	return nil
+}
+
+// Encode transforms a Tag into a plumbing.EncodedObject.
+func (t *Tag) Encode(o plumbing.EncodedObject) error {
+	return t.encode(o, true)
+}
+
+// EncodeWithoutSignature exports a Tag into a plumbing.EncodedObject without
+// the signature (corresponding to the payload of the PGP signature).
+func (t *Tag) EncodeWithoutSignature(o plumbing.EncodedObject) error {
+	return t.encode(o, false)
+}
+
+func (t *Tag) encode(o plumbing.EncodedObject, includeSig bool) (err error) {
+	o.SetType(plumbing.TagObject)
+	w, err := o.Writer()
+	if err != nil {
+		return err
+	}
+	defer ioutil.CheckClose(w, &err)
+
+	if _, err = fmt.Fprintf(w,
+		"object %s\ntype %s\ntag %s\ntagger ",
+		t.Target.String(), t.TargetType.Bytes(), t.Name); err != nil {
+		return err
+	}
+
+	if err = t.Tagger.Encode(w); err != nil {
+		return err
+	}
+
+	if _, err = fmt.Fprint(w, "\n\n"); err != nil {
+		return err
+	}
+
+	if _, err = fmt.Fprint(w, t.Message); err != nil {
+		return err
+	}
+
+	// Note that this is highly sensitive to what is sent along in the message.
+	// Message *always* needs to end with a newline, or else the message and the
+	// signature will be concatenated into a corrupt object. Since this is a
+	// lower-level method, we assume you know what you are doing and have already
+	// done the needful on the message in the caller.
+	if includeSig {
+		if _, err = fmt.Fprint(w, t.PGPSignature); err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+// Commit returns the commit pointed to by the tag. If the tag points to a
+// different type of object ErrUnsupportedObject will be returned.
+func (t *Tag) Commit() (*Commit, error) {
+	if t.TargetType != plumbing.CommitObject {
+		return nil, ErrUnsupportedObject
+	}
+
+	o, err := t.s.EncodedObject(plumbing.CommitObject, t.Target)
+	if err != nil {
+		return nil, err
+	}
+
+	return DecodeCommit(t.s, o)
+}
+
+// Tree returns the tree pointed to by the tag. If the tag points to a commit
+// object the tree of that commit will be returned. If the tag does not point
+// to a commit or tree object ErrUnsupportedObject will be returned.
+func (t *Tag) Tree() (*Tree, error) {
+	switch t.TargetType {
+	case plumbing.CommitObject:
+		c, err := t.Commit()
+		if err != nil {
+			return nil, err
+		}
+
+		return c.Tree()
+	case plumbing.TreeObject:
+		return GetTree(t.s, t.Target)
+	default:
+		return nil, ErrUnsupportedObject
+	}
+}
+
+// Blob returns the blob pointed to by the tag. If the tag points to a
+// different type of object ErrUnsupportedObject will be returned.
+func (t *Tag) Blob() (*Blob, error) {
+	if t.TargetType != plumbing.BlobObject {
+		return nil, ErrUnsupportedObject
+	}
+
+	return GetBlob(t.s, t.Target)
+}
+
+// Object returns the object pointed to by the tag.
+func (t *Tag) Object() (Object, error) {
+	o, err := t.s.EncodedObject(t.TargetType, t.Target)
+	if err != nil {
+		return nil, err
+	}
+
+	return DecodeObject(t.s, o)
+}
+
+// String returns the meta information contained in the tag as a formatted
+// string.
+func (t *Tag) String() string {
+	obj, _ := t.Object()
+
+	return fmt.Sprintf(
+		"%s %s\nTagger: %s\nDate: %s\n\n%s\n%s",
+		plumbing.TagObject, t.Name, t.Tagger.String(), t.Tagger.When.Format(DateFormat),
+		t.Message, objectAsString(obj),
+	)
+}
+
+// Verify performs PGP verification of the tag with a provided armored
+// keyring and returns the openpgp.Entity associated with the verifying key
+// on success.
+func (t *Tag) Verify(armoredKeyRing string) (*openpgp.Entity, error) {
+	keyRingReader := strings.NewReader(armoredKeyRing)
+	keyring, err := openpgp.ReadArmoredKeyRing(keyRingReader)
+	if err != nil {
+		return nil, err
+	}
+
+	// Extract signature.
+	signature := strings.NewReader(t.PGPSignature)
+
+	encoded := &plumbing.MemoryObject{}
+	// Encode tag components, excluding signature and get a reader object.
+	if err := t.EncodeWithoutSignature(encoded); err != nil {
+		return nil, err
+	}
+	er, err := encoded.Reader()
+	if err != nil {
+		return nil, err
+	}
+
+	return openpgp.CheckArmoredDetachedSignature(keyring, er, signature, nil)
+}
+
+// TagIter provides an iterator for a set of tags.
+type TagIter struct {
+	storer.EncodedObjectIter
+	s storer.EncodedObjectStorer
+}
+
+// NewTagIter takes a storer.EncodedObjectStorer and a
+// storer.EncodedObjectIter and returns a *TagIter that iterates over all
+// tags contained in the storer.EncodedObjectIter.
+//
+// Any non-tag object returned by the storer.EncodedObjectIter is skipped.
+func NewTagIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TagIter {
+	return &TagIter{iter, s}
+}
+
+// Next moves the iterator to the next tag and returns a pointer to it. If
+// there are no more tags, it returns io.EOF.
+func (iter *TagIter) Next() (*Tag, error) {
+	obj, err := iter.EncodedObjectIter.Next()
+	if err != nil {
+		return nil, err
+	}
+
+	return DecodeTag(iter.s, obj)
+}
+
+// ForEach calls the cb function for each tag contained in this iter until
+// an error happens or the end of the iter is reached. If ErrStop is sent,
+// the iteration stops, but no error is returned. The iterator is closed.
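+//
+// Editor's sketch (hypothetical iter over a repository's tag objects):
+//
+//	err := iter.ForEach(func(t *Tag) error {
+//		fmt.Printf("%s -> %s\n", t.Name, t.Target)
+//		return nil
+//	})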
+func (iter *TagIter) ForEach(cb func(*Tag) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + t, err := DecodeTag(iter.s, obj) + if err != nil { + return err + } + + return cb(t) + }) +} + +func objectAsString(obj Object) string { + switch o := obj.(type) { + case *Commit: + return o.String() + default: + return "" + } +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go new file mode 100644 index 0000000000..5e6378ca49 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go @@ -0,0 +1,525 @@ +package object + +import ( + "bufio" + "context" + "errors" + "fmt" + "io" + "path" + "path/filepath" + "strings" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/plumbing/storer" + "github.com/go-git/go-git/v5/utils/ioutil" +) + +const ( + maxTreeDepth = 1024 + startingStackSize = 8 +) + +// New errors defined by this package. +var ( + ErrMaxTreeDepth = errors.New("maximum tree depth exceeded") + ErrFileNotFound = errors.New("file not found") + ErrDirectoryNotFound = errors.New("directory not found") + ErrEntryNotFound = errors.New("entry not found") +) + +// Tree is basically like a directory - it references a bunch of other trees +// and/or blobs (i.e. files and sub-directories) +type Tree struct { + Entries []TreeEntry + Hash plumbing.Hash + + s storer.EncodedObjectStorer + m map[string]*TreeEntry + t map[string]*Tree // tree path cache +} + +// GetTree gets a tree from an object storer and decodes it. +func GetTree(s storer.EncodedObjectStorer, h plumbing.Hash) (*Tree, error) { + o, err := s.EncodedObject(plumbing.TreeObject, h) + if err != nil { + return nil, err + } + + return DecodeTree(s, o) +} + +// DecodeTree decodes an encoded object into a *Tree and associates it to the +// given object storer. +func DecodeTree(s storer.EncodedObjectStorer, o plumbing.EncodedObject) (*Tree, error) { + t := &Tree{s: s} + if err := t.Decode(o); err != nil { + return nil, err + } + + return t, nil +} + +// TreeEntry represents a file +type TreeEntry struct { + Name string + Mode filemode.FileMode + Hash plumbing.Hash +} + +// File returns the hash of the file identified by the `path` argument. +// The path is interpreted as relative to the tree receiver. +func (t *Tree) File(path string) (*File, error) { + e, err := t.FindEntry(path) + if err != nil { + return nil, ErrFileNotFound + } + + blob, err := GetBlob(t.s, e.Hash) + if err != nil { + if err == plumbing.ErrObjectNotFound { + return nil, ErrFileNotFound + } + return nil, err + } + + return NewFile(path, e.Mode, blob), nil +} + +// Size returns the plaintext size of an object, without reading it +// into memory. +func (t *Tree) Size(path string) (int64, error) { + e, err := t.FindEntry(path) + if err != nil { + return 0, ErrEntryNotFound + } + + return t.s.EncodedObjectSize(e.Hash) +} + +// Tree returns the tree identified by the `path` argument. +// The path is interpreted as relative to the tree receiver. +func (t *Tree) Tree(path string) (*Tree, error) { + e, err := t.FindEntry(path) + if err != nil { + return nil, ErrDirectoryNotFound + } + + tree, err := GetTree(t.s, e.Hash) + if err == plumbing.ErrObjectNotFound { + return nil, ErrDirectoryNotFound + } + + return tree, err +} + +// TreeEntryFile returns the *File for a given *TreeEntry. 
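+//
+// Editor's sketch (hypothetical *Tree in scope): the lookup helpers above
+// resolve slash-separated paths relative to the receiver:
+//
+//	f, err := tree.File("docs/README.md")
+//	if err == ErrFileNotFound {
+//		// the path is missing or does not name a file
+//	}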
+func (t *Tree) TreeEntryFile(e *TreeEntry) (*File, error) {
+	blob, err := GetBlob(t.s, e.Hash)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewFile(e.Name, e.Mode, blob), nil
+}
+
+// FindEntry searches for a TreeEntry in this tree or any subtree.
+func (t *Tree) FindEntry(path string) (*TreeEntry, error) {
+	if t.t == nil {
+		t.t = make(map[string]*Tree)
+	}
+
+	pathParts := strings.Split(path, "/")
+	startingTree := t
+	pathCurrent := ""
+
+	// search for the longest path in the tree path cache
+	for i := len(pathParts) - 1; i > 1; i-- {
+		path := filepath.Join(pathParts[:i]...)
+
+		tree, ok := t.t[path]
+		if ok {
+			startingTree = tree
+			pathParts = pathParts[i:]
+			pathCurrent = path
+
+			break
+		}
+	}
+
+	var tree *Tree
+	var err error
+	for tree = startingTree; len(pathParts) > 1; pathParts = pathParts[1:] {
+		if tree, err = tree.dir(pathParts[0]); err != nil {
+			return nil, err
+		}
+
+		pathCurrent = filepath.Join(pathCurrent, pathParts[0])
+		t.t[pathCurrent] = tree
+	}
+
+	return tree.entry(pathParts[0])
+}
+
+func (t *Tree) dir(baseName string) (*Tree, error) {
+	entry, err := t.entry(baseName)
+	if err != nil {
+		return nil, ErrDirectoryNotFound
+	}
+
+	obj, err := t.s.EncodedObject(plumbing.TreeObject, entry.Hash)
+	if err != nil {
+		return nil, err
+	}
+
+	tree := &Tree{s: t.s}
+	err = tree.Decode(obj)
+
+	return tree, err
+}
+
+func (t *Tree) entry(baseName string) (*TreeEntry, error) {
+	if t.m == nil {
+		t.buildMap()
+	}
+
+	entry, ok := t.m[baseName]
+	if !ok {
+		return nil, ErrEntryNotFound
+	}
+
+	return entry, nil
+}
+
+// Files returns a FileIter that iterates over the files in the Tree.
+func (t *Tree) Files() *FileIter {
+	return NewFileIter(t.s, t)
+}
+
+// ID returns the object ID of the tree. The returned value will always match
+// the current value of Tree.Hash.
+//
+// ID is present to fulfill the Object interface.
+func (t *Tree) ID() plumbing.Hash {
+	return t.Hash
+}
+
+// Type returns the type of object. It always returns plumbing.TreeObject.
+func (t *Tree) Type() plumbing.ObjectType {
+	return plumbing.TreeObject
+}
+
+// Decode transforms a plumbing.EncodedObject into a Tree struct.
+func (t *Tree) Decode(o plumbing.EncodedObject) (err error) {
+	if o.Type() != plumbing.TreeObject {
+		return ErrUnsupportedObject
+	}
+
+	t.Hash = o.Hash()
+	if o.Size() == 0 {
+		return nil
+	}
+
+	t.Entries = nil
+	t.m = nil
+
+	reader, err := o.Reader()
+	if err != nil {
+		return err
+	}
+	defer ioutil.CheckClose(reader, &err)
+
+	r := bufPool.Get().(*bufio.Reader)
+	defer bufPool.Put(r)
+	r.Reset(reader)
+	for {
+		str, err := r.ReadString(' ')
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+
+			return err
+		}
+		str = str[:len(str)-1] // strip last byte (' ')
+
+		mode, err := filemode.New(str)
+		if err != nil {
+			return err
+		}
+
+		name, err := r.ReadString(0)
+		if err != nil && err != io.EOF {
+			return err
+		}
+
+		var hash plumbing.Hash
+		if _, err = io.ReadFull(r, hash[:]); err != nil {
+			return err
+		}
+
+		baseName := name[:len(name)-1]
+		t.Entries = append(t.Entries, TreeEntry{
+			Hash: hash,
+			Mode: mode,
+			Name: baseName,
+		})
+	}
+
+	return nil
+}
+
+// Encode transforms a Tree into a plumbing.EncodedObject.
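+// Editor's note: each entry is serialized as "<octal mode> <name>", a
+// NUL byte, then the raw 20-byte hash, mirroring what Decode above reads.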
+func (t *Tree) Encode(o plumbing.EncodedObject) (err error) { + o.SetType(plumbing.TreeObject) + w, err := o.Writer() + if err != nil { + return err + } + + defer ioutil.CheckClose(w, &err) + for _, entry := range t.Entries { + if _, err = fmt.Fprintf(w, "%o %s", entry.Mode, entry.Name); err != nil { + return err + } + + if _, err = w.Write([]byte{0x00}); err != nil { + return err + } + + if _, err = w.Write(entry.Hash[:]); err != nil { + return err + } + } + + return err +} + +func (t *Tree) buildMap() { + t.m = make(map[string]*TreeEntry) + for i := 0; i < len(t.Entries); i++ { + t.m[t.Entries[i].Name] = &t.Entries[i] + } +} + +// Diff returns a list of changes between this tree and the provided one +func (t *Tree) Diff(to *Tree) (Changes, error) { + return t.DiffContext(context.Background(), to) +} + +// DiffContext returns a list of changes between this tree and the provided one +// Error will be returned if context expires. Provided context must be non nil. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (t *Tree) DiffContext(ctx context.Context, to *Tree) (Changes, error) { + return DiffTreeWithOptions(ctx, t, to, DefaultDiffTreeOptions) +} + +// Patch returns a slice of Patch objects with all the changes between trees +// in chunks. This representation can be used to create several diff outputs. +func (t *Tree) Patch(to *Tree) (*Patch, error) { + return t.PatchContext(context.Background(), to) +} + +// PatchContext returns a slice of Patch objects with all the changes between +// trees in chunks. This representation can be used to create several diff +// outputs. If context expires, an error will be returned. Provided context must +// be non-nil. +// +// NOTE: Since version 5.1.0 the renames are correctly handled, the settings +// used are the recommended options DefaultDiffTreeOptions. +func (t *Tree) PatchContext(ctx context.Context, to *Tree) (*Patch, error) { + changes, err := t.DiffContext(ctx, to) + if err != nil { + return nil, err + } + + return changes.PatchContext(ctx) +} + +// treeEntryIter facilitates iterating through the TreeEntry objects in a Tree. +type treeEntryIter struct { + t *Tree + pos int +} + +func (iter *treeEntryIter) Next() (TreeEntry, error) { + if iter.pos >= len(iter.t.Entries) { + return TreeEntry{}, io.EOF + } + iter.pos++ + return iter.t.Entries[iter.pos-1], nil +} + +// TreeWalker provides a means of walking through all of the entries in a Tree. +type TreeWalker struct { + stack []*treeEntryIter + base string + recursive bool + seen map[plumbing.Hash]bool + + s storer.EncodedObjectStorer + t *Tree +} + +// NewTreeWalker returns a new TreeWalker for the given tree. +// +// It is the caller's responsibility to call Close() when finished with the +// tree walker. +func NewTreeWalker(t *Tree, recursive bool, seen map[plumbing.Hash]bool) *TreeWalker { + stack := make([]*treeEntryIter, 0, startingStackSize) + stack = append(stack, &treeEntryIter{t, 0}) + + return &TreeWalker{ + stack: stack, + recursive: recursive, + seen: seen, + + s: t.s, + t: t, + } +} + +// Next returns the next object from the tree. Objects are returned in order +// and subtrees are included. After the last object has been returned further +// calls to Next() will return io.EOF. +// +// In the current implementation any objects which cannot be found in the +// underlying repository will be skipped automatically. It is possible that this +// may change in future versions. 
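+//
+// Editor's sketch (hypothetical *Tree in scope): a full recursive walk:
+//
+//	w := NewTreeWalker(tree, true, nil)
+//	defer w.Close()
+//	for {
+//		name, entry, err := w.Next()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(name, entry.Mode)
+//	}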
+func (w *TreeWalker) Next() (name string, entry TreeEntry, err error) { + var obj *Tree + for { + current := len(w.stack) - 1 + if current < 0 { + // Nothing left on the stack so we're finished + err = io.EOF + return + } + + if current > maxTreeDepth { + // We're probably following bad data or some self-referencing tree + err = ErrMaxTreeDepth + return + } + + entry, err = w.stack[current].Next() + if err == io.EOF { + // Finished with the current tree, move back up to the parent + w.stack = w.stack[:current] + w.base, _ = path.Split(w.base) + w.base = strings.TrimSuffix(w.base, "/") + continue + } + + if err != nil { + return + } + + if w.seen[entry.Hash] { + continue + } + + if entry.Mode == filemode.Dir { + obj, err = GetTree(w.s, entry.Hash) + } + + name = simpleJoin(w.base, entry.Name) + + if err != nil { + err = io.EOF + return + } + + break + } + + if !w.recursive { + return + } + + if obj != nil { + w.stack = append(w.stack, &treeEntryIter{obj, 0}) + w.base = simpleJoin(w.base, entry.Name) + } + + return +} + +// Tree returns the tree that the tree walker most recently operated on. +func (w *TreeWalker) Tree() *Tree { + current := len(w.stack) - 1 + if w.stack[current].pos == 0 { + current-- + } + + if current < 0 { + return nil + } + + return w.stack[current].t +} + +// Close releases any resources used by the TreeWalker. +func (w *TreeWalker) Close() { + w.stack = nil +} + +// TreeIter provides an iterator for a set of trees. +type TreeIter struct { + storer.EncodedObjectIter + s storer.EncodedObjectStorer +} + +// NewTreeIter takes a storer.EncodedObjectStorer and a +// storer.EncodedObjectIter and returns a *TreeIter that iterates over all +// tree contained in the storer.EncodedObjectIter. +// +// Any non-tree object returned by the storer.EncodedObjectIter is skipped. +func NewTreeIter(s storer.EncodedObjectStorer, iter storer.EncodedObjectIter) *TreeIter { + return &TreeIter{iter, s} +} + +// Next moves the iterator to the next tree and returns a pointer to it. If +// there are no more trees, it returns io.EOF. +func (iter *TreeIter) Next() (*Tree, error) { + for { + obj, err := iter.EncodedObjectIter.Next() + if err != nil { + return nil, err + } + + if obj.Type() != plumbing.TreeObject { + continue + } + + return DecodeTree(iter.s, obj) + } +} + +// ForEach call the cb function for each tree contained on this iter until +// an error happens or the end of the iter is reached. If ErrStop is sent +// the iteration is stop but no error is returned. The iterator is closed. +func (iter *TreeIter) ForEach(cb func(*Tree) error) error { + return iter.EncodedObjectIter.ForEach(func(obj plumbing.EncodedObject) error { + if obj.Type() != plumbing.TreeObject { + return nil + } + + t, err := DecodeTree(iter.s, obj) + if err != nil { + return err + } + + return cb(t) + }) +} + +func simpleJoin(parent, child string) string { + if len(parent) > 0 { + return parent + "/" + child + } + return child +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go new file mode 100644 index 0000000000..b4891b957c --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/object/treenoder.go @@ -0,0 +1,136 @@ +package object + +import ( + "io" + + "github.com/go-git/go-git/v5/plumbing" + "github.com/go-git/go-git/v5/plumbing/filemode" + "github.com/go-git/go-git/v5/utils/merkletrie/noder" +) + +// A treenoder is a helper type that wraps git trees into merkletrie +// noders. 
+//
+// As a merkletrie noder doesn't understand the concept of modes (e.g.
+// file permissions), the treenoder includes the mode of the git tree in
+// the hash, so changes in the modes will be detected as modifications
+// to the file contents by the merkletrie difftree algorithm. This is
+// consistent with how the "git diff-tree" command works.
+type treeNoder struct {
+	parent   *Tree  // the root node is its own parent
+	name     string // empty string for the root node
+	mode     filemode.FileMode
+	hash     plumbing.Hash
+	children []noder.Noder // memoized
+}
+
+// NewTreeRootNode returns the root node of a Tree
+func NewTreeRootNode(t *Tree) noder.Noder {
+	if t == nil {
+		return &treeNoder{}
+	}
+
+	return &treeNoder{
+		parent: t,
+		name:   "",
+		mode:   filemode.Dir,
+		hash:   t.Hash,
+	}
+}
+
+func (t *treeNoder) isRoot() bool {
+	return t.name == ""
+}
+
+func (t *treeNoder) String() string {
+	return "treeNoder <" + t.name + ">"
+}
+
+func (t *treeNoder) Hash() []byte {
+	if t.mode == filemode.Deprecated {
+		return append(t.hash[:], filemode.Regular.Bytes()...)
+	}
+	return append(t.hash[:], t.mode.Bytes()...)
+}
+
+func (t *treeNoder) Name() string {
+	return t.name
+}
+
+func (t *treeNoder) IsDir() bool {
+	return t.mode == filemode.Dir
+}
+
+// Children will return the children of a treenoder as treenoders,
+// building them from the children of the wrapped git tree.
+func (t *treeNoder) Children() ([]noder.Noder, error) {
+	if t.mode != filemode.Dir {
+		return noder.NoChildren, nil
+	}
+
+	// children are memoized for efficiency
+	if t.children != nil {
+		return t.children, nil
+	}
+
+	// the parent of the returned children will be ourself as a tree if
+	// we are not the root treenoder. The root is special, as it
+	// is its own parent.
+	parent := t.parent
+	if !t.isRoot() {
+		var err error
+		if parent, err = t.parent.Tree(t.name); err != nil {
+			return nil, err
+		}
+	}
+
+	return transformChildren(parent)
+}
+
+// Returns the children of a tree as treenoders.
+// Efficiency is key here.
+func transformChildren(t *Tree) ([]noder.Noder, error) {
+	var err error
+	var e TreeEntry
+
+	// there will be more tree entries than children in the tree,
+	// due to submodules and empty directories, but I think it is still
+	// worth it to pre-allocate the whole array now, even if it is
+	// sometimes bigger than needed.
+	ret := make([]noder.Noder, 0, len(t.Entries))
+
+	walker := NewTreeWalker(t, false, nil) // don't recurse
+	// don't defer walker.Close() for efficiency reasons.
+	for {
+		_, e, err = walker.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			walker.Close()
+			return nil, err
+		}
+
+		ret = append(ret, &treeNoder{
+			parent: t,
+			name:   e.Name,
+			mode:   e.Mode,
+			hash:   e.Hash,
+		})
+	}
+	walker.Close()
+
+	return ret, nil
+}
+
+// len(t.tree.Entries) != the number of elements walked by the treewalker
+// because of empty directories, submodules, etc., so we have to walk
+// here.
+func (t *treeNoder) NumChildren() (int, error) {
+	children, err := t.Children()
+	if err != nil {
+		return 0, err
+	}
+
+	return len(children), nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go
new file mode 100644
index 0000000000..1bd724cad5
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs.go
@@ -0,0 +1,211 @@
+package packp
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
+	"github.com/go-git/go-git/v5/plumbing/storer"
+	"github.com/go-git/go-git/v5/storage/memory"
+)
+
+// AdvRefs values represent the information transmitted on an
+// advertised-refs message. Values from this type are not zero-value
+// safe, use the NewAdvRefs function instead.
+type AdvRefs struct {
+	// Prefix stores prefix payloads.
+	//
+	// When using this message over (smart) HTTP, you have to add a pktline
+	// before the whole thing with the following payload:
+	//
+	// "# service=$servicename" LF
+	//
+	// Moreover, some (if not all) git HTTP smart servers will send a
+	// flush-pkt just after the first pkt-line.
+	//
+	// To accommodate both situations, the Prefix field allows you to store
+	// any data you want to send before the actual pktlines. It will also
+	// be filled up with whatever is found on the line.
+	Prefix [][]byte
+	// Head stores the resolved HEAD reference if present.
+	// This can be present with git-upload-pack, not with git-receive-pack.
+	Head *plumbing.Hash
+	// Capabilities are the capabilities.
+	Capabilities *capability.List
+	// References are the hash references.
+	References map[string]plumbing.Hash
+	// Peeled are the peeled hash references.
+	Peeled map[string]plumbing.Hash
+	// Shallows are the shallow object ids.
+	Shallows []plumbing.Hash
+}
+
+// NewAdvRefs returns a pointer to a new AdvRefs value, ready to be used.
+func NewAdvRefs() *AdvRefs {
+	return &AdvRefs{
+		Prefix:       [][]byte{},
+		Capabilities: capability.NewList(),
+		References:   make(map[string]plumbing.Hash),
+		Peeled:       make(map[string]plumbing.Hash),
+		Shallows:     []plumbing.Hash{},
+	}
+}
+
+func (a *AdvRefs) AddReference(r *plumbing.Reference) error {
+	switch r.Type() {
+	case plumbing.SymbolicReference:
+		v := fmt.Sprintf("%s:%s", r.Name().String(), r.Target().String())
+		a.Capabilities.Add(capability.SymRef, v)
+	case plumbing.HashReference:
+		a.References[r.Name().String()] = r.Hash()
+	default:
+		return plumbing.ErrInvalidType
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) AllReferences() (memory.ReferenceStorage, error) {
+	s := memory.ReferenceStorage{}
+	if err := a.addRefs(s); err != nil {
+		return s, plumbing.NewUnexpectedError(err)
+	}
+
+	return s, nil
+}
+
+func (a *AdvRefs) addRefs(s storer.ReferenceStorer) error {
+	for name, hash := range a.References {
+		ref := plumbing.NewReferenceFromStrings(name, hash.String())
+		if err := s.SetReference(ref); err != nil {
+			return err
+		}
+	}
+
+	if a.supportSymrefs() {
+		return a.addSymbolicRefs(s)
+	}
+
+	return a.resolveHead(s)
+}
+
+// If the server does not support the symrefs capability, we need to
+// guess the reference that HEAD is pointing to.
+//
+// Git versions prior to 1.8.4.3 have a special procedure to find the
+// reference HEAD is pointing to:
+// - Check if a reference called master exists. If it exists and has
+// the same hash as HEAD, we can say that HEAD is pointing to master.
+// - If master does not exist or does not have the same hash as HEAD,
+// order the references and check, in that order, whether each one has
+// the same hash as HEAD. If it does, set HEAD to point to that branch.
+// - If no reference is found, return an error.
+func (a *AdvRefs) resolveHead(s storer.ReferenceStorer) error {
+	if a.Head == nil {
+		return nil
+	}
+
+	ref, err := s.Reference(plumbing.Master)
+
+	// check first if HEAD is pointing to master
+	if err == nil {
+		ok, err := a.createHeadIfCorrectReference(ref, s)
+		if err != nil {
+			return err
+		}
+
+		if ok {
+			return nil
+		}
+	}
+
+	if err != nil && err != plumbing.ErrReferenceNotFound {
+		return err
+	}
+
+	// From here on we are trying to guess the branch that HEAD is pointing to
+	refIter, err := s.IterReferences()
+	if err != nil {
+		return err
+	}
+
+	var refNames []string
+	err = refIter.ForEach(func(r *plumbing.Reference) error {
+		refNames = append(refNames, string(r.Name()))
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	sort.Strings(refNames)
+
+	var headSet bool
+	for _, refName := range refNames {
+		ref, err := s.Reference(plumbing.ReferenceName(refName))
+		if err != nil {
+			return err
+		}
+		ok, err := a.createHeadIfCorrectReference(ref, s)
+		if err != nil {
+			return err
+		}
+		if ok {
+			headSet = true
+			break
+		}
+	}
+
+	if !headSet {
+		return plumbing.ErrReferenceNotFound
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) createHeadIfCorrectReference(
+	reference *plumbing.Reference,
+	s storer.ReferenceStorer) (bool, error) {
+	if reference.Hash() == *a.Head {
+		headRef := plumbing.NewSymbolicReference(plumbing.HEAD, reference.Name())
+		if err := s.SetReference(headRef); err != nil {
+			return false, err
+		}
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (a *AdvRefs) addSymbolicRefs(s storer.ReferenceStorer) error {
+	for _, symref := range a.Capabilities.Get(capability.SymRef) {
+		chunks := strings.Split(symref, ":")
+		if len(chunks) != 2 {
+			err := fmt.Errorf("bad number of `:` in symref value (%q)", symref)
+			return plumbing.NewUnexpectedError(err)
+		}
+		name := plumbing.ReferenceName(chunks[0])
+		target := plumbing.ReferenceName(chunks[1])
+		ref := plumbing.NewSymbolicReference(name, target)
+		if err := s.SetReference(ref); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (a *AdvRefs) supportSymrefs() bool {
+	return a.Capabilities.Supports(capability.SymRef)
+}
+
+// IsEmpty returns true if the AdvRefs doesn't contain any references.
+func (a *AdvRefs) IsEmpty() bool {
+	return a.Head == nil &&
+		len(a.References) == 0 &&
+		len(a.Peeled) == 0 &&
+		len(a.Shallows) == 0
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
new file mode 100644
index 0000000000..63bbe5ab16
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_decode.go
@@ -0,0 +1,288 @@
+package packp
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+// Decode reads the next advertised-refs message from its input and
+// stores it in the AdvRefs.
+func (a *AdvRefs) Decode(r io.Reader) error { + d := newAdvRefsDecoder(r) + return d.Decode(a) +} + +type advRefsDecoder struct { + s *pktline.Scanner // a pkt-line scanner from the input stream + line []byte // current pkt-line contents, use parser.nextLine() to make it advance + nLine int // current pkt-line number for debugging, begins at 1 + hash plumbing.Hash // last hash read + err error // sticky error, use the parser.error() method to fill this out + data *AdvRefs // parsed data is stored here +} + +var ( + // ErrEmptyAdvRefs is returned by Decode if it gets an empty advertised + // references message. + ErrEmptyAdvRefs = errors.New("empty advertised-ref message") + // ErrEmptyInput is returned by Decode if the input is empty. + ErrEmptyInput = errors.New("empty input") +) + +func newAdvRefsDecoder(r io.Reader) *advRefsDecoder { + return &advRefsDecoder{ + s: pktline.NewScanner(r), + } +} + +func (d *advRefsDecoder) Decode(v *AdvRefs) error { + d.data = v + + for state := decodePrefix; state != nil; { + state = state(d) + } + + return d.err +} + +type decoderStateFn func(*advRefsDecoder) decoderStateFn + +// fills out the parser sticky error +func (d *advRefsDecoder) error(format string, a ...interface{}) { + msg := fmt.Sprintf( + "pkt-line %d: %s", d.nLine, + fmt.Sprintf(format, a...), + ) + + d.err = NewErrUnexpectedData(msg, d.line) +} + +// Reads a new pkt-line from the scanner, makes its payload available as +// p.line and increments p.nLine. A successful invocation returns true, +// otherwise, false is returned and the sticky error is filled out +// accordingly. Trims eols at the end of the payloads. +func (d *advRefsDecoder) nextLine() bool { + d.nLine++ + + if !d.s.Scan() { + if d.err = d.s.Err(); d.err != nil { + return false + } + + if d.nLine == 1 { + d.err = ErrEmptyInput + return false + } + + d.error("EOF") + return false + } + + d.line = d.s.Bytes() + d.line = bytes.TrimSuffix(d.line, eol) + + return true +} + +// The HTTP smart prefix is often followed by a flush-pkt. +func decodePrefix(d *advRefsDecoder) decoderStateFn { + if ok := d.nextLine(); !ok { + return nil + } + + if !isPrefix(d.line) { + return decodeFirstHash + } + + tmp := make([]byte, len(d.line)) + copy(tmp, d.line) + d.data.Prefix = append(d.data.Prefix, tmp) + if ok := d.nextLine(); !ok { + return nil + } + + if !isFlush(d.line) { + return decodeFirstHash + } + + d.data.Prefix = append(d.data.Prefix, pktline.Flush) + if ok := d.nextLine(); !ok { + return nil + } + + return decodeFirstHash +} + +func isPrefix(payload []byte) bool { + return len(payload) > 0 && payload[0] == '#' +} + +// If the first hash is zero, then a no-refs is coming. Otherwise, a +// list-of-refs is coming, and the hash will be followed by the first +// advertised ref. +func decodeFirstHash(p *advRefsDecoder) decoderStateFn { + // If the repository is empty, we receive a flush here (HTTP). 
+ if isFlush(p.line) { + p.err = ErrEmptyAdvRefs + return nil + } + + if len(p.line) < hashSize { + p.error("cannot read hash, pkt-line too short") + return nil + } + + if _, err := hex.Decode(p.hash[:], p.line[:hashSize]); err != nil { + p.error("invalid hash text: %s", err) + return nil + } + + p.line = p.line[hashSize:] + + if p.hash.IsZero() { + return decodeSkipNoRefs + } + + return decodeFirstRef +} + +// Skips SP "capabilities^{}" NUL +func decodeSkipNoRefs(p *advRefsDecoder) decoderStateFn { + if len(p.line) < len(noHeadMark) { + p.error("too short zero-id ref") + return nil + } + + if !bytes.HasPrefix(p.line, noHeadMark) { + p.error("malformed zero-id ref") + return nil + } + + p.line = p.line[len(noHeadMark):] + + return decodeCaps +} + +// decode the refname, expects SP refname NULL +func decodeFirstRef(l *advRefsDecoder) decoderStateFn { + if len(l.line) < 3 { + l.error("line too short after hash") + return nil + } + + if !bytes.HasPrefix(l.line, sp) { + l.error("no space after hash") + return nil + } + l.line = l.line[1:] + + chunks := bytes.SplitN(l.line, null, 2) + if len(chunks) < 2 { + l.error("NULL not found") + return nil + } + ref := chunks[0] + l.line = chunks[1] + + if bytes.Equal(ref, []byte(head)) { + l.data.Head = &l.hash + } else { + l.data.References[string(ref)] = l.hash + } + + return decodeCaps +} + +func decodeCaps(p *advRefsDecoder) decoderStateFn { + if err := p.data.Capabilities.Decode(p.line); err != nil { + p.error("invalid capabilities: %s", err) + return nil + } + + return decodeOtherRefs +} + +// The refs are either tips (obj-id SP refname) or a peeled (obj-id SP refname^{}). +// If there are no refs, then there might be a shallow or flush-ptk. +func decodeOtherRefs(p *advRefsDecoder) decoderStateFn { + if ok := p.nextLine(); !ok { + return nil + } + + if bytes.HasPrefix(p.line, shallow) { + return decodeShallow + } + + if len(p.line) == 0 { + return nil + } + + saveTo := p.data.References + if bytes.HasSuffix(p.line, peeled) { + p.line = bytes.TrimSuffix(p.line, peeled) + saveTo = p.data.Peeled + } + + ref, hash, err := readRef(p.line) + if err != nil { + p.error("%s", err) + return nil + } + saveTo[ref] = hash + + return decodeOtherRefs +} + +// Reads a ref-name +func readRef(data []byte) (string, plumbing.Hash, error) { + chunks := bytes.Split(data, sp) + switch { + case len(chunks) == 1: + return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: no space was found") + case len(chunks) > 2: + return "", plumbing.ZeroHash, fmt.Errorf("malformed ref data: more than one space found") + default: + return string(chunks[1]), plumbing.NewHash(string(chunks[0])), nil + } +} + +// Keeps reading shallows until a flush-pkt is found +func decodeShallow(p *advRefsDecoder) decoderStateFn { + if !bytes.HasPrefix(p.line, shallow) { + p.error("malformed shallow prefix, found %q... 
instead", p.line[:len(shallow)])
+		return nil
+	}
+	p.line = bytes.TrimPrefix(p.line, shallow)
+
+	if len(p.line) != hashSize {
+		p.error(fmt.Sprintf(
+			"malformed shallow hash: wrong length, expected 40 bytes, read %d bytes",
+			len(p.line)))
+		return nil
+	}
+
+	text := p.line[:hashSize]
+	var h plumbing.Hash
+	if _, err := hex.Decode(h[:], text); err != nil {
+		p.error("invalid hash text: %s", err)
+		return nil
+	}
+
+	p.data.Shallows = append(p.data.Shallows, h)
+
+	if ok := p.nextLine(); !ok {
+		return nil
+	}
+
+	if len(p.line) == 0 {
+		return nil // successful parse of the advertised-refs message
+	}
+
+	return decodeShallow
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
new file mode 100644
index 0000000000..fb9bd883fc
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/advrefs_encode.go
@@ -0,0 +1,176 @@
+package packp
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"sort"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
+)
+
+// Encode writes the AdvRefs encoding to a writer.
+//
+// All the payloads will end with a newline character. Capabilities,
+// references and shallows are written in alphabetical order, except for
+// peeled references that always follow their corresponding references.
+func (a *AdvRefs) Encode(w io.Writer) error {
+	e := newAdvRefsEncoder(w)
+	return e.Encode(a)
+}
+
+type advRefsEncoder struct {
+	data         *AdvRefs         // data to encode
+	pe           *pktline.Encoder // where to write the encoded data
+	firstRefName string           // reference name to encode in the first pkt-line (HEAD if present)
+	firstRefHash plumbing.Hash    // hash reference to encode in the first pkt-line (HEAD if present)
+	sortedRefs   []string         // hash references to encode, sorted in increasing order
+	err          error            // sticky error
+}
+
+func newAdvRefsEncoder(w io.Writer) *advRefsEncoder {
+	return &advRefsEncoder{
+		pe: pktline.NewEncoder(w),
+	}
+}
+
+func (e *advRefsEncoder) Encode(v *AdvRefs) error {
+	e.data = v
+	e.sortRefs()
+	e.setFirstRef()
+
+	for state := encodePrefix; state != nil; {
+		state = state(e)
+	}
+
+	return e.err
+}
+
+func (e *advRefsEncoder) sortRefs() {
+	if len(e.data.References) > 0 {
+		refs := make([]string, 0, len(e.data.References))
+		for refName := range e.data.References {
+			refs = append(refs, refName)
+		}
+
+		sort.Strings(refs)
+		e.sortedRefs = refs
+	}
+}
+
+func (e *advRefsEncoder) setFirstRef() {
+	if e.data.Head != nil {
+		e.firstRefName = head
+		e.firstRefHash = *e.data.Head
+		return
+	}
+
+	if len(e.sortedRefs) > 0 {
+		refName := e.sortedRefs[0]
+		e.firstRefName = refName
+		e.firstRefHash = e.data.References[refName]
+	}
+}
+
+type encoderStateFn func(*advRefsEncoder) encoderStateFn
+
+func encodePrefix(e *advRefsEncoder) encoderStateFn {
+	for _, p := range e.data.Prefix {
+		if bytes.Equal(p, pktline.Flush) {
+			if e.err = e.pe.Flush(); e.err != nil {
+				return nil
+			}
+			continue
+		}
+		if e.err = e.pe.Encodef("%s\n", string(p)); e.err != nil {
+			return nil
+		}
+	}
+
+	return encodeFirstLine
+}
+
+// Adds the first pkt-line payload: head hash, head ref and capabilities.
+// If the HEAD ref is not found, the first reference in increasing order will be used.
+// If there is neither a HEAD nor any refs, the first line will be "PKT-LINE(zero-id SP "capabilities^{}" NUL capability-list)".
+// See: https://github.com/git/git/blob/master/Documentation/technical/pack-protocol.txt +// See: https://github.com/git/git/blob/master/Documentation/technical/protocol-common.txt +func encodeFirstLine(e *advRefsEncoder) encoderStateFn { + const formatFirstLine = "%s %s\x00%s\n" + var firstLine string + capabilities := formatCaps(e.data.Capabilities) + + if e.firstRefName == "" { + firstLine = fmt.Sprintf(formatFirstLine, plumbing.ZeroHash.String(), "capabilities^{}", capabilities) + } else { + firstLine = fmt.Sprintf(formatFirstLine, e.firstRefHash.String(), e.firstRefName, capabilities) + + } + + if e.err = e.pe.EncodeString(firstLine); e.err != nil { + return nil + } + + return encodeRefs +} + +func formatCaps(c *capability.List) string { + if c == nil { + return "" + } + + return c.String() +} + +// Adds the (sorted) refs: hash SP refname EOL +// and their peeled refs if any. +func encodeRefs(e *advRefsEncoder) encoderStateFn { + for _, r := range e.sortedRefs { + if r == e.firstRefName { + continue + } + + hash := e.data.References[r] + if e.err = e.pe.Encodef("%s %s\n", hash.String(), r); e.err != nil { + return nil + } + + if hash, ok := e.data.Peeled[r]; ok { + if e.err = e.pe.Encodef("%s %s^{}\n", hash.String(), r); e.err != nil { + return nil + } + } + } + + return encodeShallow +} + +// Adds the (sorted) shallows: "shallow" SP hash EOL +func encodeShallow(e *advRefsEncoder) encoderStateFn { + sorted := sortShallows(e.data.Shallows) + for _, hash := range sorted { + if e.err = e.pe.Encodef("shallow %s\n", hash); e.err != nil { + return nil + } + } + + return encodeFlush +} + +func sortShallows(c []plumbing.Hash) []string { + ret := []string{} + for _, h := range c { + ret = append(ret, h.String()) + } + sort.Strings(ret) + + return ret +} + +func encodeFlush(e *advRefsEncoder) encoderStateFn { + e.err = e.pe.Flush() + return nil +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go new file mode 100644 index 0000000000..8d6a56f532 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/capability.go @@ -0,0 +1,259 @@ +// Package capability defines the server and client capabilities. +package capability + +// Capability describes a server or client capability. +type Capability string + +func (n Capability) String() string { + return string(n) +} + +const ( + // MultiACK capability allows the server to return "ACK obj-id continue" as + // soon as it finds a commit that it can use as a common base, between the + // client's wants and the client's have set. + // + // By sending this early, the server can potentially head off the client + // from walking any further down that particular branch of the client's + // repository history. The client may still need to walk down other + // branches, sending have lines for those, until the server has a + // complete cut across the DAG, or the client has said "done". + // + // Without multi_ack, a client sends have lines in --date-order until + // the server has found a common base. That means the client will send + // have lines that are already known by the server to be common, because + // they overlap in time with another branch that the server hasn't found + // a common base on yet. 
+ // + // For example suppose the client has commits in caps that the server + // doesn't and the server has commits in lower case that the client + // doesn't, as in the following diagram: + // + // +---- u ---------------------- x + // / +----- y + // / / + // a -- b -- c -- d -- E -- F + // \ + // +--- Q -- R -- S + // + // If the client wants x,y and starts out by saying have F,S, the server + // doesn't know what F,S is. Eventually the client says "have d" and + // the server sends "ACK d continue" to let the client know to stop + // walking down that line (so don't send c-b-a), but it's not done yet, + // it needs a base for x. The client keeps going with S-R-Q, until a + // gets reached, at which point the server has a clear base and it all + // ends. + // + // Without multi_ack the client would have sent that c-b-a chain anyway, + // interleaved with S-R-Q. + MultiACK Capability = "multi_ack" + // MultiACKDetailed is an extension of multi_ack that permits client to + // better understand the server's in-memory state. + MultiACKDetailed Capability = "multi_ack_detailed" + // NoDone should only be used with the smart HTTP protocol. If + // multi_ack_detailed and no-done are both present, then the sender is + // free to immediately send a pack following its first "ACK obj-id ready" + // message. + // + // Without no-done in the smart HTTP protocol, the server session would + // end and the client has to make another trip to send "done" before + // the server can send the pack. no-done removes the last round and + // thus slightly reduces latency. + NoDone Capability = "no-done" + // ThinPack is one with deltas which reference base objects not + // contained within the pack (but are known to exist at the receiving + // end). This can reduce the network traffic significantly, but it + // requires the receiving end to know how to "thicken" these packs by + // adding the missing bases to the pack. + // + // The upload-pack server advertises 'thin-pack' when it can generate + // and send a thin pack. A client requests the 'thin-pack' capability + // when it understands how to "thicken" it, notifying the server that + // it can receive such a pack. A client MUST NOT request the + // 'thin-pack' capability if it cannot turn a thin pack into a + // self-contained pack. + // + // Receive-pack, on the other hand, is assumed by default to be able to + // handle thin packs, but can ask the client not to use the feature by + // advertising the 'no-thin' capability. A client MUST NOT send a thin + // pack if the server advertises the 'no-thin' capability. + // + // The reasons for this asymmetry are historical. The receive-pack + // program did not exist until after the invention of thin packs, so + // historically the reference implementation of receive-pack always + // understood thin packs. Adding 'no-thin' later allowed receive-pack + // to disable the feature in a backwards-compatible manner. + ThinPack Capability = "thin-pack" + // Sideband means that server can send, and client understand multiplexed + // progress reports and error info interleaved with the packfile itself. + // + // These two options are mutually exclusive. A modern client always + // favors Sideband64k. + // + // Either mode indicates that the packfile data will be streamed broken + // up into packets of up to either 1000 bytes in the case of 'side_band', + // or 65520 bytes in the case of 'side_band_64k'. 
Each packet is made up + // of a leading 4-byte pkt-line length of how much data is in the packet, + // followed by a 1-byte stream code, followed by the actual data. + // + // The stream code can be one of: + // + // 1 - pack data + // 2 - progress messages + // 3 - fatal error message just before stream aborts + // + // The "side-band-64k" capability came about as a way for newer clients + // that can handle much larger packets to request packets that are + // actually crammed nearly full, while maintaining backward compatibility + // for the older clients. + // + // Further, with side-band and its up to 1000-byte messages, it's actually + // 999 bytes of payload and 1 byte for the stream code. With side-band-64k, + // same deal, you have up to 65519 bytes of data and 1 byte for the stream + // code. + // + // The client MUST send only maximum of one of "side-band" and "side- + // band-64k". Server MUST diagnose it as an error if client requests + // both. + Sideband Capability = "side-band" + Sideband64k Capability = "side-band-64k" + // OFSDelta server can send, and client understand PACKv2 with delta + // referring to its base by position in pack rather than by an obj-id. That + // is, they can send/read OBJ_OFS_DELTA (aka type 6) in a packfile. + OFSDelta Capability = "ofs-delta" + // Agent the server may optionally send this capability to notify the client + // that the server is running version `X`. The client may optionally return + // its own agent string by responding with an `agent=Y` capability (but it + // MUST NOT do so if the server did not mention the agent capability). The + // `X` and `Y` strings may contain any printable ASCII characters except + // space (i.e., the byte range 32 < x < 127), and are typically of the form + // "package/version" (e.g., "git/1.8.3.1"). The agent strings are purely + // informative for statistics and debugging purposes, and MUST NOT be used + // to programmatically assume the presence or absence of particular features. + Agent Capability = "agent" + // Shallow capability adds "deepen", "shallow" and "unshallow" commands to + // the fetch-pack/upload-pack protocol so clients can request shallow + // clones. + Shallow Capability = "shallow" + // DeepenSince adds "deepen-since" command to fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific time, instead of depth. Internally it's equivalent of doing + // "rev-list --max-age=" on the server side. "deepen-since" + // cannot be used with "deepen". + DeepenSince Capability = "deepen-since" + // DeepenNot adds "deepen-not" command to fetch-pack/upload-pack + // protocol so the client can request shallow clones that are cut at a + // specific revision, instead of depth. Internally it's equivalent of + // doing "rev-list --not " on the server side. "deepen-not" + // cannot be used with "deepen", but can be used with "deepen-since". + DeepenNot Capability = "deepen-not" + // DeepenRelative if this capability is requested by the client, the + // semantics of "deepen" command is changed. The "depth" argument is the + // depth from the current shallow boundary, instead of the depth from + // remote refs. + DeepenRelative Capability = "deepen-relative" + // NoProgress the client was started with "git clone -q" or something, and + // doesn't want that side band 2. Basically the client just says "I do not + // wish to receive stream 2 on sideband, so do not send it to me, and if + // you did, I will drop it on the floor anyway". 
However, the sideband + // channel 3 is still used for error responses. + NoProgress Capability = "no-progress" + // IncludeTag capability is about sending annotated tags if we are + // sending objects they point to. If we pack an object to the client, and + // a tag object points exactly at that object, we pack the tag object too. + // In general this allows a client to get all new annotated tags when it + // fetches a branch, in a single network connection. + // + // Clients MAY always send include-tag, hardcoding it into a request when + // the server advertises this capability. The decision for a client to + // request include-tag only has to do with the client's desires for tag + // data, whether or not a server had advertised objects in the + // refs/tags/* namespace. + // + // Servers MUST pack the tags if their referrant is packed and the client + // has requested include-tags. + // + // Clients MUST be prepared for the case where a server has ignored + // include-tag and has not actually sent tags in the pack. In such + // cases the client SHOULD issue a subsequent fetch to acquire the tags + // that include-tag would have otherwise given the client. + // + // The server SHOULD send include-tag, if it supports it, regardless + // of whether or not there are tags available. + IncludeTag Capability = "include-tag" + // ReportStatus the receive-pack process can receive a 'report-status' + // capability, which tells it that the client wants a report of what + // happened after a packfile upload and reference update. If the pushing + // client requests this capability, after unpacking and updating references + // the server will respond with whether the packfile unpacked successfully + // and if each reference was updated successfully. If any of those were not + // successful, it will send back an error message. See pack-protocol.txt + // for example messages. + ReportStatus Capability = "report-status" + // DeleteRefs If the server sends back this capability, it means that + // it is capable of accepting a zero-id value as the target + // value of a reference update. It is not sent back by the client, it + // simply informs the client that it can be sent zero-id values + // to delete references + DeleteRefs Capability = "delete-refs" + // Quiet If the receive-pack server advertises this capability, it is + // capable of silencing human-readable progress output which otherwise may + // be shown when processing the received pack. A send-pack client should + // respond with the 'quiet' capability to suppress server-side progress + // reporting if the local progress reporting is also being suppressed + // (e.g., via `push -q`, or if stderr does not go to a tty). + Quiet Capability = "quiet" + // Atomic If the server sends this capability it is capable of accepting + // atomic pushes. If the pushing client requests this capability, the server + // will update the refs in one atomic transaction. Either all refs are + // updated or none. + Atomic Capability = "atomic" + // PushOptions If the server sends this capability it is able to accept + // push options after the update commands have been sent, but before the + // packfile is streamed. If the pushing client requests this capability, + // the server will pass the options to the pre- and post- receive hooks + // that process this push request. 
+	PushOptions Capability = "push-options"
+	// AllowTipSHA1InWant if the upload-pack server advertises this capability,
+	// fetch-pack may send "want" lines with SHA-1s that exist at the server but
+	// are not advertised by upload-pack.
+	AllowTipSHA1InWant Capability = "allow-tip-sha1-in-want"
+	// AllowReachableSHA1InWant if the upload-pack server advertises this
+	// capability, fetch-pack may send "want" lines with SHA-1s that exist at
+	// the server but are not advertised by upload-pack.
+	AllowReachableSHA1InWant Capability = "allow-reachable-sha1-in-want"
+	// PushCert the receive-pack server that advertises this capability is
+	// willing to accept a signed push certificate, and asks the nonce to be
+	// included in the push certificate. A send-pack client MUST NOT
+	// send a push-cert packet unless the receive-pack server advertises
+	// this capability.
+	PushCert Capability = "push-cert"
+	// SymRef symbolic reference support for better negotiation.
+	SymRef Capability = "symref"
+	// ObjectFormat takes a hash algorithm as an argument, and indicates that
+	// the server supports the given hash algorithm.
+	ObjectFormat Capability = "object-format"
+	// Filter if present, fetch-pack may send "filter" commands to request a
+	// partial clone or partial fetch, and request that the server omit
+	// various objects from the packfile.
+	Filter Capability = "filter"
+)
+
+const DefaultAgent = "go-git/4.x"
+
+var known = map[Capability]bool{
+	MultiACK: true, MultiACKDetailed: true, NoDone: true, ThinPack: true,
+	Sideband: true, Sideband64k: true, OFSDelta: true, Agent: true,
+	Shallow: true, DeepenSince: true, DeepenNot: true, DeepenRelative: true,
+	NoProgress: true, IncludeTag: true, ReportStatus: true, DeleteRefs: true,
+	Quiet: true, Atomic: true, PushOptions: true, AllowTipSHA1InWant: true,
+	AllowReachableSHA1InWant: true, PushCert: true, SymRef: true,
+	ObjectFormat: true, Filter: true,
+}
+
+var requiresArgument = map[Capability]bool{
+	Agent: true, PushCert: true, SymRef: true, ObjectFormat: true,
+}
+
+var multipleArgument = map[Capability]bool{
+	SymRef: true,
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
new file mode 100644
index 0000000000..f41ec799cf
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/capability/list.go
@@ -0,0 +1,193 @@
+package capability
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+var (
+	// ErrArgumentsRequired is returned if no arguments are given with a
+	// capability that requires arguments
+	ErrArgumentsRequired = errors.New("arguments required")
+	// ErrArguments is returned if arguments are given with a capability that
+	// does not support arguments
+	ErrArguments = errors.New("arguments not allowed")
+	// ErrEmptyArgument is returned when an empty value is given
+	ErrEmptyArgument = errors.New("empty argument")
+	// ErrMultipleArguments is returned when multiple arguments are given to a
+	// capability that does not support them
+	ErrMultipleArguments = errors.New("multiple arguments not allowed")
+)
+
+// List represents a list of capabilities
+type List struct {
+	m    map[Capability]*entry
+	sort []string
+}
+
+type entry struct {
+	Name   Capability
+	Values []string
+}
+
+// NewList returns a new List of capabilities
+func NewList() *List {
+	return &List{
+		m: make(map[Capability]*entry),
+	}
+}
+
+// IsEmpty returns true if the List is empty
+func (l *List) IsEmpty() bool {
+	return len(l.sort) == 0
+}
+
+// 
Decode decodes list of capabilities from raw into the list +func (l *List) Decode(raw []byte) error { + // git 1.x receive pack used to send a leading space on its + // git-receive-pack capabilities announcement. We just trim space to be + // tolerant to space changes in different versions. + raw = bytes.TrimSpace(raw) + + if len(raw) == 0 { + return nil + } + + for _, data := range bytes.Split(raw, []byte{' '}) { + pair := bytes.SplitN(data, []byte{'='}, 2) + + c := Capability(pair[0]) + if len(pair) == 1 { + if err := l.Add(c); err != nil { + return err + } + + continue + } + + if err := l.Add(c, string(pair[1])); err != nil { + return err + } + } + + return nil +} + +// Get returns the values for a capability +func (l *List) Get(capability Capability) []string { + if _, ok := l.m[capability]; !ok { + return nil + } + + return l.m[capability].Values +} + +// Set sets a capability removing the previous values +func (l *List) Set(capability Capability, values ...string) error { + delete(l.m, capability) + return l.Add(capability, values...) +} + +// Add adds a capability, values are optional +func (l *List) Add(c Capability, values ...string) error { + if err := l.validate(c, values); err != nil { + return err + } + + if !l.Supports(c) { + l.m[c] = &entry{Name: c} + l.sort = append(l.sort, c.String()) + } + + if len(values) == 0 { + return nil + } + + if known[c] && !multipleArgument[c] && len(l.m[c].Values) > 0 { + return ErrMultipleArguments + } + + l.m[c].Values = append(l.m[c].Values, values...) + return nil +} + +func (l *List) validateNoEmptyArgs(values []string) error { + for _, v := range values { + if v == "" { + return ErrEmptyArgument + } + } + return nil +} + +func (l *List) validate(c Capability, values []string) error { + if !known[c] { + return l.validateNoEmptyArgs(values) + } + if requiresArgument[c] && len(values) == 0 { + return ErrArgumentsRequired + } + + if !requiresArgument[c] && len(values) != 0 { + return ErrArguments + } + + if !multipleArgument[c] && len(values) > 1 { + return ErrMultipleArguments + } + return l.validateNoEmptyArgs(values) +} + +// Supports returns true if capability is present +func (l *List) Supports(capability Capability) bool { + _, ok := l.m[capability] + return ok +} + +// Delete deletes a capability from the List +func (l *List) Delete(capability Capability) { + if !l.Supports(capability) { + return + } + + delete(l.m, capability) + for i, c := range l.sort { + if c != string(capability) { + continue + } + + l.sort = append(l.sort[:i], l.sort[i+1:]...) + return + } +} + +// All returns a slice with all defined capabilities. 
+func (l *List) All() []Capability { + var cs []Capability + for _, key := range l.sort { + cs = append(cs, Capability(key)) + } + + return cs +} + +// String generates the capabilities strings, the capabilities are sorted in +// insertion order +func (l *List) String() string { + var o []string + for _, key := range l.sort { + cap := l.m[Capability(key)] + if len(cap.Values) == 0 { + o = append(o, key) + continue + } + + for _, value := range cap.Values { + o = append(o, fmt.Sprintf("%s=%s", key, value)) + } + } + + return strings.Join(o, " ") +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go new file mode 100644 index 0000000000..ab07ac8f74 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/common.go @@ -0,0 +1,70 @@ +package packp + +import ( + "fmt" +) + +type stateFn func() stateFn + +const ( + // common + hashSize = 40 + + // advrefs + head = "HEAD" + noHead = "capabilities^{}" +) + +var ( + // common + sp = []byte(" ") + eol = []byte("\n") + eq = []byte{'='} + + // advertised-refs + null = []byte("\x00") + peeled = []byte("^{}") + noHeadMark = []byte(" capabilities^{}\x00") + + // upload-request + want = []byte("want ") + shallow = []byte("shallow ") + deepen = []byte("deepen") + deepenCommits = []byte("deepen ") + deepenSince = []byte("deepen-since ") + deepenReference = []byte("deepen-not ") + + // shallow-update + unshallow = []byte("unshallow ") + + // server-response + ack = []byte("ACK") + nak = []byte("NAK") + + // updreq + shallowNoSp = []byte("shallow") +) + +func isFlush(payload []byte) bool { + return len(payload) == 0 +} + +// ErrUnexpectedData represents an unexpected data decoding a message +type ErrUnexpectedData struct { + Msg string + Data []byte +} + +// NewErrUnexpectedData returns a new ErrUnexpectedData containing the data and +// the message given +func NewErrUnexpectedData(msg string, data []byte) error { + return &ErrUnexpectedData{Msg: msg, Data: data} +} + +func (err *ErrUnexpectedData) Error() string { + if len(err.Data) == 0 { + return err.Msg + } + + return fmt.Sprintf("%s (%s)", err.Msg, err.Data) +} diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go new file mode 100644 index 0000000000..4950d1d662 --- /dev/null +++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/doc.go @@ -0,0 +1,724 @@ +package packp + +/* + +A nice way to trace the real data transmitted and received by git, use: + +GIT_TRACE_PACKET=true git ls-remote http://github.com/src-d/go-git +GIT_TRACE_PACKET=true git clone http://github.com/src-d/go-git + +Here follows a copy of the current protocol specification at the time of +this writing. + +(Please notice that most http git servers will add a flush-pkt after the +first pkt-line when using HTTP smart.) 
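+
+For a higher-level view of the same exchange, here is a minimal sketch,
+assuming only the public go-git v5 Remote API (which drives the packp
+machinery vendored in this patch), of listing a remote's advertised refs
+without a local repository:
+
+----
+package main
+
+import (
+	"fmt"
+	"log"
+
+	"github.com/go-git/go-git/v5"
+	"github.com/go-git/go-git/v5/config"
+	"github.com/go-git/go-git/v5/storage/memory"
+)
+
+func main() {
+	// An in-memory remote: no working tree or .git directory is needed
+	// just to list refs.
+	rem := git.NewRemote(memory.NewStorage(), &config.RemoteConfig{
+		Name: "origin",
+		URLs: []string{"https://github.com/src-d/go-git"},
+	})
+
+	// List performs the reference discovery described below and returns
+	// the advertised references.
+	refs, err := rem.List(&git.ListOptions{})
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	for _, r := range refs {
+		fmt.Println(r.Hash(), r.Name())
+	}
+}
+----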
+ + +Documentation Common to Pack and Http Protocols +=============================================== + +ABNF Notation +------------- + +ABNF notation as described by RFC 5234 is used within the protocol documents, +except the following replacement core rules are used: +---- + HEXDIG = DIGIT / "a" / "b" / "c" / "d" / "e" / "f" +---- + +We also define the following common rules: +---- + NUL = %x00 + zero-id = 40*"0" + obj-id = 40*(HEXDIGIT) + + refname = "HEAD" + refname /= "refs/" +---- + +A refname is a hierarchical octet string beginning with "refs/" and +not violating the 'git-check-ref-format' command's validation rules. +More specifically, they: + +. They can include slash `/` for hierarchical (directory) + grouping, but no slash-separated component can begin with a + dot `.`. + +. They must contain at least one `/`. This enforces the presence of a + category like `heads/`, `tags/` etc. but the actual names are not + restricted. + +. They cannot have two consecutive dots `..` anywhere. + +. They cannot have ASCII control characters (i.e. bytes whose + values are lower than \040, or \177 `DEL`), space, tilde `~`, + caret `^`, colon `:`, question-mark `?`, asterisk `*`, + or open bracket `[` anywhere. + +. They cannot end with a slash `/` or a dot `.`. + +. They cannot end with the sequence `.lock`. + +. They cannot contain a sequence `@{`. + +. They cannot contain a `\\`. + + +pkt-line Format +--------------- + +Much (but not all) of the payload is described around pkt-lines. + +A pkt-line is a variable length binary string. The first four bytes +of the line, the pkt-len, indicates the total length of the line, +in hexadecimal. The pkt-len includes the 4 bytes used to contain +the length's hexadecimal representation. + +A pkt-line MAY contain binary data, so implementors MUST ensure +pkt-line parsing/formatting routines are 8-bit clean. + +A non-binary line SHOULD BE terminated by an LF, which if present +MUST be included in the total length. Receivers MUST treat pkt-lines +with non-binary data the same whether or not they contain the trailing +LF (stripping the LF if present, and not complaining when it is +missing). + +The maximum length of a pkt-line's data component is 65516 bytes. +Implementations MUST NOT send pkt-line whose length exceeds 65520 +(65516 bytes of payload + 4 bytes of length data). + +Implementations SHOULD NOT send an empty pkt-line ("0004"). + +A pkt-line with a length field of 0 ("0000"), called a flush-pkt, +is a special case and MUST be handled differently than an empty +pkt-line ("0004"). + +---- + pkt-line = data-pkt / flush-pkt + + data-pkt = pkt-len pkt-payload + pkt-len = 4*(HEXDIG) + pkt-payload = (pkt-len - 4)*(OCTET) + + flush-pkt = "0000" +---- + +Examples (as C-style strings): + +---- + pkt-line actual value + --------------------------------- + "0006a\n" "a\n" + "0005a" "a" + "000bfoobar\n" "foobar\n" + "0004" "" +---- + +Packfile transfer protocols +=========================== + +Git supports transferring data in packfiles over the ssh://, git://, http:// and +file:// transports. There exist two sets of protocols, one for pushing +data from a client to a server and another for fetching data from a +server to a client. The three transports (ssh, git, file) use the same +protocol to transfer data. http is documented in http-protocol.txt. 
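+
+Before going further, the pkt-line framing defined above can be pinned
+down with a tiny Go sketch (illustrative only; encodePktLine is a made-up
+helper, not part of the spec or of go-git):
+
+----
+package main
+
+import "fmt"
+
+// encodePktLine frames a payload as a data-pkt: four lower-case hex
+// digits carrying the total line length (payload plus the 4 length
+// bytes themselves), followed by the payload.
+func encodePktLine(payload string) (string, error) {
+	if len(payload) > 65516 {
+		return "", fmt.Errorf("payload too long: %d bytes", len(payload))
+	}
+	return fmt.Sprintf("%04x%s", len(payload)+4, payload), nil
+}
+
+func main() {
+	for _, p := range []string{"a\n", "a", "foobar\n"} {
+		line, _ := encodePktLine(p)
+		fmt.Printf("%q -> %q\n", p, line) // matches the examples above
+	}
+	fmt.Printf("flush-pkt -> %q\n", "0000") // special case, not "0004"
+}
+----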
+ +The processes invoked in the canonical Git implementation are 'upload-pack' +on the server side and 'fetch-pack' on the client side for fetching data; +then 'receive-pack' on the server and 'send-pack' on the client for pushing +data. The protocol functions to have a server tell a client what is +currently on the server, then for the two to negotiate the smallest amount +of data to send in order to fully update one or the other. + +pkt-line Format +--------------- + +The descriptions below build on the pkt-line format described in +protocol-common.txt. When the grammar indicate `PKT-LINE(...)`, unless +otherwise noted the usual pkt-line LF rules apply: the sender SHOULD +include a LF, but the receiver MUST NOT complain if it is not present. + +Transports +---------- +There are three transports over which the packfile protocol is +initiated. The Git transport is a simple, unauthenticated server that +takes the command (almost always 'upload-pack', though Git +servers can be configured to be globally writable, in which 'receive- +pack' initiation is also allowed) with which the client wishes to +communicate and executes it and connects it to the requesting +process. + +In the SSH transport, the client just runs the 'upload-pack' +or 'receive-pack' process on the server over the SSH protocol and then +communicates with that invoked process over the SSH connection. + +The file:// transport runs the 'upload-pack' or 'receive-pack' +process locally and communicates with it over a pipe. + +Git Transport +------------- + +The Git transport starts off by sending the command and repository +on the wire using the pkt-line format, followed by a NUL byte and a +hostname parameter, terminated by a NUL byte. + + 0032git-upload-pack /project.git\0host=myserver.com\0 + +-- + git-proto-request = request-command SP pathname NUL [ host-parameter NUL ] + request-command = "git-upload-pack" / "git-receive-pack" / + "git-upload-archive" ; case sensitive + pathname = *( %x01-ff ) ; exclude NUL + host-parameter = "host=" hostname [ ":" port ] +-- + +Only host-parameter is allowed in the git-proto-request. Clients +MUST NOT attempt to send additional parameters. It is used for the +git-daemon name based virtual hosting. See --interpolated-path +option to git daemon, with the %H/%CH format characters. + +Basically what the Git client is doing to connect to an 'upload-pack' +process on the server side over the Git protocol is this: + + $ echo -e -n \ + "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | + nc -v example.com 9418 + +If the server refuses the request for some reasons, it could abort +gracefully with an error message. + +---- + error-line = PKT-LINE("ERR" SP explanation-text) +---- + + +SSH Transport +------------- + +Initiating the upload-pack or receive-pack processes over SSH is +executing the binary on the server via SSH remote execution. +It is basically equivalent to running this: + + $ ssh git.example.com "git-upload-pack '/project.git'" + +For a server to support Git pushing and pulling for a given user over +SSH, that user needs to be able to execute one or both of those +commands via the SSH shell that they are provided on login. On some +systems, that shell access is limited to only being able to run those +two commands, or even just one of them. 
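+
+In Go terms, the SSH transport therefore amounts to remote command
+execution plus piping. A rough sketch, shelling out to the system ssh
+(illustrative only, with a made-up host and path):
+
+----
+package main
+
+import (
+	"os"
+	"os/exec"
+)
+
+func main() {
+	// Equivalent of: ssh git.example.com "git-upload-pack '/project.git'"
+	cmd := exec.Command("ssh", "git.example.com", "git-upload-pack '/project.git'")
+	cmd.Stdin = os.Stdin   // pkt-lines we send (wants, haves, done)
+	cmd.Stdout = os.Stdout // pkt-lines the server sends (refs, ACK/NAK, pack)
+	cmd.Stderr = os.Stderr
+	if err := cmd.Run(); err != nil {
+		os.Exit(1)
+	}
+}
+----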
+ +In an ssh:// format URI, it's absolute in the URI, so the '/' after +the host name (or port number) is sent as an argument, which is then +read by the remote git-upload-pack exactly as is, so it's effectively +an absolute path in the remote filesystem. + + git clone ssh://user@example.com/project.git + | + v + ssh user@example.com "git-upload-pack '/project.git'" + +In a "user@host:path" format URI, its relative to the user's home +directory, because the Git client will run: + + git clone user@example.com:project.git + | + v + ssh user@example.com "git-upload-pack 'project.git'" + +The exception is if a '~' is used, in which case +we execute it without the leading '/'. + + ssh://user@example.com/~alice/project.git, + | + v + ssh user@example.com "git-upload-pack '~alice/project.git'" + +A few things to remember here: + +- The "command name" is spelled with dash (e.g. git-upload-pack), but + this can be overridden by the client; + +- The repository path is always quoted with single quotes. + +Fetching Data From a Server +--------------------------- + +When one Git repository wants to get data that a second repository +has, the first can 'fetch' from the second. This operation determines +what data the server has that the client does not then streams that +data down to the client in packfile format. + + +Reference Discovery +------------------- + +When the client initially connects the server will immediately respond +with a listing of each reference it has (all branches and tags) along +with the object name that each reference currently points to. + + $ echo -e -n "0039git-upload-pack /schacon/gitbook.git\0host=example.com\0" | + nc -v example.com 9418 + 00887217a7c7e582c46cec22a130adf4b9d7d950fba0 HEAD\0multi_ack thin-pack + side-band side-band-64k ofs-delta shallow no-progress include-tag + 00441d3fcd5ced445d1abc402225c0b8a1299641f497 refs/heads/integration + 003f7217a7c7e582c46cec22a130adf4b9d7d950fba0 refs/heads/master + 003cb88d2441cac0977faf98efc80305012112238d9d refs/tags/v0.9 + 003c525128480b96c89e6418b1e40909bf6c5b2d580f refs/tags/v1.0 + 003fe92df48743b7bc7d26bcaabfddde0a1e20cae47c refs/tags/v1.0^{} + 0000 + +The returned response is a pkt-line stream describing each ref and +its current value. The stream MUST be sorted by name according to +the C locale ordering. + +If HEAD is a valid ref, HEAD MUST appear as the first advertised +ref. If HEAD is not a valid ref, HEAD MUST NOT appear in the +advertisement list at all, but other refs may still appear. + +The stream MUST include capability declarations behind a NUL on the +first ref. The peeled value of a ref (that is "ref^{}") MUST be +immediately after the ref itself, if presented. A conforming server +MUST peel the ref if it's an annotated tag. + +---- + advertised-refs = (no-refs / list-of-refs) + *shallow + flush-pkt + + no-refs = PKT-LINE(zero-id SP "capabilities^{}" + NUL capability-list) + + list-of-refs = first-ref *other-ref + first-ref = PKT-LINE(obj-id SP refname + NUL capability-list) + + other-ref = PKT-LINE(other-tip / other-peeled) + other-tip = obj-id SP refname + other-peeled = obj-id SP refname "^{}" + + shallow = PKT-LINE("shallow" SP obj-id) + + capability-list = capability *(SP capability) + capability = 1*(LC_ALPHA / DIGIT / "-" / "_") + LC_ALPHA = %x61-7A +---- + +Server and client MUST use lowercase for obj-id, both MUST treat obj-id +as case-insensitive. + +See protocol-capabilities.txt for a list of allowed server capabilities +and descriptions. 
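+
+Tying this back to the vendored code above: the advertised-refs message is
+exactly what packp.AdvRefs decodes. A minimal sketch (illustrative; it
+assumes a reachable git:// daemon and the vendored go-git v5 packages):
+
+----
+package main
+
+import (
+	"fmt"
+	"log"
+	"net"
+
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
+)
+
+func main() {
+	conn, err := net.Dial("tcp", "example.com:9418")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+
+	// git-proto-request, framed as a single pkt-line (see Git Transport).
+	req := "git-upload-pack /schacon/gitbook.git\x00host=example.com\x00"
+	fmt.Fprintf(conn, "%04x%s", len(req)+4, req)
+
+	// Parse the advertised-refs answer with the vendored decoder.
+	ar := packp.NewAdvRefs()
+	if err := ar.Decode(conn); err != nil {
+		log.Fatal(err)
+	}
+
+	fmt.Println("capabilities:", ar.Capabilities.String())
+	for name, hash := range ar.References {
+		fmt.Println(hash, name)
+	}
+}
+----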
+ +Packfile Negotiation +-------------------- +After reference and capabilities discovery, the client can decide to +terminate the connection by sending a flush-pkt, telling the server it can +now gracefully terminate, and disconnect, when it does not need any pack +data. This can happen with the ls-remote command, and also can happen when +the client already is up-to-date. + +Otherwise, it enters the negotiation phase, where the client and +server determine what the minimal packfile necessary for transport is, +by telling the server what objects it wants, its shallow objects +(if any), and the maximum commit depth it wants (if any). The client +will also send a list of the capabilities it wants to be in effect, +out of what the server said it could do with the first 'want' line. + +---- + upload-request = want-list + *shallow-line + *1depth-request + flush-pkt + + want-list = first-want + *additional-want + + shallow-line = PKT-LINE("shallow" SP obj-id) + + depth-request = PKT-LINE("deepen" SP depth) / + PKT-LINE("deepen-since" SP timestamp) / + PKT-LINE("deepen-not" SP ref) + + first-want = PKT-LINE("want" SP obj-id SP capability-list) + additional-want = PKT-LINE("want" SP obj-id) + + depth = 1*DIGIT +---- + +Clients MUST send all the obj-ids it wants from the reference +discovery phase as 'want' lines. Clients MUST send at least one +'want' command in the request body. Clients MUST NOT mention an +obj-id in a 'want' command which did not appear in the response +obtained through ref discovery. + +The client MUST write all obj-ids which it only has shallow copies +of (meaning that it does not have the parents of a commit) as +'shallow' lines so that the server is aware of the limitations of +the client's history. + +The client now sends the maximum commit history depth it wants for +this transaction, which is the number of commits it wants from the +tip of the history, if any, as a 'deepen' line. A depth of 0 is the +same as not making a depth request. The client does not want to receive +any commits beyond this depth, nor does it want objects needed only to +complete those commits. Commits whose parents are not received as a +result are defined as shallow and marked as such in the server. This +information is sent back to the client in the next step. + +Once all the 'want's and 'shallow's (and optional 'deepen') are +transferred, clients MUST send a flush-pkt, to tell the server side +that it is done sending the list. + +Otherwise, if the client sent a positive depth request, the server +will determine which commits will and will not be shallow and +send this information to the client. If the client did not request +a positive depth, this step is skipped. + +---- + shallow-update = *shallow-line + *unshallow-line + flush-pkt + + shallow-line = PKT-LINE("shallow" SP obj-id) + + unshallow-line = PKT-LINE("unshallow" SP obj-id) +---- + +If the client has requested a positive depth, the server will compute +the set of commits which are no deeper than the desired depth. The set +of commits start at the client's wants. + +The server writes 'shallow' lines for each +commit whose parents will not be sent as a result. The server writes +an 'unshallow' line for each commit which the client has indicated is +shallow, but is no longer shallow at the currently requested depth +(that is, its parents will now be sent). The server MUST NOT mark +as unshallow anything which the client has not indicated was shallow. 
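+
+The upload-request grammar above is straightforward to frame by hand. A
+minimal sketch (illustrative only; the helper names are made up):
+
+----
+package main
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+// writeUploadRequest emits the want-list, an optional depth-request and
+// the closing flush-pkt, per the upload-request grammar above.
+func writeUploadRequest(w io.Writer, wants []string, caps string, depth int) error {
+	pkt := func(payload string) error {
+		_, err := fmt.Fprintf(w, "%04x%s", len(payload)+4, payload)
+		return err
+	}
+	for i, id := range wants {
+		line := "want " + id
+		if i == 0 && caps != "" {
+			line += " " + caps // capability-list rides on the first want
+		}
+		if err := pkt(line + "\n"); err != nil {
+			return err
+		}
+	}
+	if depth > 0 {
+		if err := pkt(fmt.Sprintf("deepen %d\n", depth)); err != nil {
+			return err
+		}
+	}
+	_, err := io.WriteString(w, "0000") // flush-pkt: done sending the list
+	return err
+}
+
+func main() {
+	_ = writeUploadRequest(os.Stdout,
+		[]string{"74730d410fcb6603ace96f1dc55ea6196122532d"},
+		"multi_ack side-band-64k ofs-delta", 1)
+}
+----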
+ +Now the client will send a list of the obj-ids it has using 'have' +lines, so the server can make a packfile that only contains the objects +that the client needs. In multi_ack mode, the canonical implementation +will send up to 32 of these at a time, then will send a flush-pkt. The +canonical implementation will skip ahead and send the next 32 immediately, +so that there is always a block of 32 "in-flight on the wire" at a time. + +---- + upload-haves = have-list + compute-end + + have-list = *have-line + have-line = PKT-LINE("have" SP obj-id) + compute-end = flush-pkt / PKT-LINE("done") +---- + +If the server reads 'have' lines, it then will respond by ACKing any +of the obj-ids the client said it had that the server also has. The +server will ACK obj-ids differently depending on which ack mode is +chosen by the client. + +In multi_ack mode: + + * the server will respond with 'ACK obj-id continue' for any common + commits. + + * once the server has found an acceptable common base commit and is + ready to make a packfile, it will blindly ACK all 'have' obj-ids + back to the client. + + * the server will then send a 'NAK' and then wait for another response + from the client - either a 'done' or another list of 'have' lines. + +In multi_ack_detailed mode: + + * the server will differentiate the ACKs where it is signaling + that it is ready to send data with 'ACK obj-id ready' lines, and + signals the identified common commits with 'ACK obj-id common' lines. + +Without either multi_ack or multi_ack_detailed: + + * upload-pack sends "ACK obj-id" on the first common object it finds. + After that it says nothing until the client gives it a "done". + + * upload-pack sends "NAK" on a flush-pkt if no common object + has been found yet. If one has been found, and thus an ACK + was already sent, it's silent on the flush-pkt. + +After the client has gotten enough ACK responses that it can determine +that the server has enough information to send an efficient packfile +(in the canonical implementation, this is determined when it has received +enough ACKs that it can color everything left in the --date-order queue +as common with the server, or the --date-order queue is empty), or the +client determines that it wants to give up (in the canonical implementation, +this is determined when the client sends 256 'have' lines without getting +any of them ACKed by the server - meaning there is nothing in common and +the server should just send all of its objects), then the client will send +a 'done' command. The 'done' command signals to the server that the client +is ready to receive its packfile data. + +However, the 256 limit *only* turns on in the canonical client +implementation if we have received at least one "ACK %s continue" +during a prior round. This helps to ensure that at least one common +ancestor is found before we give up entirely. + +Once the 'done' line is read from the client, the server will either +send a final 'ACK obj-id' or it will send a 'NAK'. 'obj-id' is the object +name of the last commit determined to be common. The server only sends +ACK after 'done' if there is at least one common base and multi_ack or +multi_ack_detailed is enabled. The server always sends NAK after 'done' +if there is no common base found. + +Then the server will start sending its packfile data. 
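+
+A client has to tell these server responses apart. A small sketch of the
+classification (illustrative only; it follows the grammar given below):
+
+----
+package main
+
+import (
+	"fmt"
+	"strings"
+)
+
+// classifyServerResponse inspects one pkt-line payload received from the
+// server during negotiation.
+func classifyServerResponse(payload string) (string, error) {
+	payload = strings.TrimSuffix(payload, "\n")
+	switch {
+	case payload == "NAK":
+		return "nak", nil
+	case strings.HasPrefix(payload, "ACK "):
+		fields := strings.Fields(strings.TrimPrefix(payload, "ACK "))
+		if len(fields) == 2 {
+			// multi_ack / multi_ack_detailed: continue, common or ready
+			return "ack-" + fields[1], nil
+		}
+		return "ack-final", nil // plain "ACK obj-id" after 'done'
+	}
+	return "", fmt.Errorf("unexpected server response %q", payload)
+}
+
+func main() {
+	for _, l := range []string{
+		"ACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n",
+		"NAK\n",
+		"ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n",
+	} {
+		kind, _ := classifyServerResponse(l)
+		fmt.Println(kind)
+	}
+}
----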
+
+----
+  server-response = *ack_multi ack / nak
+  ack_multi       = PKT-LINE("ACK" SP obj-id ack_status)
+  ack_status      = "continue" / "common" / "ready"
+  ack             = PKT-LINE("ACK" SP obj-id)
+  nak             = PKT-LINE("NAK")
+----
+
+A simple clone may look like this (with no 'have' lines):
+
+----
+   C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
+     side-band-64k ofs-delta\n
+   C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
+   C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
+   C: 0032want 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
+   C: 0032want 74730d410fcb6603ace96f1dc55ea6196122532d\n
+   C: 0000
+   C: 0009done\n
+
+   S: 0008NAK\n
+   S: [PACKFILE]
+----
+
+An incremental update (fetch) response might look like this:
+
+----
+   C: 0054want 74730d410fcb6603ace96f1dc55ea6196122532d multi_ack \
+     side-band-64k ofs-delta\n
+   C: 0032want 7d1665144a3a975c05f1f43902ddaf084e784dbe\n
+   C: 0032want 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a\n
+   C: 0000
+   C: 0032have 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01\n
+   C: [30 more have lines]
+   C: 0032have 74730d410fcb6603ace96f1dc55ea6196122532d\n
+   C: 0000
+
+   S: 003aACK 7e47fe2bd8d01d481f44d7af0531bd93d3b21c01 continue\n
+   S: 003aACK 74730d410fcb6603ace96f1dc55ea6196122532d continue\n
+   S: 0008NAK\n
+
+   C: 0009done\n
+
+   S: 0031ACK 74730d410fcb6603ace96f1dc55ea6196122532d\n
+   S: [PACKFILE]
+----
+
+
+Packfile Data
+-------------
+
+Now that the client and server have finished negotiating the minimal
+amount of data that needs to be sent to the client, the server will
+construct and send the required data in packfile format.
+
+See pack-format.txt for what the packfile itself actually looks like.
+
+If 'side-band' or 'side-band-64k' capabilities have been specified by
+the client, the server will send the packfile data multiplexed.
+
+Each packet starts with the packet-line length of the amount of data
+that follows, followed by a single byte specifying the sideband the
+following data is coming in on.
+
+In 'side-band' mode, it will send up to 999 data bytes plus 1 control
+code, for a total of up to 1000 bytes in a pkt-line. In 'side-band-64k'
+mode it will send up to 65519 data bytes plus 1 control code, for a
+total of up to 65520 bytes in a pkt-line.
+
+The sideband byte will be a '1', '2' or a '3'. Sideband '1' will contain
+packfile data, sideband '2' will be used for progress information that the
+client will generally print to stderr and sideband '3' is used for error
+information.
+
+If no 'side-band' capability was specified, the server will stream the
+entire packfile without multiplexing.
+
+
+Pushing Data To a Server
+------------------------
+
+Pushing data to a server will invoke the 'receive-pack' process on the
+server, which will allow the client to tell it which references it should
+update and then send all the data the server will need for those new
+references to be complete. Once all the data is received and validated,
+the server will then update its references to what the client specified.
+
+Authentication
+--------------
+
+The protocol itself contains no authentication mechanisms. That is to be
+handled by the transport, such as SSH, before the 'receive-pack' process is
+invoked. If 'receive-pack' is configured over the Git transport, those
+repositories will be writable by anyone who can access that port (9418) as
+that transport is unauthenticated.
+
+Reference Discovery
+-------------------
+
+The reference discovery phase is done nearly the same way as it is in the
+fetching protocol.
Each reference obj-id and name on the server is sent
+in packet-line format to the client, followed by a flush-pkt. The only
+real difference is the capability listing: the only possible values are
+'report-status', 'delete-refs', 'ofs-delta' and 'push-options'.
+
+Reference Update Request and Packfile Transfer
+----------------------------------------------
+
+Once the client knows what references the server is at, it can send a
+list of reference update requests. For each reference on the server
+that it wants to update, it sends a line listing the obj-id currently on
+the server, the obj-id the client would like to update it to and the name
+of the reference.
+
+This list is followed by a flush-pkt. Then the push options are transmitted
+one per packet followed by another flush-pkt. After that the packfile that
+should contain all the objects that the server will need to complete the new
+references will be sent. A short sketch of building such a request with the
+vendored go-git types follows the push certificate header descriptions
+below.
+
+----
+  update-request    =  *shallow ( command-list | push-cert ) [packfile]
+
+  shallow           =  PKT-LINE("shallow" SP obj-id)
+
+  command-list      =  PKT-LINE(command NUL capability-list)
+                       *PKT-LINE(command)
+                       flush-pkt
+
+  command           =  create / delete / update
+  create            =  zero-id SP new-id SP name
+  delete            =  old-id SP zero-id SP name
+  update            =  old-id SP new-id SP name
+
+  old-id            =  obj-id
+  new-id            =  obj-id
+
+  push-cert         =  PKT-LINE("push-cert" NUL capability-list LF)
+                       PKT-LINE("certificate version 0.1" LF)
+                       PKT-LINE("pusher" SP ident LF)
+                       PKT-LINE("pushee" SP url LF)
+                       PKT-LINE("nonce" SP nonce LF)
+                       PKT-LINE(LF)
+                       *PKT-LINE(command LF)
+                       *PKT-LINE(gpg-signature-lines LF)
+                       PKT-LINE("push-cert-end" LF)
+
+  packfile          =  "PACK" 28*(OCTET)
+----
+
+If the receiving end does not support delete-refs, the sending end MUST
+NOT ask for a delete command.
+
+If the receiving end does not support push-cert, the sending end
+MUST NOT send a push-cert command. When a push-cert command is
+sent, command-list MUST NOT be sent; the commands recorded in the
+push certificate are used instead.
+
+The packfile MUST NOT be sent if the only command used is 'delete'.
+
+A packfile MUST be sent if either create or update command is used,
+even if the server already has all the necessary objects. In this
+case the client MUST send an empty packfile. The only time this
+is likely to happen is if the client is creating
+a new branch or a tag that points to an existing obj-id.
+
+The server will receive the packfile, unpack it, then validate, for each
+reference that is being updated, that it hasn't changed while the request
+was being processed (the obj-id is still the same as the old-id), and
+it will run any update hooks to make sure that the update is acceptable.
+If all of that is fine, the server will then update the references.
+
+Push Certificate
+----------------
+
+A push certificate begins with a set of header lines. After the
+header and an empty line, the protocol commands follow, one per
+line. Note that the trailing LF in push-cert PKT-LINEs is _not_
+optional; it must be present.
+
+Currently, the following header fields are defined:
+
+`pusher` ident::
+	Identify the GPG key in "Human Readable Name <email@address>"
+	format.
+
+`pushee` url::
+	The repository URL (anonymized, if the URL contains
+	authentication material) the user who ran `git push`
+	intended to push into.
+
+`nonce` nonce::
+	The 'nonce' string the receiving repository asked the
+	pushing user to include in the certificate, to prevent
+	replay attacks.
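+
+As noted above, here is a brief sketch of assembling such an update request
+with the go-git types vendored in this patch (packp.ReferenceUpdateRequest
+and packp.Command are defined alongside the files shown here; the obj-ids
+are reused from the report-status example below):
+
+----
+	req := packp.NewReferenceUpdateRequest()
+	req.Commands = []*packp.Command{{
+		Name: plumbing.ReferenceName("refs/heads/debug"),
+		Old:  plumbing.NewHash("7d1665144a3a975c05f1f43902ddaf084e784dbe"),
+		New:  plumbing.NewHash("74730d410fcb6603ace96f1dc55ea6196122532d"),
+	}}
+	// a create uses plumbing.ZeroHash as Old; a delete uses it as New
+----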
+
+The GPG signature lines are a detached signature for the contents
+recorded in the push certificate before the signature block begins.
+The detached signature is used to certify that the commands were
+given by the pusher, who must be the signer.
+
+Report Status
+-------------
+
+After receiving the pack data from the sender, the receiver sends a
+report if the 'report-status' capability is in effect.
+It is a short listing of what happened in that update. It will first
+list the status of the packfile unpacking as either 'unpack ok' or
+'unpack [error]'. Then it will list the status for each of the references
+that it tried to update. Each line is either 'ok [refname]' if the
+update was successful, or 'ng [refname] [error]' if the update was not.
+
+----
+  report-status     = unpack-status
+                      1*(command-status)
+                      flush-pkt
+
+  unpack-status     = PKT-LINE("unpack" SP unpack-result)
+  unpack-result     = "ok" / error-msg
+
+  command-status    = command-ok / command-fail
+  command-ok        = PKT-LINE("ok" SP refname)
+  command-fail      = PKT-LINE("ng" SP refname SP error-msg)
+
+  error-msg         = 1*(OCTET) ; where not "ok"
+----
+
+Updates can be unsuccessful for a number of reasons. The reference can have
+changed since the reference discovery phase was originally sent, meaning
+someone pushed in the meantime. The reference being pushed could be a
+non-fast-forward reference and the update hooks or configuration could be
+set to not allow that, etc. Also, some references can be updated while others
+can be rejected.
+
+An example client/server communication might look like this:
+
+----
+   S: 007c74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/local\0report-status delete-refs ofs-delta\n
+   S: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe refs/heads/debug\n
+   S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/master\n
+   S: 003f74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/team\n
+   S: 0000
+
+   C: 003e7d1665144a3a975c05f1f43902ddaf084e784dbe 74730d410fcb6603ace96f1dc55ea6196122532d refs/heads/debug\n
+   C: 003e74730d410fcb6603ace96f1dc55ea6196122532d 5a3f6be755bbb7deae50065988cbfa1ffa9ab68a refs/heads/master\n
+   C: 0000
+   C: [PACKDATA]
+
+   S: 000eunpack ok\n
+   S: 0018ok refs/heads/debug\n
+   S: 002ang refs/heads/master non-fast-forward\n
+----
+*/
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go
new file mode 100644
index 0000000000..e2a0a108b2
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/report_status.go
@@ -0,0 +1,165 @@
+package packp
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+const (
+	ok = "ok"
+)
+
+// ReportStatus is a report status message, as used in the git-receive-pack
+// process whenever the 'report-status' capability is negotiated.
+type ReportStatus struct {
+	UnpackStatus    string
+	CommandStatuses []*CommandStatus
+}
+
+// NewReportStatus creates a new ReportStatus message.
+func NewReportStatus() *ReportStatus {
+	return &ReportStatus{}
+}
+
+// Error returns the first error, if any.
+func (s *ReportStatus) Error() error {
+	if s.UnpackStatus != ok {
+		return fmt.Errorf("unpack error: %s", s.UnpackStatus)
+	}
+
+	for _, s := range s.CommandStatuses {
+		if err := s.Error(); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// Encode writes the report status to a writer.
+func (s *ReportStatus) Encode(w io.Writer) error { + e := pktline.NewEncoder(w) + if err := e.Encodef("unpack %s\n", s.UnpackStatus); err != nil { + return err + } + + for _, cs := range s.CommandStatuses { + if err := cs.encode(w); err != nil { + return err + } + } + + return e.Flush() +} + +// Decode reads from the given reader and decodes a report-status message. It +// does not read more input than what is needed to fill the report status. +func (s *ReportStatus) Decode(r io.Reader) error { + scan := pktline.NewScanner(r) + if err := s.scanFirstLine(scan); err != nil { + return err + } + + if err := s.decodeReportStatus(scan.Bytes()); err != nil { + return err + } + + flushed := false + for scan.Scan() { + b := scan.Bytes() + if isFlush(b) { + flushed = true + break + } + + if err := s.decodeCommandStatus(b); err != nil { + return err + } + } + + if !flushed { + return fmt.Errorf("missing flush") + } + + return scan.Err() +} + +func (s *ReportStatus) scanFirstLine(scan *pktline.Scanner) error { + if scan.Scan() { + return nil + } + + if scan.Err() != nil { + return scan.Err() + } + + return io.ErrUnexpectedEOF +} + +func (s *ReportStatus) decodeReportStatus(b []byte) error { + if isFlush(b) { + return fmt.Errorf("premature flush") + } + + b = bytes.TrimSuffix(b, eol) + + line := string(b) + fields := strings.SplitN(line, " ", 2) + if len(fields) != 2 || fields[0] != "unpack" { + return fmt.Errorf("malformed unpack status: %s", line) + } + + s.UnpackStatus = fields[1] + return nil +} + +func (s *ReportStatus) decodeCommandStatus(b []byte) error { + b = bytes.TrimSuffix(b, eol) + + line := string(b) + fields := strings.SplitN(line, " ", 3) + status := ok + if len(fields) == 3 && fields[0] == "ng" { + status = fields[2] + } else if len(fields) != 2 || fields[0] != "ok" { + return fmt.Errorf("malformed command status: %s", line) + } + + cs := &CommandStatus{ + ReferenceName: plumbing.ReferenceName(fields[1]), + Status: status, + } + s.CommandStatuses = append(s.CommandStatuses, cs) + return nil +} + +// CommandStatus is the status of a reference in a report status. +// See ReportStatus struct. +type CommandStatus struct { + ReferenceName plumbing.ReferenceName + Status string +} + +// Error returns the error, if any. 
+func (s *CommandStatus) Error() error {
+	if s.Status == ok {
+		return nil
+	}
+
+	return fmt.Errorf("command error on %s: %s",
+		s.ReferenceName.String(), s.Status)
+}
+
+func (s *CommandStatus) encode(w io.Writer) error {
+	e := pktline.NewEncoder(w)
+	if s.Error() == nil {
+		return e.Encodef("ok %s\n", s.ReferenceName.String())
+	}
+
+	return e.Encodef("ng %s %s\n", s.ReferenceName.String(), s.Status)
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go
new file mode 100644
index 0000000000..fe4fe68879
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/shallowupd.go
@@ -0,0 +1,92 @@
+package packp
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+const (
+	shallowLineLen   = 48
+	unshallowLineLen = 50
+)
+
+type ShallowUpdate struct {
+	Shallows   []plumbing.Hash
+	Unshallows []plumbing.Hash
+}
+
+func (r *ShallowUpdate) Decode(reader io.Reader) error {
+	s := pktline.NewScanner(reader)
+
+	for s.Scan() {
+		line := s.Bytes()
+		line = bytes.TrimSpace(line)
+
+		var err error
+		switch {
+		case bytes.HasPrefix(line, shallow):
+			err = r.decodeShallowLine(line)
+		case bytes.HasPrefix(line, unshallow):
+			err = r.decodeUnshallowLine(line)
+		case bytes.Equal(line, pktline.Flush):
+			return nil
+		}
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return s.Err()
+}
+
+func (r *ShallowUpdate) decodeShallowLine(line []byte) error {
+	hash, err := r.decodeLine(line, shallow, shallowLineLen)
+	if err != nil {
+		return err
+	}
+
+	r.Shallows = append(r.Shallows, hash)
+	return nil
+}
+
+func (r *ShallowUpdate) decodeUnshallowLine(line []byte) error {
+	hash, err := r.decodeLine(line, unshallow, unshallowLineLen)
+	if err != nil {
+		return err
+	}
+
+	r.Unshallows = append(r.Unshallows, hash)
+	return nil
+}
+
+func (r *ShallowUpdate) decodeLine(line, prefix []byte, expLen int) (plumbing.Hash, error) {
+	if len(line) != expLen {
+		return plumbing.ZeroHash, fmt.Errorf("malformed %s%q", prefix, line)
+	}
+
+	raw := string(line[expLen-40 : expLen])
+	return plumbing.NewHash(raw), nil
+}
+
+func (r *ShallowUpdate) Encode(w io.Writer) error {
+	e := pktline.NewEncoder(w)
+
+	for _, h := range r.Shallows {
+		if err := e.Encodef("%s%s\n", shallow, h.String()); err != nil {
+			return err
+		}
+	}
+
+	for _, h := range r.Unshallows {
+		if err := e.Encodef("%s%s\n", unshallow, h.String()); err != nil {
+			return err
+		}
+	}
+
+	return e.Flush()
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go
new file mode 100644
index 0000000000..de5001281f
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/common.go
@@ -0,0 +1,33 @@
+package sideband
+
+// Type is the sideband type: "side-band" or "side-band-64k"
+type Type int8
+
+const (
+	// Sideband is the legacy sideband type, with up to 1000-byte messages
+	Sideband Type = iota
+	// Sideband64k is the sideband type with up to 65519-byte messages
+	Sideband64k Type = iota
+
+	// MaxPackedSize is the maximum packet size for the Sideband type
+	MaxPackedSize = 1000
+	// MaxPackedSize64k is the maximum packet size for the Sideband64k type
+	MaxPackedSize64k = 65520
+)
+
+// Channel is a sideband channel
+type Channel byte
+
+// WithPayload encodes the payload as a message on the channel
+func (ch Channel) WithPayload(payload []byte) []byte {
+	return append([]byte{byte(ch)}, payload...)
+}
+
+const (
+	// PackData is the channel that carries the packfile contents
+	PackData Channel = 1
+	// ProgressMessage is the channel that carries progress messages
+	ProgressMessage Channel = 2
+	// ErrorMessage is the channel for a fatal error message, sent just
+	// before the stream aborts
+	ErrorMessage Channel = 3
+)
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go
new file mode 100644
index 0000000000..0116f962ef
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go
@@ -0,0 +1,148 @@
+package sideband
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+// ErrMaxPackedExceeded is returned by Read when the maximum packed size is
+// exceeded
+var ErrMaxPackedExceeded = errors.New("max. packed size exceeded")
+
+// Progress is where the progress information is written
+type Progress interface {
+	io.Writer
+}
+
+// Demuxer demultiplexes the progress reports and error info interleaved with the
+// packfile itself.
+//
+// A sideband has three different channels: the main one, called PackData,
+// contains the packfile data; the ErrorMessage channel contains server errors;
+// and the last one, the ProgressMessage channel, carries information about the
+// ongoing task on the server (optional, and can be suppressed by sending the
+// NoProgress or Quiet capabilities to the server).
+//
+// In order to demultiplex the data stream, the `Read` method should be called
+// to retrieve the PackData channel; the incoming data from the ProgressMessage
+// channel is written to `Progress` (if set). If any message is retrieved from
+// the ErrorMessage channel, an error is returned and it can be assumed that
+// the connection has been closed.
+type Demuxer struct {
+	t Type
+	r io.Reader
+	s *pktline.Scanner
+
+	max     int
+	pending []byte
+
+	// Progress is where the progress messages are written
+	Progress Progress
+}
+
+// NewDemuxer returns a new Demuxer for the given t that reads from r
+func NewDemuxer(t Type, r io.Reader) *Demuxer {
+	max := MaxPackedSize64k
+	if t == Sideband {
+		max = MaxPackedSize
+	}
+
+	return &Demuxer{
+		t:   t,
+		r:   r,
+		max: max,
+		s:   pktline.NewScanner(r),
+	}
+}
+
+// Read reads up to len(b) bytes from the PackData channel into b. An error is
+// returned if the underlying read fails or if a message arrives on the
+// ErrorMessage channel.
+//
+// When a ProgressMessage is read, it is not copied to b; instead, it is
+// written to Progress.
+func (d *Demuxer) Read(b []byte) (n int, err error) {
+	var read, req int
+
+	req = len(b)
+	for read < req {
+		n, err := d.doRead(b[read:req])
+		read += n
+
+		if err != nil {
+			return read, err
+		}
+	}
+
+	return read, nil
+}
+
+func (d *Demuxer) doRead(b []byte) (int, error) {
+	read, err := d.nextPackData()
+	size := len(read)
+	wanted := len(b)
+
+	if size > wanted {
+		d.pending = read[wanted:]
+	}
+
+	if wanted > size {
+		wanted = size
+	}
+
+	size = copy(b, read[:wanted])
+	return size, err
+}
+
+func (d *Demuxer) nextPackData() ([]byte, error) {
+	content := d.getPending()
+	if len(content) != 0 {
+		return content, nil
+	}
+
+	if !d.s.Scan() {
+		if err := d.s.Err(); err != nil {
+			return nil, err
+		}
+
+		return nil, io.EOF
+	}
+
+	content = d.s.Bytes()
+
+	size := len(content)
+	if size == 0 {
+		return nil, nil
+	} else if size > d.max {
+		return nil, ErrMaxPackedExceeded
+	}
+
+	switch Channel(content[0]) {
+	case PackData:
+		return content[1:], nil
+	case ProgressMessage:
+		if d.Progress != nil {
+			_, err := d.Progress.Write(content[1:])
+			return nil, err
+		}
+	case ErrorMessage:
+		return nil, fmt.Errorf("unexpected error: %s", content[1:])
+	default:
+		return nil, fmt.Errorf("unknown channel %s", content)
+	}
+
+	return nil, nil
+}
+
+func (d *Demuxer) getPending() (b []byte) {
+	if len(d.pending) == 0 {
+		return nil
+	}
+
+	content := d.pending
+	d.pending = nil
+
+	return content
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go
new file mode 100644
index 0000000000..c5d2429529
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/doc.go
@@ -0,0 +1,31 @@
+// Package sideband implements a sideband multiplexer/demultiplexer
+package sideband
+
+// If 'side-band' or 'side-band-64k' capabilities have been specified by
+// the client, the server will send the packfile data multiplexed.
+//
+// Either mode indicates that the packfile data will be streamed broken
+// up into packets of up to either 1000 bytes in the case of 'side_band',
+// or 65520 bytes in the case of 'side_band_64k'. Each packet is made up
+// of a leading 4-byte pkt-line length of how much data is in the packet,
+// followed by a 1-byte stream code, followed by the actual data.
+//
+// The stream code can be one of:
+//
+//	1 - pack data
+//	2 - progress messages
+//	3 - fatal error message just before stream aborts
+//
+// The "side-band-64k" capability came about as a way for newer clients
+// that can handle much larger packets to request packets that are
+// actually crammed nearly full, while maintaining backward compatibility
+// for the older clients.
+//
+// Further, with side-band and its up to 1000-byte messages, it's actually
+// 999 bytes of payload and 1 byte for the stream code. With side-band-64k,
+// same deal, you have up to 65519 bytes of data and 1 byte for the stream
+// code.
+//
+// The client MUST send at most one of "side-band" and "side-band-64k".
+// The server MUST diagnose it as an error if the client requests both.
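+//
+// As a rough usage sketch of the Demuxer in this package (and of the Muxer
+// vendored next): assuming `conn` is an io.Reader positioned at the start
+// of the multiplexed stream and `dst` is where the packfile should be
+// written, demultiplexing reduces to an io.Copy:
+//
+//	d := sideband.NewDemuxer(sideband.Sideband64k, conn)
+//	d.Progress = os.Stderr    // channel 2 (progress) is echoed to stderr
+//	_, err := io.Copy(dst, d) // Read returns only channel 1 (pack data)
+//	// a channel 3 message surfaces as a non-nil error from Read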
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go
new file mode 100644
index 0000000000..d51ac82695
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/muxer.go
@@ -0,0 +1,65 @@
+package sideband
+
+import (
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+// Muxer multiplexes the packfile along with the progress messages and the
+// error information. The multiplexing is performed using the pktline format.
+type Muxer struct {
+	max int
+	e   *pktline.Encoder
+}
+
+const chLen = 1
+
+// NewMuxer returns a new Muxer for the given t that writes on w.
+//
+// If t is equal to `Sideband`, the maximum pack size is set to MaxPackedSize;
+// if any other value is given, it is set to MaxPackedSize64k, which is the
+// maximum length of a line in pktline format.
+func NewMuxer(t Type, w io.Writer) *Muxer {
+	max := MaxPackedSize64k
+	if t == Sideband {
+		max = MaxPackedSize
+	}
+
+	return &Muxer{
+		max: max - chLen,
+		e:   pktline.NewEncoder(w),
+	}
+}
+
+// Write writes p to the PackData channel
+func (m *Muxer) Write(p []byte) (int, error) {
+	return m.WriteChannel(PackData, p)
+}
+
+// WriteChannel writes p to the given channel. This method can be used with
+// any channel, but it is recommended to use it only for the ProgressMessage
+// and ErrorMessage channels, and to use Write for the PackData channel
+func (m *Muxer) WriteChannel(t Channel, p []byte) (int, error) {
+	wrote := 0
+	size := len(p)
+	for wrote < size {
+		n, err := m.doWrite(t, p[wrote:])
+		wrote += n
+
+		if err != nil {
+			return wrote, err
+		}
+	}
+
+	return wrote, nil
+}
+
+func (m *Muxer) doWrite(ch Channel, p []byte) (int, error) {
+	sz := len(p)
+	if sz > m.max {
+		sz = m.max
+	}
+
+	return sz, m.e.Encode(ch.WithPayload(p[:sz]))
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go
new file mode 100644
index 0000000000..b3a7ee804c
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go
@@ -0,0 +1,127 @@
+package packp
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+const ackLineLen = 44
+
+// ServerResponse is the object acknowledgement message from the upload-pack
+// service
+type ServerResponse struct {
+	ACKs []plumbing.Hash
+}
+
+// Decode decodes the response into the struct; isMultiACK should be true if
+// the request was made with the multi_ack or multi_ack_detailed capability.
+func (r *ServerResponse) Decode(reader *bufio.Reader, isMultiACK bool) error {
+	// TODO: implement support for multi_ack or multi_ack_detailed responses
+	if isMultiACK {
+		return errors.New("multi_ack and multi_ack_detailed are not supported")
+	}
+
+	s := pktline.NewScanner(reader)
+
+	for s.Scan() {
+		line := s.Bytes()
+
+		if err := r.decodeLine(line); err != nil {
+			return err
+		}
+
+		// we need to detect when the response header ends and a packfile
+		// header begins; some requests to the git daemon produce a duplicate
+		// ACK header even when multi_ack is not supported.
+		stop, err := r.stopReading(reader)
+		if err != nil {
+			return err
+		}
+
+		if stop {
+			break
+		}
+	}
+
+	return s.Err()
+}
+
+// stopReading peeks at the buffer, without moving the read pointer, to detect
+// whether a valid command such as ACK or NAK follows; if not, reading stops.
+func (r *ServerResponse) stopReading(reader *bufio.Reader) (bool, error) {
+	ahead, err := reader.Peek(7)
+	if err == io.EOF {
+		return true, nil
+	}
+
+	if err != nil {
+		return false, err
+	}
+
+	if len(ahead) > 4 && r.isValidCommand(ahead[0:3]) {
+		return false, nil
+	}
+
+	if len(ahead) == 7 && r.isValidCommand(ahead[4:]) {
+		return false, nil
+	}
+
+	return true, nil
+}
+
+func (r *ServerResponse) isValidCommand(b []byte) bool {
+	commands := [][]byte{ack, nak}
+	for _, c := range commands {
+		if bytes.Equal(b, c) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (r *ServerResponse) decodeLine(line []byte) error {
+	if len(line) == 0 {
+		return fmt.Errorf("unexpected flush")
+	}
+
+	if bytes.Equal(line[0:3], ack) {
+		return r.decodeACKLine(line)
+	}
+
+	if bytes.Equal(line[0:3], nak) {
+		return nil
+	}
+
+	return fmt.Errorf("unexpected content %q", string(line))
+}
+
+func (r *ServerResponse) decodeACKLine(line []byte) error {
+	if len(line) < ackLineLen {
+		return fmt.Errorf("malformed ACK %q", line)
+	}
+
+	sp := bytes.Index(line, []byte(" "))
+	h := plumbing.NewHash(string(line[sp+1 : sp+41]))
+	r.ACKs = append(r.ACKs, h)
+	return nil
+}
+
+// Encode encodes the ServerResponse into a writer.
+func (r *ServerResponse) Encode(w io.Writer) error {
+	if len(r.ACKs) > 1 {
+		return errors.New("multi_ack and multi_ack_detailed are not supported")
+	}
+
+	e := pktline.NewEncoder(w)
+	if len(r.ACKs) == 0 {
+		return e.Encodef("%s\n", nak)
+	}
+
+	return e.Encodef("%s %s\n", ack, r.ACKs[0].String())
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go
new file mode 100644
index 0000000000..ddec06e996
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go
@@ -0,0 +1,168 @@
+package packp
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/protocol/packp/capability"
+)
+
+// UploadRequest values represent the information transmitted on an
+// upload-request message. Values from this type are not zero-value
+// safe; use the New function instead.
+// This is a low-level type; use UploadPackRequest instead.
+type UploadRequest struct {
+	Capabilities *capability.List
+	Wants        []plumbing.Hash
+	Shallows     []plumbing.Hash
+	Depth        Depth
+}
+
+// Depth values store the desired depth of the requested packfile: see
+// DepthCommits, DepthSince and DepthReference.
+type Depth interface {
+	isDepth()
+	IsZero() bool
+}
+
+// DepthCommits values store the maximum number of requested commits in
+// the packfile. Zero means infinite. A negative value will have
+// undefined consequences.
+type DepthCommits int
+
+func (d DepthCommits) isDepth() {}
+
+func (d DepthCommits) IsZero() bool {
+	return d == 0
+}
+
+// DepthSince values request only commits newer than the specified time.
+type DepthSince time.Time
+
+func (d DepthSince) isDepth() {}
+
+func (d DepthSince) IsZero() bool {
+	return time.Time(d).IsZero()
+}
+
+// DepthReference values request only commits not found in the specified
+// reference.
+type DepthReference string
+
+func (d DepthReference) isDepth() {}
+
+func (d DepthReference) IsZero() bool {
+	return string(d) == ""
+}
+
+// NewUploadRequest returns a pointer to a new UploadRequest value, ready to be
+// used. It has no capabilities, wants or shallows and an infinite depth. Please
+// note that to encode an upload-request it has to have at least one wanted hash.
+func NewUploadRequest() *UploadRequest {
+	return &UploadRequest{
+		Capabilities: capability.NewList(),
+		Wants:        []plumbing.Hash{},
+		Shallows:     []plumbing.Hash{},
+		Depth:        DepthCommits(0),
+	}
+}
+
+// NewUploadRequestFromCapabilities returns a pointer to a new UploadRequest
+// value whose request capabilities are filled with the most optimal ones,
+// based on the adv value (advertised capabilities). The returned
+// UploadRequest has no wants or shallows and an infinite depth.
+func NewUploadRequestFromCapabilities(adv *capability.List) *UploadRequest {
+	r := NewUploadRequest()
+
+	if adv.Supports(capability.MultiACKDetailed) {
+		r.Capabilities.Set(capability.MultiACKDetailed)
+	} else if adv.Supports(capability.MultiACK) {
+		r.Capabilities.Set(capability.MultiACK)
+	}
+
+	if adv.Supports(capability.Sideband64k) {
+		r.Capabilities.Set(capability.Sideband64k)
+	} else if adv.Supports(capability.Sideband) {
+		r.Capabilities.Set(capability.Sideband)
+	}
+
+	if adv.Supports(capability.ThinPack) {
+		r.Capabilities.Set(capability.ThinPack)
+	}
+
+	if adv.Supports(capability.OFSDelta) {
+		r.Capabilities.Set(capability.OFSDelta)
+	}
+
+	if adv.Supports(capability.Agent) {
+		r.Capabilities.Set(capability.Agent, capability.DefaultAgent)
+	}
+
+	return r
+}
+
+// Validate validates the content of UploadRequest, according to the following
+// rules:
+// - Wants MUST have at least one reference
+// - capability.Shallow MUST be present if Shallows is not empty
+// - if a non-zero DepthCommits is given, capability.Shallow MUST be present
+// - if a DepthSince is given, capability.DeepenSince MUST be present
+// - if a DepthReference is given, capability.DeepenNot MUST be present
+// - MUST contain at most one of capability.Sideband and capability.Sideband64k
+// - MUST contain at most one of capability.MultiACK and capability.MultiACKDetailed
+func (req *UploadRequest) Validate() error {
+	if len(req.Wants) == 0 {
+		return fmt.Errorf("want can't be empty")
+	}
+
+	if err := req.validateRequiredCapabilities(); err != nil {
+		return err
+	}
+
+	if err := req.validateConflictCapabilities(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (req *UploadRequest) validateRequiredCapabilities() error {
+	msg := "missing capability %s"
+
+	if len(req.Shallows) != 0 && !req.Capabilities.Supports(capability.Shallow) {
+		return fmt.Errorf(msg, capability.Shallow)
+	}
+
+	switch req.Depth.(type) {
+	case DepthCommits:
+		if req.Depth != DepthCommits(0) {
+			if !req.Capabilities.Supports(capability.Shallow) {
+				return fmt.Errorf(msg, capability.Shallow)
+			}
+		}
+	case DepthSince:
+		if !req.Capabilities.Supports(capability.DeepenSince) {
+			return fmt.Errorf(msg, capability.DeepenSince)
+		}
+	case DepthReference:
+		if !req.Capabilities.Supports(capability.DeepenNot) {
+			return fmt.Errorf(msg, capability.DeepenNot)
+		}
+	}
+
+	return nil
+}
+
+func (req *UploadRequest) validateConflictCapabilities() error {
+	msg := "capabilities %s and %s are mutually exclusive"
+	if req.Capabilities.Supports(capability.Sideband) &&
+		req.Capabilities.Supports(capability.Sideband64k) {
+		return fmt.Errorf(msg, capability.Sideband, capability.Sideband64k)
+	}
+
+	if req.Capabilities.Supports(capability.MultiACK) &&
+		req.Capabilities.Supports(capability.MultiACKDetailed) {
+		return fmt.Errorf(msg, capability.MultiACK, capability.MultiACKDetailed)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go
b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go
new file mode 100644
index 0000000000..895a3bf6dd
--- /dev/null
+++ b/vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_decode.go
@@ -0,0 +1,257 @@
+package packp
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/go-git/go-git/v5/plumbing"
+	"github.com/go-git/go-git/v5/plumbing/format/pktline"
+)
+
+// Decode reads the next upload-request from its input and
+// stores it in the UploadRequest.
+func (req *UploadRequest) Decode(r io.Reader) error {
+	d := newUlReqDecoder(r)
+	return d.Decode(req)
+}
+
+type ulReqDecoder struct {
+	s     *pktline.Scanner // a pkt-line scanner from the input stream
+	line  []byte           // current pkt-line contents, use d.nextLine() to make it advance
+	nLine int              // current pkt-line number for debugging, begins at 1
+	err   error            // sticky error, use the d.error() method to fill this out
+	data  *UploadRequest   // parsed data is stored here
+}
+
+func newUlReqDecoder(r io.Reader) *ulReqDecoder {
+	return &ulReqDecoder{
+		s: pktline.NewScanner(r),
+	}
+}
+
+func (d *ulReqDecoder) Decode(v *UploadRequest) error {
+	d.data = v
+
+	for state := d.decodeFirstWant; state != nil; {
+		state = state()
+	}
+
+	return d.err
+}
+
+// error fills out the parser sticky error
+func (d *ulReqDecoder) error(format string, a ...interface{}) {
+	msg := fmt.Sprintf(
+		"pkt-line %d: %s", d.nLine,
+		fmt.Sprintf(format, a...),
+	)
+
+	d.err = NewErrUnexpectedData(msg, d.line)
+}
+
+// nextLine reads a new pkt-line from the scanner, makes its payload available
+// as d.line and increments d.nLine. A successful invocation returns true;
+// otherwise, false is returned and the sticky error is filled out
+// accordingly. Trims eols at the end of the payloads.
+func (d *ulReqDecoder) nextLine() bool {
+	d.nLine++
+
+	if !d.s.Scan() {
+		if d.err = d.s.Err(); d.err != nil {
+			return false
+		}
+
+		d.error("EOF")
+		return false
+	}
+
+	d.line = d.s.Bytes()
+	d.line = bytes.TrimSuffix(d.line, eol)
+
+	return true
+}
+
+// Expected format: want <hash>[ capabilities]
+func (d *ulReqDecoder) decodeFirstWant() stateFn {
+	if ok := d.nextLine(); !ok {
+		return nil
+	}
+
+	if !bytes.HasPrefix(d.line, want) {
+		d.error("missing 'want ' prefix")
+		return nil
+	}
+	d.line = bytes.TrimPrefix(d.line, want)
+
+	hash, ok := d.readHash()
+	if !ok {
+		return nil
+	}
+	d.data.Wants = append(d.data.Wants, hash)
+
+	return d.decodeCaps
+}
+
+func (d *ulReqDecoder) readHash() (plumbing.Hash, bool) {
+	if len(d.line) < hashSize {
+		d.err = fmt.Errorf("malformed hash: %v", d.line)
+		return plumbing.ZeroHash, false
+	}
+
+	var hash plumbing.Hash
+	if _, err := hex.Decode(hash[:], d.line[:hashSize]); err != nil {
+		d.error("invalid hash text: %s", err)
+		return plumbing.ZeroHash, false
+	}
+	d.line = d.line[hashSize:]
+
+	return hash, true
+}
+
+// Expected format: sp cap1 sp cap2 sp cap3...
+func (d *ulReqDecoder) decodeCaps() stateFn {
+	d.line = bytes.TrimPrefix(d.line, sp)
+	if err := d.data.Capabilities.Decode(d.line); err != nil {
+		d.error("invalid capabilities: %s", err)
+	}
+
+	return d.decodeOtherWants
+}
+
+// Expected format: want <hash>
+func (d *ulReqDecoder) decodeOtherWants() stateFn {
+	if ok := d.nextLine(); !ok {
+		return nil
+	}
+
+	if bytes.HasPrefix(d.line, shallow) {
+		return d.decodeShallow
+	}
+
+	if bytes.HasPrefix(d.line, deepen) {
+		return d.decodeDeepen
+	}
+
+	if len(d.line) == 0 {
+		return nil
+	}
+
+	if !bytes.HasPrefix(d.line, want) {
+		d.error("unexpected payload while expecting a want: %q", d.line)
+		return nil
+	}
+	d.line = bytes.TrimPrefix(d.line, want)
+
+	hash, ok := d.readHash()
+	if !ok {
+		return nil
+	}
+	d.data.Wants = append(d.data.Wants, hash)
+
+	return d.decodeOtherWants
+}
+
+// Expected format: shallow <hash>
+func (d *ulReqDecoder) decodeShallow() stateFn {
+	if bytes.HasPrefix(d.line, deepen) {
+		return d.decodeDeepen
+	}
+
+	if len(d.line) == 0 {
+		return nil
+	}
+
+	if !bytes.HasPrefix(d.line, shallow) {
+		d.error("unexpected payload while expecting a shallow: %q", d.line)
+		return nil
+	}
+	d.line = bytes.TrimPrefix(d.line, shallow)
+
+	hash, ok := d.readHash()
+	if !ok {
+		return nil
+	}
+	d.data.Shallows = append(d.data.Shallows, hash)
+
+	if ok := d.nextLine(); !ok {
+		return nil
+	}
+
+	return d.decodeShallow
+}
+
+// Expected format: deepen <depth> / deepen-since