From 18cdcf829d5696153fda549f3af4e42ca6617bd0 Mon Sep 17 00:00:00 2001 From: CrazyMax Date: Mon, 11 Oct 2021 13:55:25 +0200 Subject: [PATCH] Set buildx as default builder Signed-off-by: CrazyMax --- cli-plugins/manager/manager.go | 30 + cli-plugins/manager/manager_test.go | 23 + cli/command/cli.go | 15 - cli/command/image/build.go | 108 +- cli/command/image/build_buildkit.go | 525 - cli/command/image/build_session.go | 69 - cli/command/image/build_test.go | 71 - cmd/docker/aliases.go | 50 + cmd/docker/builder.go | 111 + cmd/docker/builder_test.go | 46 + cmd/docker/docker.go | 42 - dockerfiles/Dockerfile.dev | 8 +- dockerfiles/Dockerfile.e2e | 6 + e2e/image/build_test.go | 26 +- vendor/github.com/containerd/console/LICENSE | 191 - .../github.com/containerd/console/README.md | 29 - .../github.com/containerd/console/console.go | 87 - .../containerd/console/console_linux.go | 280 - .../containerd/console/console_unix.go | 156 - .../containerd/console/console_windows.go | 216 - vendor/github.com/containerd/console/go.mod | 8 - .../containerd/console/pty_freebsd_cgo.go | 45 - .../containerd/console/pty_freebsd_nocgo.go | 36 - .../github.com/containerd/console/pty_unix.go | 30 - .../containerd/console/tc_darwin.go | 44 - .../containerd/console/tc_freebsd_cgo.go | 57 - .../containerd/console/tc_freebsd_nocgo.go | 55 - .../github.com/containerd/console/tc_linux.go | 51 - .../containerd/console/tc_netbsd.go | 45 - .../containerd/console/tc_openbsd_cgo.go | 51 - .../containerd/console/tc_openbsd_nocgo.go | 47 - .../containerd/console/tc_solaris_cgo.go | 51 - .../containerd/console/tc_solaris_nocgo.go | 47 - .../github.com/containerd/console/tc_unix.go | 91 - .../containerd/containerd/api/README.md | 18 - .../api/services/content/v1/content.pb.go | 5425 -------- .../api/services/content/v1/content.proto | 334 - .../containerd/containerd/content/adaptor.go | 52 - .../containerd/containerd/content/content.go | 182 - .../containerd/containerd/content/helpers.go | 275 - .../containerd/content/local/locks.go | 56 - .../containerd/content/local/readerat.go | 68 - .../containerd/content/local/store.go | 701 - .../containerd/content/local/store_bsd.go | 33 - .../containerd/content/local/store_openbsd.go | 33 - .../containerd/content/local/store_unix.go | 33 - .../containerd/content/local/store_windows.go | 26 - .../containerd/content/local/writer.go | 207 - .../content/proxy/content_reader.go | 71 - .../containerd/content/proxy/content_store.go | 234 - .../content/proxy/content_writer.go | 146 - .../containerd/defaults/defaults.go | 32 - .../containerd/defaults/defaults_unix.go | 39 - .../containerd/defaults/defaults_windows.go | 48 - .../containerd/containerd/defaults/doc.go | 19 - .../containerd/containerd/filters/adaptor.go | 33 - .../containerd/containerd/filters/filter.go | 179 - .../containerd/containerd/filters/parser.go | 292 - .../containerd/containerd/filters/quote.go | 253 - .../containerd/containerd/filters/scanner.go | 297 - .../containerd/remotes/docker/auth/fetch.go | 209 - .../containerd/remotes/docker/auth/parse.go | 203 - .../containerd/remotes/errors/errors.go | 56 - .../content/contentserver/contentserver.go | 463 - .../containerd/containerd/version/version.go | 34 - .../github.com/containerd/continuity/LICENSE | 191 - .../containerd/continuity/README.md | 88 - .../github.com/containerd/continuity/go.mod | 15 - .../containerd/continuity/sysx/README.md | 3 - .../continuity/sysx/nodata_linux.go | 23 - .../continuity/sysx/nodata_solaris.go | 24 - .../containerd/continuity/sysx/nodata_unix.go 
| 25 - .../containerd/continuity/sysx/xattr.go | 117 - .../continuity/sysx/xattr_unsupported.go | 67 - vendor/github.com/containerd/typeurl/LICENSE | 191 - .../github.com/containerd/typeurl/README.md | 20 - vendor/github.com/containerd/typeurl/doc.go | 83 - vendor/github.com/containerd/typeurl/go.mod | 8 - vendor/github.com/containerd/typeurl/types.go | 214 - vendor/github.com/gofrs/flock/LICENSE | 27 - vendor/github.com/gofrs/flock/README.md | 41 - vendor/github.com/gofrs/flock/flock.go | 135 - vendor/github.com/gofrs/flock/flock_unix.go | 197 - vendor/github.com/gofrs/flock/flock_winapi.go | 76 - .../github.com/gofrs/flock/flock_windows.go | 142 - vendor/github.com/gogo/googleapis/LICENSE | 203 - vendor/github.com/gogo/googleapis/Readme.md | 5 - vendor/github.com/gogo/googleapis/go.mod | 5 - .../gogo/googleapis/google/rpc/code.pb.go | 257 - .../gogo/googleapis/google/rpc/code.proto | 185 - .../googleapis/google/rpc/error_details.pb.go | 4904 ------- .../googleapis/google/rpc/error_details.proto | 200 - .../gogo/googleapis/google/rpc/status.pb.go | 731 - .../gogo/googleapis/google/rpc/status.proto | 94 - .../grpc-ecosystem/go-grpc-middleware/LICENSE | 201 - .../go-grpc-middleware/README.md | 85 - .../go-grpc-middleware/chain.go | 120 - .../grpc-ecosystem/go-grpc-middleware/doc.go | 69 - .../grpc-ecosystem/go-grpc-middleware/go.mod | 22 - .../go-grpc-middleware/wrappers.go | 30 - .../grpc-opentracing/go/otgrpc/README.md | 57 - .../grpc-opentracing/go/otgrpc/client.go | 239 - .../grpc-opentracing/go/otgrpc/errors.go | 69 - .../grpc-opentracing/go/otgrpc/options.go | 76 - .../grpc-opentracing/go/otgrpc/package.go | 5 - .../grpc-opentracing/go/otgrpc/server.go | 141 - .../grpc-opentracing/go/otgrpc/shared.go | 42 - vendor/github.com/jaguilar/vt100/LICENSE | 22 - vendor/github.com/jaguilar/vt100/README.md | 54 - vendor/github.com/jaguilar/vt100/command.go | 288 - vendor/github.com/jaguilar/vt100/go.mod | 5 - vendor/github.com/jaguilar/vt100/scanner.go | 97 - vendor/github.com/jaguilar/vt100/vt100.go | 435 - .../api/services/control/control.pb.go | 6429 --------- .../api/services/control/control.proto | 147 - .../buildkit/api/services/control/generate.go | 3 - .../moby/buildkit/api/types/generate.go | 3 - .../moby/buildkit/api/types/worker.pb.go | 923 -- .../moby/buildkit/api/types/worker.proto | 24 - .../github.com/moby/buildkit/client/build.go | 150 - .../moby/buildkit/client/buildid/metadata.go | 29 - .../github.com/moby/buildkit/client/client.go | 203 - .../moby/buildkit/client/client_unix.go | 20 - .../moby/buildkit/client/client_windows.go | 25 - .../buildkit/client/connhelper/connhelper.go | 38 - .../moby/buildkit/client/diskusage.go | 84 - .../moby/buildkit/client/exporters.go | 9 - .../github.com/moby/buildkit/client/filter.go | 19 - .../github.com/moby/buildkit/client/graph.go | 46 - .../moby/buildkit/client/llb/async.go | 98 - .../moby/buildkit/client/llb/definition.go | 227 - .../moby/buildkit/client/llb/exec.go | 669 - .../moby/buildkit/client/llb/fileop.go | 776 -- .../moby/buildkit/client/llb/marshal.go | 117 - .../moby/buildkit/client/llb/meta.go | 323 - .../moby/buildkit/client/llb/resolver.go | 35 - .../moby/buildkit/client/llb/source.go | 519 - .../moby/buildkit/client/llb/sourcemap.go | 111 - .../moby/buildkit/client/llb/state.go | 570 - .../moby/buildkit/client/ociindex/ociindex.go | 113 - .../github.com/moby/buildkit/client/prune.go | 83 - .../github.com/moby/buildkit/client/solve.go | 488 - .../moby/buildkit/client/workers.go | 70 - .../frontend/gateway/client/client.go | 
134 - .../frontend/gateway/client/result.go | 54 - .../buildkit/frontend/gateway/errdefs/exit.go | 34 - .../frontend/gateway/grpcclient/client.go | 1110 -- .../moby/buildkit/frontend/gateway/pb/caps.go | 172 - .../frontend/gateway/pb/gateway.pb.go | 10066 -------------- .../frontend/gateway/pb/gateway.proto | 238 - .../buildkit/frontend/gateway/pb/generate.go | 3 - .../moby/buildkit/identity/randomid.go | 53 - .../moby/buildkit/session/auth/auth.go | 130 - .../moby/buildkit/session/auth/auth.pb.go | 2630 ---- .../moby/buildkit/session/auth/auth.proto | 54 - .../session/auth/authprovider/authprovider.go | 220 - .../session/auth/authprovider/tokenseed.go | 83 - .../moby/buildkit/session/auth/generate.go | 3 - .../buildkit/session/content/attachable.go | 132 - .../moby/buildkit/session/content/caller.go | 91 - .../buildkit/session/filesync/diffcopy.go | 130 - .../buildkit/session/filesync/filesync.go | 326 - .../buildkit/session/filesync/filesync.pb.go | 677 - .../buildkit/session/filesync/filesync.proto | 22 - .../buildkit/session/filesync/generate.go | 3 - .../github.com/moby/buildkit/session/group.go | 88 - .../github.com/moby/buildkit/session/grpc.go | 99 - .../moby/buildkit/session/grpchijack/dial.go | 167 - .../buildkit/session/grpchijack/hijack.go | 15 - .../moby/buildkit/session/manager.go | 224 - .../moby/buildkit/session/secrets/generate.go | 3 - .../moby/buildkit/session/secrets/secrets.go | 30 - .../buildkit/session/secrets/secrets.pb.go | 880 -- .../buildkit/session/secrets/secrets.proto | 19 - .../secretsprovider/secretsprovider.go | 60 - .../session/secrets/secretsprovider/store.go | 65 - .../moby/buildkit/session/session.go | 161 - .../moby/buildkit/session/sshforward/copy.go | 69 - .../buildkit/session/sshforward/generate.go | 3 - .../moby/buildkit/session/sshforward/ssh.go | 118 - .../buildkit/session/sshforward/ssh.pb.go | 909 -- .../buildkit/session/sshforward/ssh.proto | 22 - .../sshforward/sshprovider/agentprovider.go | 244 - .../sshprovider/agentprovider_unix.go | 15 - .../sshprovider/agentprovider_windows.go | 60 - .../moby/buildkit/solver/pb/attr.go | 29 - .../moby/buildkit/solver/pb/caps.go | 334 - .../moby/buildkit/solver/pb/const.go | 25 - .../moby/buildkit/solver/pb/generate.go | 3 - .../moby/buildkit/solver/pb/ops.pb.go | 11333 ---------------- .../moby/buildkit/solver/pb/ops.proto | 350 - .../moby/buildkit/solver/pb/platform.go | 41 - .../moby/buildkit/util/apicaps/caps.go | 169 - .../moby/buildkit/util/apicaps/pb/caps.pb.go | 567 - .../moby/buildkit/util/apicaps/pb/caps.proto | 19 - .../moby/buildkit/util/apicaps/pb/generate.go | 3 - .../util/appdefaults/appdefaults_unix.go | 69 - .../util/appdefaults/appdefaults_windows.go | 31 - .../util/entitlements/entitlements.go | 60 - .../util/flightcontrol/flightcontrol.go | 349 - .../buildkit/util/gitutil/git_protocol.go | 46 - .../buildkit/util/grpcerrors/grpcerrors.go | 218 - .../buildkit/util/grpcerrors/intercept.go | 54 - .../buildkit/util/progress/multireader.go | 77 - .../buildkit/util/progress/multiwriter.go | 104 - .../moby/buildkit/util/progress/progress.go | 261 - .../util/progress/progressui/display.go | 587 - .../util/progress/progressui/printer.go | 292 - .../progress/progresswriter/multiwriter.go | 106 - .../util/progress/progresswriter/printer.go | 94 - .../util/progress/progresswriter/progress.go | 93 - .../util/progress/progresswriter/reset.go | 71 - .../util/progress/progresswriter/writer.go | 46 - .../moby/buildkit/util/sshutil/keyscan.go | 51 - .../util/sshutil/transport_validation.go | 11 - 
.../moby/buildkit/util/stack/generate.go | 3 - .../moby/buildkit/util/stack/stack.go | 182 - .../moby/buildkit/util/stack/stack.pb.go | 172 - .../moby/buildkit/util/stack/stack.proto | 17 - .../moby/buildkit/util/system/path.go | 18 - .../moby/buildkit/util/system/path_unix.go | 9 - .../moby/buildkit/util/system/path_windows.go | 33 - .../buildkit/util/system/seccomp_linux.go | 29 - .../buildkit/util/system/seccomp_nolinux.go | 7 - .../buildkit/util/system/seccomp_noseccomp.go | 7 - .../opentracing/opentracing-go/LICENSE | 201 - .../opentracing/opentracing-go/README.md | 171 - .../opentracing/opentracing-go/ext.go | 24 - .../opentracing/opentracing-go/ext/field.go | 17 - .../opentracing/opentracing-go/ext/tags.go | 215 - .../opentracing-go/globaltracer.go | 42 - .../opentracing/opentracing-go/go.mod | 5 - .../opentracing/opentracing-go/gocontext.go | 65 - .../opentracing/opentracing-go/log/field.go | 282 - .../opentracing/opentracing-go/log/util.go | 61 - .../opentracing/opentracing-go/noop.go | 64 - .../opentracing/opentracing-go/propagation.go | 176 - .../opentracing/opentracing-go/span.go | 189 - .../opentracing/opentracing-go/tracer.go | 304 - vendor/github.com/tonistiigi/fsutil/LICENSE | 22 - .../tonistiigi/fsutil/chtimes_linux.go | 20 - .../tonistiigi/fsutil/chtimes_nolinux.go | 22 - vendor/github.com/tonistiigi/fsutil/diff.go | 51 - .../tonistiigi/fsutil/diff_containerd.go | 240 - .../tonistiigi/fsutil/diskwriter.go | 351 - .../tonistiigi/fsutil/diskwriter_unix.go | 52 - .../tonistiigi/fsutil/diskwriter_windows.go | 18 - .../tonistiigi/fsutil/followlinks.go | 150 - vendor/github.com/tonistiigi/fsutil/fs.go | 120 - vendor/github.com/tonistiigi/fsutil/go.mod | 20 - .../github.com/tonistiigi/fsutil/hardlinks.go | 48 - .../tonistiigi/fsutil/prefix/match.go | 45 - vendor/github.com/tonistiigi/fsutil/readme.md | 45 - .../github.com/tonistiigi/fsutil/receive.go | 286 - vendor/github.com/tonistiigi/fsutil/send.go | 208 - vendor/github.com/tonistiigi/fsutil/stat.go | 64 - .../github.com/tonistiigi/fsutil/stat_unix.go | 71 - .../tonistiigi/fsutil/stat_windows.go | 16 - .../github.com/tonistiigi/fsutil/tarwriter.go | 80 - .../tonistiigi/fsutil/types/generate.go | 3 - .../tonistiigi/fsutil/types/stat.go | 7 - .../tonistiigi/fsutil/types/stat.pb.go | 929 -- .../tonistiigi/fsutil/types/stat.proto | 19 - .../tonistiigi/fsutil/types/wire.pb.go | 575 - .../tonistiigi/fsutil/types/wire.proto | 21 - .../github.com/tonistiigi/fsutil/validator.go | 93 - vendor/github.com/tonistiigi/fsutil/walker.go | 196 - vendor/github.com/tonistiigi/units/LICENSE | 21 - vendor/github.com/tonistiigi/units/bytes.go | 125 - vendor/github.com/tonistiigi/units/readme.md | 29 - vendor/golang.org/x/crypto/blowfish/block.go | 159 - vendor/golang.org/x/crypto/blowfish/cipher.go | 99 - vendor/golang.org/x/crypto/blowfish/const.go | 199 - .../x/crypto/chacha20/chacha_arm64.go | 17 - .../x/crypto/chacha20/chacha_arm64.s | 307 - .../x/crypto/chacha20/chacha_generic.go | 398 - .../x/crypto/chacha20/chacha_noasm.go | 14 - .../x/crypto/chacha20/chacha_ppc64le.go | 17 - .../x/crypto/chacha20/chacha_ppc64le.s | 449 - .../x/crypto/chacha20/chacha_s390x.go | 27 - .../x/crypto/chacha20/chacha_s390x.s | 224 - vendor/golang.org/x/crypto/chacha20/xor.go | 42 - .../x/crypto/curve25519/curve25519.go | 95 - .../x/crypto/curve25519/curve25519_amd64.go | 241 - .../x/crypto/curve25519/curve25519_amd64.s | 1793 --- .../x/crypto/curve25519/curve25519_generic.go | 828 -- .../x/crypto/curve25519/curve25519_noasm.go | 12 - 
.../x/crypto/internal/subtle/aliasing.go | 33 - .../crypto/internal/subtle/aliasing_purego.go | 36 - vendor/golang.org/x/crypto/nacl/sign/sign.go | 90 - .../x/crypto/poly1305/bits_compat.go | 40 - .../x/crypto/poly1305/bits_go1.13.go | 22 - .../golang.org/x/crypto/poly1305/mac_noasm.go | 10 - .../golang.org/x/crypto/poly1305/poly1305.go | 99 - .../golang.org/x/crypto/poly1305/sum_amd64.go | 48 - .../golang.org/x/crypto/poly1305/sum_amd64.s | 108 - .../x/crypto/poly1305/sum_generic.go | 310 - .../x/crypto/poly1305/sum_ppc64le.go | 48 - .../x/crypto/poly1305/sum_ppc64le.s | 181 - .../golang.org/x/crypto/poly1305/sum_s390x.go | 76 - .../golang.org/x/crypto/poly1305/sum_s390x.s | 503 - .../golang.org/x/crypto/ssh/agent/client.go | 813 -- .../golang.org/x/crypto/ssh/agent/forward.go | 103 - .../golang.org/x/crypto/ssh/agent/keyring.go | 241 - .../golang.org/x/crypto/ssh/agent/server.go | 570 - vendor/golang.org/x/crypto/ssh/buffer.go | 97 - vendor/golang.org/x/crypto/ssh/certs.go | 546 - vendor/golang.org/x/crypto/ssh/channel.go | 633 - vendor/golang.org/x/crypto/ssh/cipher.go | 781 -- vendor/golang.org/x/crypto/ssh/client.go | 278 - vendor/golang.org/x/crypto/ssh/client_auth.go | 641 - vendor/golang.org/x/crypto/ssh/common.go | 404 - vendor/golang.org/x/crypto/ssh/connection.go | 143 - vendor/golang.org/x/crypto/ssh/doc.go | 21 - vendor/golang.org/x/crypto/ssh/handshake.go | 647 - .../ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go | 93 - vendor/golang.org/x/crypto/ssh/kex.go | 782 -- vendor/golang.org/x/crypto/ssh/keys.go | 1474 -- vendor/golang.org/x/crypto/ssh/mac.go | 61 - vendor/golang.org/x/crypto/ssh/messages.go | 866 -- vendor/golang.org/x/crypto/ssh/mux.go | 351 - vendor/golang.org/x/crypto/ssh/server.go | 720 - vendor/golang.org/x/crypto/ssh/session.go | 647 - vendor/golang.org/x/crypto/ssh/ssh_gss.go | 139 - vendor/golang.org/x/crypto/ssh/streamlocal.go | 116 - vendor/golang.org/x/crypto/ssh/tcpip.go | 474 - vendor/golang.org/x/crypto/ssh/transport.go | 353 - vendor/golang.org/x/net/context/context.go | 56 - vendor/golang.org/x/net/context/go17.go | 73 - vendor/golang.org/x/net/context/go19.go | 21 - vendor/golang.org/x/net/context/pre_go17.go | 301 - vendor/golang.org/x/net/context/pre_go19.go | 110 - vendor/golang.org/x/sync/LICENSE | 27 - vendor/golang.org/x/sync/PATENTS | 22 - vendor/golang.org/x/sync/README.md | 20 - vendor/golang.org/x/sync/errgroup/errgroup.go | 66 - vendor/golang.org/x/sync/go.mod | 1 - .../google.golang.org/grpc/health/client.go | 117 - .../grpc/health/grpc_health_v1/health.pb.go | 313 - .../health/grpc_health_v1/health_grpc.pb.go | 201 - .../google.golang.org/grpc/health/logging.go | 23 - .../google.golang.org/grpc/health/server.go | 163 - 342 files changed, 296 insertions(+), 96563 deletions(-) delete mode 100644 cli/command/image/build_buildkit.go delete mode 100644 cli/command/image/build_session.go create mode 100644 cmd/docker/aliases.go create mode 100644 cmd/docker/builder.go create mode 100644 cmd/docker/builder_test.go delete mode 100644 vendor/github.com/containerd/console/LICENSE delete mode 100644 vendor/github.com/containerd/console/README.md delete mode 100644 vendor/github.com/containerd/console/console.go delete mode 100644 vendor/github.com/containerd/console/console_linux.go delete mode 100644 vendor/github.com/containerd/console/console_unix.go delete mode 100644 vendor/github.com/containerd/console/console_windows.go delete mode 100644 vendor/github.com/containerd/console/go.mod delete mode 100644 
vendor/github.com/containerd/console/pty_freebsd_cgo.go delete mode 100644 vendor/github.com/containerd/console/pty_freebsd_nocgo.go delete mode 100644 vendor/github.com/containerd/console/pty_unix.go delete mode 100644 vendor/github.com/containerd/console/tc_darwin.go delete mode 100644 vendor/github.com/containerd/console/tc_freebsd_cgo.go delete mode 100644 vendor/github.com/containerd/console/tc_freebsd_nocgo.go delete mode 100644 vendor/github.com/containerd/console/tc_linux.go delete mode 100644 vendor/github.com/containerd/console/tc_netbsd.go delete mode 100644 vendor/github.com/containerd/console/tc_openbsd_cgo.go delete mode 100644 vendor/github.com/containerd/console/tc_openbsd_nocgo.go delete mode 100644 vendor/github.com/containerd/console/tc_solaris_cgo.go delete mode 100644 vendor/github.com/containerd/console/tc_solaris_nocgo.go delete mode 100644 vendor/github.com/containerd/console/tc_unix.go delete mode 100644 vendor/github.com/containerd/containerd/api/README.md delete mode 100644 vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go delete mode 100644 vendor/github.com/containerd/containerd/api/services/content/v1/content.proto delete mode 100644 vendor/github.com/containerd/containerd/content/adaptor.go delete mode 100644 vendor/github.com/containerd/containerd/content/content.go delete mode 100644 vendor/github.com/containerd/containerd/content/helpers.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/locks.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/readerat.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_bsd.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_openbsd.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_unix.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/store_windows.go delete mode 100644 vendor/github.com/containerd/containerd/content/local/writer.go delete mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_reader.go delete mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_store.go delete mode 100644 vendor/github.com/containerd/containerd/content/proxy/content_writer.go delete mode 100644 vendor/github.com/containerd/containerd/defaults/defaults.go delete mode 100644 vendor/github.com/containerd/containerd/defaults/defaults_unix.go delete mode 100644 vendor/github.com/containerd/containerd/defaults/defaults_windows.go delete mode 100644 vendor/github.com/containerd/containerd/defaults/doc.go delete mode 100644 vendor/github.com/containerd/containerd/filters/adaptor.go delete mode 100644 vendor/github.com/containerd/containerd/filters/filter.go delete mode 100644 vendor/github.com/containerd/containerd/filters/parser.go delete mode 100644 vendor/github.com/containerd/containerd/filters/quote.go delete mode 100644 vendor/github.com/containerd/containerd/filters/scanner.go delete mode 100644 vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go delete mode 100644 vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go delete mode 100644 vendor/github.com/containerd/containerd/remotes/errors/errors.go delete mode 100644 vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go delete mode 100644 vendor/github.com/containerd/containerd/version/version.go delete mode 
100644 vendor/github.com/containerd/continuity/LICENSE delete mode 100644 vendor/github.com/containerd/continuity/README.md delete mode 100644 vendor/github.com/containerd/continuity/go.mod delete mode 100644 vendor/github.com/containerd/continuity/sysx/README.md delete mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_linux.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_solaris.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/nodata_unix.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/xattr.go delete mode 100644 vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go delete mode 100644 vendor/github.com/containerd/typeurl/LICENSE delete mode 100644 vendor/github.com/containerd/typeurl/README.md delete mode 100644 vendor/github.com/containerd/typeurl/doc.go delete mode 100644 vendor/github.com/containerd/typeurl/go.mod delete mode 100644 vendor/github.com/containerd/typeurl/types.go delete mode 100644 vendor/github.com/gofrs/flock/LICENSE delete mode 100644 vendor/github.com/gofrs/flock/README.md delete mode 100644 vendor/github.com/gofrs/flock/flock.go delete mode 100644 vendor/github.com/gofrs/flock/flock_unix.go delete mode 100644 vendor/github.com/gofrs/flock/flock_winapi.go delete mode 100644 vendor/github.com/gofrs/flock/flock_windows.go delete mode 100644 vendor/github.com/gogo/googleapis/LICENSE delete mode 100644 vendor/github.com/gogo/googleapis/Readme.md delete mode 100644 vendor/github.com/gogo/googleapis/go.mod delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/code.pb.go delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/code.proto delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/error_details.proto delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/status.pb.go delete mode 100644 vendor/github.com/gogo/googleapis/google/rpc/status.proto delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod delete mode 100644 vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go delete mode 100644 vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go delete mode 100644 vendor/github.com/jaguilar/vt100/LICENSE delete mode 100644 vendor/github.com/jaguilar/vt100/README.md delete mode 100644 vendor/github.com/jaguilar/vt100/command.go delete mode 100644 vendor/github.com/jaguilar/vt100/go.mod delete mode 100644 vendor/github.com/jaguilar/vt100/scanner.go delete mode 100644 vendor/github.com/jaguilar/vt100/vt100.go delete mode 100644 
vendor/github.com/moby/buildkit/api/services/control/control.pb.go delete mode 100644 vendor/github.com/moby/buildkit/api/services/control/control.proto delete mode 100644 vendor/github.com/moby/buildkit/api/services/control/generate.go delete mode 100644 vendor/github.com/moby/buildkit/api/types/generate.go delete mode 100644 vendor/github.com/moby/buildkit/api/types/worker.pb.go delete mode 100644 vendor/github.com/moby/buildkit/api/types/worker.proto delete mode 100644 vendor/github.com/moby/buildkit/client/build.go delete mode 100644 vendor/github.com/moby/buildkit/client/buildid/metadata.go delete mode 100644 vendor/github.com/moby/buildkit/client/client.go delete mode 100644 vendor/github.com/moby/buildkit/client/client_unix.go delete mode 100644 vendor/github.com/moby/buildkit/client/client_windows.go delete mode 100644 vendor/github.com/moby/buildkit/client/connhelper/connhelper.go delete mode 100644 vendor/github.com/moby/buildkit/client/diskusage.go delete mode 100644 vendor/github.com/moby/buildkit/client/exporters.go delete mode 100644 vendor/github.com/moby/buildkit/client/filter.go delete mode 100644 vendor/github.com/moby/buildkit/client/graph.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/async.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/definition.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/exec.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/fileop.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/marshal.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/meta.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/resolver.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/source.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/sourcemap.go delete mode 100644 vendor/github.com/moby/buildkit/client/llb/state.go delete mode 100644 vendor/github.com/moby/buildkit/client/ociindex/ociindex.go delete mode 100644 vendor/github.com/moby/buildkit/client/prune.go delete mode 100644 vendor/github.com/moby/buildkit/client/solve.go delete mode 100644 vendor/github.com/moby/buildkit/client/workers.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/client/client.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/client/result.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/errdefs/exit.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto delete mode 100644 vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go delete mode 100644 vendor/github.com/moby/buildkit/identity/randomid.go delete mode 100644 vendor/github.com/moby/buildkit/session/auth/auth.go delete mode 100644 vendor/github.com/moby/buildkit/session/auth/auth.pb.go delete mode 100644 vendor/github.com/moby/buildkit/session/auth/auth.proto delete mode 100644 vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go delete mode 100644 vendor/github.com/moby/buildkit/session/auth/authprovider/tokenseed.go delete mode 100644 vendor/github.com/moby/buildkit/session/auth/generate.go delete mode 100644 vendor/github.com/moby/buildkit/session/content/attachable.go delete mode 100644 
vendor/github.com/moby/buildkit/session/content/caller.go delete mode 100644 vendor/github.com/moby/buildkit/session/filesync/diffcopy.go delete mode 100644 vendor/github.com/moby/buildkit/session/filesync/filesync.go delete mode 100644 vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go delete mode 100644 vendor/github.com/moby/buildkit/session/filesync/filesync.proto delete mode 100644 vendor/github.com/moby/buildkit/session/filesync/generate.go delete mode 100644 vendor/github.com/moby/buildkit/session/group.go delete mode 100644 vendor/github.com/moby/buildkit/session/grpc.go delete mode 100644 vendor/github.com/moby/buildkit/session/grpchijack/dial.go delete mode 100644 vendor/github.com/moby/buildkit/session/grpchijack/hijack.go delete mode 100644 vendor/github.com/moby/buildkit/session/manager.go delete mode 100644 vendor/github.com/moby/buildkit/session/secrets/generate.go delete mode 100644 vendor/github.com/moby/buildkit/session/secrets/secrets.go delete mode 100644 vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go delete mode 100644 vendor/github.com/moby/buildkit/session/secrets/secrets.proto delete mode 100644 vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go delete mode 100644 vendor/github.com/moby/buildkit/session/secrets/secretsprovider/store.go delete mode 100644 vendor/github.com/moby/buildkit/session/session.go delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/copy.go delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/generate.go delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/ssh.go delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/ssh.proto delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_unix.go delete mode 100644 vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_windows.go delete mode 100644 vendor/github.com/moby/buildkit/solver/pb/attr.go delete mode 100644 vendor/github.com/moby/buildkit/solver/pb/caps.go delete mode 100644 vendor/github.com/moby/buildkit/solver/pb/const.go delete mode 100644 vendor/github.com/moby/buildkit/solver/pb/generate.go delete mode 100644 vendor/github.com/moby/buildkit/solver/pb/ops.pb.go delete mode 100644 vendor/github.com/moby/buildkit/solver/pb/ops.proto delete mode 100644 vendor/github.com/moby/buildkit/solver/pb/platform.go delete mode 100644 vendor/github.com/moby/buildkit/util/apicaps/caps.go delete mode 100644 vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go delete mode 100644 vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto delete mode 100644 vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go delete mode 100644 vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go delete mode 100644 vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go delete mode 100644 vendor/github.com/moby/buildkit/util/entitlements/entitlements.go delete mode 100644 vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go delete mode 100644 vendor/github.com/moby/buildkit/util/gitutil/git_protocol.go delete mode 100644 vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go delete mode 100644 vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go delete mode 100644 
vendor/github.com/moby/buildkit/util/progress/multireader.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/multiwriter.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progress.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progressui/display.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progressui/printer.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progresswriter/multiwriter.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progresswriter/printer.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progresswriter/progress.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progresswriter/reset.go delete mode 100644 vendor/github.com/moby/buildkit/util/progress/progresswriter/writer.go delete mode 100644 vendor/github.com/moby/buildkit/util/sshutil/keyscan.go delete mode 100644 vendor/github.com/moby/buildkit/util/sshutil/transport_validation.go delete mode 100644 vendor/github.com/moby/buildkit/util/stack/generate.go delete mode 100644 vendor/github.com/moby/buildkit/util/stack/stack.go delete mode 100644 vendor/github.com/moby/buildkit/util/stack/stack.pb.go delete mode 100644 vendor/github.com/moby/buildkit/util/stack/stack.proto delete mode 100644 vendor/github.com/moby/buildkit/util/system/path.go delete mode 100644 vendor/github.com/moby/buildkit/util/system/path_unix.go delete mode 100644 vendor/github.com/moby/buildkit/util/system/path_windows.go delete mode 100644 vendor/github.com/moby/buildkit/util/system/seccomp_linux.go delete mode 100644 vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go delete mode 100644 vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/LICENSE delete mode 100644 vendor/github.com/opentracing/opentracing-go/README.md delete mode 100644 vendor/github.com/opentracing/opentracing-go/ext.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/ext/field.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/ext/tags.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/globaltracer.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/go.mod delete mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/log/field.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/log/util.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/noop.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/propagation.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/span.go delete mode 100644 vendor/github.com/opentracing/opentracing-go/tracer.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/LICENSE delete mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_linux.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diff.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diff_containerd.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/followlinks.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/fs.go delete mode 100644 
vendor/github.com/tonistiigi/fsutil/go.mod delete mode 100644 vendor/github.com/tonistiigi/fsutil/hardlinks.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/prefix/match.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/readme.md delete mode 100644 vendor/github.com/tonistiigi/fsutil/receive.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/send.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat_unix.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/stat_windows.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/tarwriter.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/generate.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.pb.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/stat.proto delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.pb.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/types/wire.proto delete mode 100644 vendor/github.com/tonistiigi/fsutil/validator.go delete mode 100644 vendor/github.com/tonistiigi/fsutil/walker.go delete mode 100644 vendor/github.com/tonistiigi/units/LICENSE delete mode 100644 vendor/github.com/tonistiigi/units/bytes.go delete mode 100644 vendor/github.com/tonistiigi/units/readme.md delete mode 100644 vendor/golang.org/x/crypto/blowfish/block.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/cipher.go delete mode 100644 vendor/golang.org/x/crypto/blowfish/const.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_arm64.s delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_generic.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_noasm.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.go delete mode 100644 vendor/golang.org/x/crypto/chacha20/chacha_s390x.s delete mode 100644 vendor/golang.org/x/crypto/chacha20/xor.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_generic.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go delete mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing.go delete mode 100644 vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go delete mode 100644 vendor/golang.org/x/crypto/nacl/sign/sign.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/bits_compat.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/bits_go1.13.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/mac_noasm.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_generic.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s delete mode 100644 vendor/golang.org/x/crypto/poly1305/sum_s390x.go delete mode 100644 
vendor/golang.org/x/crypto/poly1305/sum_s390x.s delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/forward.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/keyring.go delete mode 100644 vendor/golang.org/x/crypto/ssh/agent/server.go delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go delete mode 100644 vendor/golang.org/x/crypto/ssh/doc.go delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go delete mode 100644 vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go delete mode 100644 vendor/golang.org/x/crypto/ssh/ssh_gss.go delete mode 100644 vendor/golang.org/x/crypto/ssh/streamlocal.go delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go delete mode 100644 vendor/golang.org/x/net/context/context.go delete mode 100644 vendor/golang.org/x/net/context/go17.go delete mode 100644 vendor/golang.org/x/net/context/go19.go delete mode 100644 vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 vendor/golang.org/x/sync/LICENSE delete mode 100644 vendor/golang.org/x/sync/PATENTS delete mode 100644 vendor/golang.org/x/sync/README.md delete mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go delete mode 100644 vendor/golang.org/x/sync/go.mod delete mode 100644 vendor/google.golang.org/grpc/health/client.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go delete mode 100644 vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go delete mode 100644 vendor/google.golang.org/grpc/health/logging.go delete mode 100644 vendor/google.golang.org/grpc/health/server.go diff --git a/cli-plugins/manager/manager.go b/cli-plugins/manager/manager.go index 50f7208ea334..89727f7cac65 100644 --- a/cli-plugins/manager/manager.go +++ b/cli-plugins/manager/manager.go @@ -104,6 +104,36 @@ func listPluginCandidates(dirs []string) (map[string][]string, error) { return result, nil } +// GetPlugin returns a plugin on the system by its name +func GetPlugin(name string, dockerCli command.Cli, rootcmd *cobra.Command) (*Plugin, error) { + pluginDirs, err := getPluginDirs(dockerCli) + if err != nil { + return nil, err + } + + candidates, err := listPluginCandidates(pluginDirs) + if err != nil { + return nil, err + } + + if paths, ok := candidates[name]; ok { + if len(paths) == 0 { + return nil, errPluginNotFound(name) + } + c := &candidate{paths[0]} + p, err := newPlugin(c, rootcmd) + if err != nil { + return nil, err + } + if !IsNotFound(p.Err) { + p.ShadowedPaths = paths[1:] + } + return &p, nil + } + 
+ return nil, errPluginNotFound(name) +} + // ListPlugins produces a list of the plugins available on the system func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) { pluginDirs, err := getPluginDirs(dockerCli) diff --git a/cli-plugins/manager/manager_test.go b/cli-plugins/manager/manager_test.go index 0da315c5857e..414e6899e70e 100644 --- a/cli-plugins/manager/manager_test.go +++ b/cli-plugins/manager/manager_test.go @@ -82,6 +82,29 @@ func TestListPluginCandidates(t *testing.T) { assert.DeepEqual(t, candidates, exp) } +func TestGetPlugin(t *testing.T) { + dir := fs.NewDir(t, t.Name(), + fs.WithFile("docker-bbb", ` +#!/bin/sh +echo '{"SchemaVersion":"0.1.0"}'`, fs.WithMode(0777)), + fs.WithFile("docker-aaa", ` +#!/bin/sh +echo '{"SchemaVersion":"0.1.0"}'`, fs.WithMode(0777)), + ) + defer dir.Remove() + + cli := test.NewFakeCli(nil) + cli.SetConfigFile(&configfile.ConfigFile{CLIPluginsExtraDirs: []string{dir.Path()}}) + + plugin, err := GetPlugin("bbb", cli, &cobra.Command{}) + assert.NilError(t, err) + assert.Equal(t, plugin.Name, "bbb") + + _, err = GetPlugin("ccc", cli, &cobra.Command{}) + assert.Error(t, err, "Error: No such CLI plugin: ccc") + assert.Assert(t, IsNotFound(err)) +} + func TestListPluginsIsSorted(t *testing.T) { dir := fs.NewDir(t, t.Name(), fs.WithFile("docker-bbb", ` diff --git a/cli/command/cli.go b/cli/command/cli.go index fe6444f42f8d..35d2d54018fb 100644 --- a/cli/command/cli.go +++ b/cli/command/cli.go @@ -7,7 +7,6 @@ import ( "os" "path/filepath" "runtime" - "strconv" "strings" "time" @@ -171,20 +170,6 @@ func (cli *DockerCli) ContentTrustEnabled() bool { return cli.contentTrust } -// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting -// or otherwise the client-side DOCKER_BUILDKIT environment variable -func BuildKitEnabled(si ServerInfo) (bool, error) { - buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit - if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" { - var err error - buildkitEnabled, err = strconv.ParseBool(buildkitEnv) - if err != nil { - return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value") - } - } - return buildkitEnabled, nil -} - // ManifestStore returns a store for local manifests func (cli *DockerCli) ManifestStore() manifeststore.Store { // TODO: support override default location from config file diff --git a/cli/command/image/build.go b/cli/command/image/build.go index 64b51113c018..b74f64ae727e 100644 --- a/cli/command/image/build.go +++ b/cli/command/image/build.go @@ -5,7 +5,6 @@ import ( "bufio" "bytes" "context" - "encoding/csv" "encoding/json" "fmt" "io" @@ -57,7 +56,6 @@ type buildOptions struct { isolation string quiet bool noCache bool - progress string rm bool forceRm bool pull bool @@ -71,9 +69,6 @@ type buildOptions struct { stream bool platform string untrusted bool - secrets []string - ssh []string - outputs []string } // dockerfileFromStdin returns true when the user specified that the Dockerfile @@ -118,40 +113,26 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command { flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") flags.Var(options.ulimits, "ulimit", "Ulimit options") - flags.SetAnnotation("ulimit", "no-buildkit", nil) flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") flags.VarP(&options.memory, "memory", "m", 
"Memory limit") - flags.SetAnnotation("memory", "no-buildkit", nil) flags.Var(&options.memorySwap, "memory-swap", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.SetAnnotation("memory-swap", "no-buildkit", nil) flags.Var(&options.shmSize, "shm-size", "Size of /dev/shm") - flags.SetAnnotation("shm-size", "no-buildkit", nil) flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.SetAnnotation("cpu-shares", "no-buildkit", nil) flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") - flags.SetAnnotation("cpu-period", "no-buildkit", nil) flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") - flags.SetAnnotation("cpu-quota", "no-buildkit", nil) flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.SetAnnotation("cpuset-cpus", "no-buildkit", nil) flags.StringVar(&options.cpuSetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.SetAnnotation("cpuset-mems", "no-buildkit", nil) flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.SetAnnotation("cgroup-parent", "no-buildkit", nil) flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") flags.Var(&options.labels, "label", "Set metadata for an image") flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") - flags.SetAnnotation("rm", "no-buildkit", nil) flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") - flags.SetAnnotation("force-rm", "no-buildkit", nil) flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources") flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip") - flags.SetAnnotation("compress", "no-buildkit", nil) flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options") - flags.SetAnnotation("security-opt", "no-buildkit", nil) flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build") flags.SetAnnotation("network", "version", []string{"1.25"}) flags.Var(&options.extraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") @@ -160,10 +141,6 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command { command.AddTrustVerificationFlags(flags, &options.untrusted, dockerCli.ContentTrustEnabled()) - flags.StringVar(&options.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable") - flags.SetAnnotation("platform", "version", []string{"1.38"}) - flags.SetAnnotation("platform", "buildkit", nil) - flags.BoolVar(&options.squash, "squash", false, "Squash newly built layers into a single new layer") flags.SetAnnotation("squash", "experimental", nil) flags.SetAnnotation("squash", "version", []string{"1.25"}) @@ -171,21 +148,6 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command { flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build 
context") flags.MarkHidden("stream") - flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output") - flags.SetAnnotation("progress", "buildkit", nil) - - flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret") - flags.SetAnnotation("secret", "version", []string{"1.39"}) - flags.SetAnnotation("secret", "buildkit", nil) - - flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|[=|[,]])") - flags.SetAnnotation("ssh", "version", []string{"1.39"}) - flags.SetAnnotation("ssh", "buildkit", nil) - - flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: type=local,dest=path)") - flags.SetAnnotation("output", "version", []string{"1.40"}) - flags.SetAnnotation("output", "buildkit", nil) - return cmd } @@ -207,15 +169,8 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error { // nolint: gocyclo func runBuild(dockerCli command.Cli, options buildOptions) error { - buildkitEnabled, err := command.BuildKitEnabled(dockerCli.ServerInfo()) - if err != nil { - return err - } - if buildkitEnabled { - return runBuildBuildKit(dockerCli, options) - } - var ( + err error buildCtx io.ReadCloser dockerfileCtx io.ReadCloser contextDir string @@ -226,6 +181,12 @@ func runBuild(dockerCli command.Cli, options buildOptions) error { remote string ) + if !options.quiet { + _, _ = fmt.Fprint(dockerCli.Out(), `WARNING: The legacy builder is in use and will build your image in an inefficient way. + +`) + } + if options.stream { _, _ = fmt.Fprint(dockerCli.Err(), `DEPRECATED: The experimental --stream flag has been removed and the build context will be sent non-streaming. 
Enable BuildKit instead with DOCKER_BUILDKIT=1 @@ -609,58 +570,3 @@ func imageBuildOptions(dockerCli command.Cli, options buildOptions) types.ImageB Platform: options.platform, } } - -func parseOutputs(inp []string) ([]types.ImageBuildOutput, error) { - var outs []types.ImageBuildOutput - if len(inp) == 0 { - return nil, nil - } - for _, s := range inp { - csvReader := csv.NewReader(strings.NewReader(s)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - if len(fields) == 1 && fields[0] == s && !strings.HasPrefix(s, "type=") { - if s == "-" { - outs = append(outs, types.ImageBuildOutput{ - Type: "tar", - Attrs: map[string]string{ - "dest": s, - }, - }) - } else { - outs = append(outs, types.ImageBuildOutput{ - Type: "local", - Attrs: map[string]string{ - "dest": s, - }, - }) - } - continue - } - - out := types.ImageBuildOutput{ - Attrs: map[string]string{}, - } - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return nil, errors.Errorf("invalid value %s", field) - } - key := strings.ToLower(parts[0]) - value := parts[1] - switch key { - case "type": - out.Type = value - default: - out.Attrs[key] = value - } - } - if out.Type == "" { - return nil, errors.Errorf("type is required for output") - } - outs = append(outs, out) - } - return outs, nil -} diff --git a/cli/command/image/build_buildkit.go b/cli/command/image/build_buildkit.go deleted file mode 100644 index f65514d6e1f3..000000000000 --- a/cli/command/image/build_buildkit.go +++ /dev/null @@ -1,525 +0,0 @@ -package image - -import ( - "bytes" - "context" - "encoding/csv" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path/filepath" - "strings" - - "github.com/containerd/console" - "github.com/containerd/containerd/platforms" - "github.com/docker/cli/cli" - "github.com/docker/cli/cli/command" - "github.com/docker/cli/cli/command/image/build" - "github.com/docker/cli/opts" - "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/urlutil" - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth/authprovider" - "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/session/secrets/secretsprovider" - "github.com/moby/buildkit/session/sshforward/sshprovider" - "github.com/moby/buildkit/util/appcontext" - "github.com/moby/buildkit/util/gitutil" - "github.com/moby/buildkit/util/progress/progressui" - "github.com/moby/buildkit/util/progress/progresswriter" - "github.com/pkg/errors" - fsutiltypes "github.com/tonistiigi/fsutil/types" - "github.com/tonistiigi/go-rosetta" - "golang.org/x/sync/errgroup" -) - -const uploadRequestRemote = "upload-request" - -var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin and flag correspond to Dockerfiles") - -//nolint: gocyclo -func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error { - ctx := appcontext.Context() - - s, err := trySession(dockerCli, options.context, false) - if err != nil { - return err - } - if s == nil { - return errors.Errorf("buildkit not supported by daemon") - } - - if options.imageIDFile != "" { - // Avoid leaving a stale file if we eventually fail - if err := os.Remove(options.imageIDFile); err != nil && !os.IsNotExist(err) { - return errors.Wrap(err, "removing image ID file") - } - } - - var ( - remote string - body 
io.Reader - dockerfileName = options.dockerfileName - dockerfileReader io.ReadCloser - dockerfileDir string - contextDir string - ) - - stdoutUsed := false - - switch { - case options.contextFromStdin(): - if options.dockerfileFromStdin() { - return errStdinConflict - } - rc, isArchive, err := build.DetectArchiveReader(dockerCli.In()) - if err != nil { - return err - } - if isArchive { - body = rc - remote = uploadRequestRemote - } else { - if options.dockerfileName != "" { - return errDockerfileConflict - } - dockerfileReader = rc - remote = clientSessionRemote - // TODO: make fssync handle empty contextdir - contextDir, _ = ioutil.TempDir("", "empty-dir") - defer os.RemoveAll(contextDir) - } - case isLocalDir(options.context): - contextDir = options.context - if options.dockerfileFromStdin() { - dockerfileReader = dockerCli.In() - } else if options.dockerfileName != "" { - dockerfileName = filepath.Base(options.dockerfileName) - dockerfileDir = filepath.Dir(options.dockerfileName) - } else { - dockerfileDir = options.context - } - remote = clientSessionRemote - case urlutil.IsGitURL(options.context): - remote = options.context - case urlutil.IsURL(options.context): - remote = options.context - default: - return errors.Errorf("unable to prepare context: path %q not found", options.context) - } - - if dockerfileReader != nil { - dockerfileName = build.DefaultDockerfileName - dockerfileDir, err = build.WriteTempDockerfile(dockerfileReader) - if err != nil { - return err - } - defer os.RemoveAll(dockerfileDir) - } - - outputs, err := parseOutputs(options.outputs) - if err != nil { - return errors.Wrapf(err, "failed to parse outputs") - } - - for _, out := range outputs { - switch out.Type { - case "local": - // dest is handled on client side for local exporter - outDir, ok := out.Attrs["dest"] - if !ok { - return errors.Errorf("dest is required for local output") - } - delete(out.Attrs, "dest") - s.Allow(filesync.NewFSSyncTargetDir(outDir)) - case "tar": - // dest is handled on client side for tar exporter - outFile, ok := out.Attrs["dest"] - if !ok { - return errors.Errorf("dest is required for tar output") - } - var w io.WriteCloser - if outFile == "-" { - if _, err := console.ConsoleFromFile(os.Stdout); err == nil { - return errors.Errorf("refusing to write output to console") - } - w = os.Stdout - stdoutUsed = true - } else { - f, err := os.Create(outFile) - if err != nil { - return errors.Wrapf(err, "failed to open %s", outFile) - } - w = f - } - output := func(map[string]string) (io.WriteCloser, error) { return w, nil } - s.Allow(filesync.NewFSSyncTarget(output)) - } - } - - if dockerfileDir != "" { - s.Allow(filesync.NewFSSyncProvider([]filesync.SyncedDir{ - { - Name: "context", - Dir: contextDir, - Map: resetUIDAndGID, - }, - { - Name: "dockerfile", - Dir: dockerfileDir, - }, - })) - } - - dockerAuthProvider := authprovider.NewDockerAuthProvider(os.Stderr) - s.Allow(dockerAuthProvider) - if len(options.secrets) > 0 { - sp, err := parseSecretSpecs(options.secrets) - if err != nil { - return errors.Wrapf(err, "could not parse secrets: %v", options.secrets) - } - s.Allow(sp) - } - - sshSpecs := options.ssh - if len(sshSpecs) == 0 && isGitSSH(remote) { - sshSpecs = []string{"default"} - } - if len(sshSpecs) > 0 { - sshp, err := parseSSHSpecs(sshSpecs) - if err != nil { - return errors.Wrapf(err, "could not parse ssh: %v", sshSpecs) - } - s.Allow(sshp) - } - - eg, ctx := errgroup.WithContext(ctx) - - dialSession := func(ctx context.Context, proto string, meta map[string][]string) 
(net.Conn, error) { - return dockerCli.Client().DialHijack(ctx, "/session", proto, meta) - } - eg.Go(func() error { - return s.Run(context.TODO(), dialSession) - }) - - buildID := stringid.GenerateRandomID() - if body != nil { - eg.Go(func() error { - buildOptions := types.ImageBuildOptions{ - Version: types.BuilderBuildKit, - BuildID: uploadRequestRemote + ":" + buildID, - } - - response, err := dockerCli.Client().ImageBuild(context.Background(), body, buildOptions) - if err != nil { - return err - } - defer response.Body.Close() - return nil - }) - } - - if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && options.progress == "auto" { - options.progress = v - } - - if strings.EqualFold(options.platform, "local") { - p := platforms.DefaultSpec() - p.Architecture = rosetta.NativeArch() // current binary architecture might be emulated - options.platform = platforms.Format(p) - } - - eg.Go(func() error { - defer func() { // make sure the Status ends cleanly on build errors - s.Close() - }() - - buildOptions := imageBuildOptions(dockerCli, options) - buildOptions.Version = types.BuilderBuildKit - buildOptions.Dockerfile = dockerfileName - // buildOptions.AuthConfigs = authConfigs // handled by session - buildOptions.RemoteContext = remote - buildOptions.SessionID = s.ID() - buildOptions.BuildID = buildID - buildOptions.Outputs = outputs - return doBuild(ctx, eg, dockerCli, stdoutUsed, options, buildOptions, dockerAuthProvider) - }) - - return eg.Wait() -} - -//nolint: gocyclo -func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, stdoutUsed bool, options buildOptions, buildOptions types.ImageBuildOptions, at session.Attachable) (finalErr error) { - response, err := dockerCli.Client().ImageBuild(context.Background(), nil, buildOptions) - if err != nil { - return err - } - defer response.Body.Close() - - done := make(chan struct{}) - defer close(done) - eg.Go(func() error { - select { - case <-ctx.Done(): - return dockerCli.Client().BuildCancel(context.TODO(), buildOptions.BuildID) - case <-done: - } - return nil - }) - - t := newTracer() - ssArr := []*client.SolveStatus{} - - if err := opts.ValidateProgressOutput(options.progress); err != nil { - return err - } - - displayStatus := func(out *os.File, displayCh chan *client.SolveStatus) { - var c console.Console - // TODO: Handle tty output in non-tty environment. 
- if cons, err := console.ConsoleFromFile(out); err == nil && (options.progress == "auto" || options.progress == "tty") { - c = cons - } - // not using shared context to not disrupt display but let it finish reporting errors - eg.Go(func() error { - return progressui.DisplaySolveStatus(context.TODO(), "", c, out, displayCh) - }) - if s, ok := at.(interface { - SetLogger(progresswriter.Logger) - }); ok { - s.SetLogger(func(s *client.SolveStatus) { - displayCh <- s - }) - } - } - - if options.quiet { - eg.Go(func() error { - // TODO: make sure t.displayCh closes - for ss := range t.displayCh { - ssArr = append(ssArr, ss) - } - <-done - // TODO: verify that finalErr is indeed set when error occurs - if finalErr != nil { - displayCh := make(chan *client.SolveStatus) - go func() { - for _, ss := range ssArr { - displayCh <- ss - } - close(displayCh) - }() - displayStatus(os.Stderr, displayCh) - } - return nil - }) - } else { - displayStatus(os.Stderr, t.displayCh) - } - defer close(t.displayCh) - - buf := bytes.NewBuffer(nil) - - imageID := "" - writeAux := func(msg jsonmessage.JSONMessage) { - if msg.ID == "moby.image.id" { - var result types.BuildResult - if err := json.Unmarshal(*msg.Aux, &result); err != nil { - fmt.Fprintf(dockerCli.Err(), "failed to parse aux message: %v", err) - } - imageID = result.ID - return - } - t.write(msg) - } - - err = jsonmessage.DisplayJSONMessagesStream(response.Body, buf, dockerCli.Out().FD(), dockerCli.Out().IsTerminal(), writeAux) - if err != nil { - if jerr, ok := err.(*jsonmessage.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - // - // TODO: we may want to use Aux messages with ID "moby.image.id" regardless of options.quiet (i.e. don't send HTTP param q=1) - // instead of assuming that output is image ID if options.quiet. 
- if options.quiet && !stdoutUsed { - imageID = buf.String() - fmt.Fprint(dockerCli.Out(), imageID) - } - - if options.imageIDFile != "" { - if imageID == "" { - return errors.Errorf("cannot write %s because server did not provide an image ID", options.imageIDFile) - } - imageID = strings.TrimSpace(imageID) - if err := ioutil.WriteFile(options.imageIDFile, []byte(imageID), 0666); err != nil { - return errors.Wrap(err, "cannot write image ID file") - } - } - return err -} - -func resetUIDAndGID(_ string, s *fsutiltypes.Stat) bool { - s.Uid = 0 - s.Gid = 0 - return true -} - -type tracer struct { - displayCh chan *client.SolveStatus -} - -func newTracer() *tracer { - return &tracer{ - displayCh: make(chan *client.SolveStatus), - } -} - -func (t *tracer) write(msg jsonmessage.JSONMessage) { - var resp controlapi.StatusResponse - - if msg.ID != "moby.buildkit.trace" { - return - } - - var dt []byte - // ignoring all messages that are not understood - if err := json.Unmarshal(*msg.Aux, &dt); err != nil { - return - } - if err := (&resp).Unmarshal(dt); err != nil { - return - } - - s := client.SolveStatus{} - for _, v := range resp.Vertexes { - s.Vertexes = append(s.Vertexes, &client.Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - }) - } - for _, v := range resp.Statuses { - s.Statuses = append(s.Statuses, &client.VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Total: v.Total, - Current: v.Current, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for _, v := range resp.Logs { - s.Logs = append(s.Logs, &client.VertexLog{ - Vertex: v.Vertex, - Stream: int(v.Stream), - Data: v.Msg, - Timestamp: v.Timestamp, - }) - } - - t.displayCh <- &s -} - -func parseSecretSpecs(sl []string) (session.Attachable, error) { - fs := make([]secretsprovider.Source, 0, len(sl)) - for _, v := range sl { - s, err := parseSecret(v) - if err != nil { - return nil, err - } - fs = append(fs, *s) - } - store, err := secretsprovider.NewStore(fs) - if err != nil { - return nil, err - } - return secretsprovider.NewSecretProvider(store), nil -} - -func parseSecret(value string) (*secretsprovider.Source, error) { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return nil, errors.Wrap(err, "failed to parse csv secret") - } - - fs := secretsprovider.Source{} - - var typ string - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return nil, errors.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - if value != "file" && value != "env" { - return nil, errors.Errorf("unsupported secret type %q", value) - } - typ = value - case "id": - fs.ID = value - case "source", "src": - fs.FilePath = value - case "env": - fs.Env = value - default: - return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - if typ == "env" && fs.Env == "" { - fs.Env = fs.FilePath - fs.FilePath = "" - } - return &fs, nil -} - -func parseSSHSpecs(sl []string) (session.Attachable, error) { - configs := make([]sshprovider.AgentConfig, 0, len(sl)) - for _, v := range sl { - c := parseSSH(v) - configs = append(configs, *c) - } - return sshprovider.NewSSHAgentProvider(configs) -} - -func parseSSH(value string) *sshprovider.AgentConfig { - parts := strings.SplitN(value, "=", 2) - cfg := 
sshprovider.AgentConfig{ - ID: parts[0], - } - if len(parts) > 1 { - cfg.Paths = strings.Split(parts[1], ",") - } - return &cfg -} - -func isGitSSH(url string) bool { - _, gitProtocol := gitutil.ParseProtocol(url) - return gitProtocol == gitutil.SSHProtocol -} diff --git a/cli/command/image/build_session.go b/cli/command/image/build_session.go deleted file mode 100644 index 0f15f51fb456..000000000000 --- a/cli/command/image/build_session.go +++ /dev/null @@ -1,69 +0,0 @@ -package image - -import ( - "context" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/cli/cli/command" - cliconfig "github.com/docker/cli/cli/config" - "github.com/docker/docker/api/types/versions" - "github.com/moby/buildkit/session" - "github.com/pkg/errors" -) - -const clientSessionRemote = "client-session" - -func isSessionSupported(dockerCli command.Cli, forStream bool) bool { - if !forStream && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") { - return true - } - return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31") -} - -func trySession(dockerCli command.Cli, contextDir string, forStream bool) (*session.Session, error) { - if !isSessionSupported(dockerCli, forStream) { - return nil, nil - } - sharedKey := getBuildSharedKey(contextDir) - s, err := session.NewSession(context.Background(), filepath.Base(contextDir), sharedKey) - if err != nil { - return nil, errors.Wrap(err, "failed to create session") - } - return s, nil -} - -func getBuildSharedKey(dir string) string { - // build session is hash of build dir with node based randomness - s := sha256.Sum256([]byte(fmt.Sprintf("%s:%s", tryNodeIdentifier(), dir))) - return hex.EncodeToString(s[:]) -} - -func tryNodeIdentifier() string { - out := cliconfig.Dir() // return config dir as default on permission error - if err := os.MkdirAll(cliconfig.Dir(), 0700); err == nil { - sessionFile := filepath.Join(cliconfig.Dir(), ".buildNodeID") - if _, err := os.Lstat(sessionFile); err != nil { - if os.IsNotExist(err) { // create a new file with stored randomness - b := make([]byte, 32) - if _, err := rand.Read(b); err != nil { - return out - } - if err := ioutil.WriteFile(sessionFile, []byte(hex.EncodeToString(b)), 0600); err != nil { - return out - } - } - } - - dt, err := ioutil.ReadFile(sessionFile) - if err == nil { - return string(dt) - } - } - return out -} diff --git a/cli/command/image/build_test.go b/cli/command/image/build_test.go index 4317a74941fa..5c9d814c767c 100644 --- a/cli/command/image/build_test.go +++ b/cli/command/image/build_test.go @@ -5,7 +5,6 @@ import ( "bytes" "compress/gzip" "context" - "fmt" "io" "io/ioutil" "os" @@ -18,7 +17,6 @@ import ( "github.com/docker/docker/api/types" "github.com/docker/docker/pkg/archive" "github.com/google/go-cmp/cmp" - "github.com/moby/buildkit/session/secrets/secretsprovider" "gotest.tools/v3/assert" "gotest.tools/v3/env" "gotest.tools/v3/fs" @@ -182,75 +180,6 @@ RUN echo hello world assert.DeepEqual(t, fakeBuild.filenames(t), []string{"Dockerfile"}) } -func TestParseSecret(t *testing.T) { - type testcase struct { - value string - errExpected bool - errMatch string - source *secretsprovider.Source - } - var testcases = []testcase{ - { - value: "", - errExpected: true, - }, { - value: "foobar", - errExpected: true, - errMatch: "must be a key=value pair", - }, { - value: "foo,bar", - errExpected: true, - errMatch: "must be a key=value pair", - }, { - value: 
"foo=bar", - errExpected: true, - errMatch: "unexpected key", - }, { - value: "src=somefile", - source: &secretsprovider.Source{FilePath: "somefile"}, - }, { - value: "source=somefile", - source: &secretsprovider.Source{FilePath: "somefile"}, - }, { - value: "id=mysecret", - source: &secretsprovider.Source{ID: "mysecret"}, - }, { - value: "id=mysecret,src=somefile", - source: &secretsprovider.Source{ID: "mysecret", FilePath: "somefile"}, - }, { - value: "id=mysecret,source=somefile,type=file", - source: &secretsprovider.Source{ID: "mysecret", FilePath: "somefile"}, - }, { - value: "id=mysecret,src=somefile,src=othersecretfile", - source: &secretsprovider.Source{ID: "mysecret", FilePath: "othersecretfile"}, - }, { - value: "id=mysecret,src=somefile,env=SECRET", - source: &secretsprovider.Source{ID: "mysecret", FilePath: "somefile", Env: "SECRET"}, - }, { - value: "type=file", - source: &secretsprovider.Source{}, - }, { - value: "type=env", - source: &secretsprovider.Source{}, - }, { - value: "type=invalid", - errExpected: true, - errMatch: "unsupported secret type", - }, - } - - for _, tc := range testcases { - t.Run(tc.value, func(t *testing.T) { - secret, err := parseSecret(tc.value) - assert.Equal(t, err != nil, tc.errExpected, fmt.Sprintf("err=%v errExpected=%t", err, tc.errExpected)) - if tc.errMatch != "" { - assert.ErrorContains(t, err, tc.errMatch) - } - assert.DeepEqual(t, secret, tc.source) - }) - } -} - type fakeBuild struct { context *tar.Reader options types.ImageBuildOptions diff --git a/cmd/docker/aliases.go b/cmd/docker/aliases.go new file mode 100644 index 000000000000..3abc6d96b7c2 --- /dev/null +++ b/cmd/docker/aliases.go @@ -0,0 +1,50 @@ +package main + +import ( + "os" + "strings" + + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +const ( + keyBuilderAlias = "builder" +) + +var allowedAliases = map[string]struct{}{ + keyBuilderAlias: {}, +} + +func processAliases(dockerCli command.Cli, cmd *cobra.Command, args, osArgs []string) ([]string, []string, error) { + var err error + aliasMap := dockerCli.ConfigFile().Aliases + aliases := make([][2][]string, 0, len(aliasMap)) + + for k, v := range aliasMap { + if _, ok := allowedAliases[k]; !ok { + return args, osArgs, errors.Errorf("Not allowed to alias %q. Allowed aliases: %#v", k, allowedAliases) + } + if _, _, err := cmd.Find(strings.Split(v, " ")); err == nil { + return args, osArgs, errors.Errorf("Not allowed to alias with builtin %q as target", v) + } + aliases = append(aliases, [2][]string{{k}, {v}}) + } + + args, osArgs, err = processBuilder(dockerCli, cmd, args, os.Args) + if err != nil { + return args, os.Args, err + } + + for _, al := range aliases { + var didChange bool + args, didChange = command.StringSliceReplaceAt(args, al[0], al[1], 0) + if didChange { + osArgs, _ = command.StringSliceReplaceAt(osArgs, al[0], al[1], -1) + break + } + } + + return args, osArgs, nil +} diff --git a/cmd/docker/builder.go b/cmd/docker/builder.go new file mode 100644 index 000000000000..48c9d1a2d6fd --- /dev/null +++ b/cmd/docker/builder.go @@ -0,0 +1,111 @@ +package main + +import ( + "bytes" + "fmt" + "os" + "strconv" + + pluginmanager "github.com/docker/cli/cli-plugins/manager" + "github.com/docker/cli/cli/command" + "github.com/pkg/errors" + "github.com/spf13/cobra" +) + +const ( + builderDefaultPlugin = "buildx" + builderDefaultInstallMsg = `To install buildx, see + https://docs.docker.com/buildx/working-with-buildx/#install. 
You + can also fallback to the legacy builder by setting DOCKER_BUILDKIT=0` + + builderErrorMsg = "ERROR: Missing builder component %s." +) + +type builderError struct { + builder string + err error +} + +func (e *builderError) Error() string { + var errorMsg bytes.Buffer + errorMsg.WriteString(fmt.Sprintf(builderErrorMsg, e.builder)) + if e.builder == builderDefaultPlugin { + errorMsg.WriteString(" ") + errorMsg.WriteString(builderDefaultInstallMsg) + } + if pluginmanager.IsNotFound(e.err) { + return errors.New(errorMsg.String()).Error() + } + return errors.Errorf("%v\n\n%s", e.err, errorMsg.String()).Error() +} + +func processBuilder(dockerCli command.Cli, cmd *cobra.Command, args, osArgs []string) ([]string, []string, error) { + // check DOCKER_BUILDKIT env var is present and + // if not assume we want to use buildx + if v, ok := os.LookupEnv("DOCKER_BUILDKIT"); ok { + enabled, err := strconv.ParseBool(v) + if err != nil { + return args, osArgs, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value") + } + if !enabled { + return args, osArgs, nil + } + } + + // if a builder alias is defined but not buildx, use this one + builderAlias := builderDefaultPlugin + aliasMap := dockerCli.ConfigFile().Aliases + if v, ok := aliasMap[keyBuilderAlias]; ok { + builderAlias = v + } + + // wcow build command must use the legacy builder + if dockerCli.ServerInfo().OSType == "windows" { + return args, osArgs, nil + } + + // buildx aliases + aliases := [][2][]string{ + { + {"builder"}, + {builderAlias}, + }, + { + {"build"}, + {builderAlias, "build"}, + }, + { + {"image", "build"}, + {builderAlias, "build"}, + }, + } + + // are we using a cmd that should be forwarded to the builder? + var forwarded bool + for _, al := range aliases { + var didChange bool + args, didChange = command.StringSliceReplaceAt(args, al[0], al[1], 0) + if didChange { + forwarded = true + osArgs, _ = command.StringSliceReplaceAt(osArgs, al[0], al[1], -1) + break + } + } + if !forwarded { + return args, osArgs, nil + } + + // check plugin is available if cmd forwarded + plugin, perr := pluginmanager.GetPlugin(builderAlias, dockerCli, cmd.Root()) + if perr == nil && plugin != nil { + perr = plugin.Err + } + if perr != nil { + return args, osArgs, &builderError{ + builder: builderAlias, + err: perr, + } + } + + return args, osArgs, nil +} diff --git a/cmd/docker/builder_test.go b/cmd/docker/builder_test.go new file mode 100644 index 000000000000..3c70b8c45df4 --- /dev/null +++ b/cmd/docker/builder_test.go @@ -0,0 +1,46 @@ +package main + +import ( + "bytes" + "os" + "testing" + + "github.com/docker/cli/cli/command" + "gotest.tools/v3/assert" + "gotest.tools/v3/env" +) + +func TestBuild(t *testing.T) { + var b bytes.Buffer + + dockerCli, err := command.NewDockerCli(command.WithInputStream(discard), command.WithCombinedStreams(&b)) + assert.NilError(t, err) + + tcmd := newDockerCommand(dockerCli) + tcmd.SetArgs([]string{"build", "."}) + + cmd, args, err := tcmd.HandleGlobalFlags() + assert.NilError(t, err) + + args, os.Args, err = processBuilder(dockerCli, cmd, args, os.Args) + assert.NilError(t, err) + assert.DeepEqual(t, []string{builderDefaultPlugin, "build", "."}, args) +} + +func TestBuildkitDisabled(t *testing.T) { + defer env.Patch(t, "DOCKER_BUILDKIT", "0")() + var b bytes.Buffer + + dockerCli, err := command.NewDockerCli(command.WithInputStream(discard), command.WithCombinedStreams(&b)) + assert.NilError(t, err) + + tcmd := newDockerCommand(dockerCli) + tcmd.SetArgs([]string{"build", "."}) + + cmd, 
args, err := tcmd.HandleGlobalFlags() + assert.NilError(t, err) + + args, os.Args, err = processBuilder(dockerCli, cmd, args, os.Args) + assert.NilError(t, err) + assert.DeepEqual(t, []string{"build", "."}, args) +} diff --git a/cmd/docker/docker.go b/cmd/docker/docker.go index 90945f0c478d..acd094fd9533 100644 --- a/cmd/docker/docker.go +++ b/cmd/docker/docker.go @@ -22,10 +22,6 @@ import ( "github.com/spf13/pflag" ) -var allowedAliases = map[string]struct{}{ - "builder": {}, -} - func newDockerCommand(dockerCli *command.DockerCli) *cli.TopLevelCommand { var ( opts *cliflags.ClientOptions @@ -220,38 +216,6 @@ func tryPluginRun(dockerCli command.Cli, cmd *cobra.Command, subcommand string) return nil } -func processAliases(dockerCli command.Cli, cmd *cobra.Command, args, osArgs []string) ([]string, []string, error) { - aliasMap := dockerCli.ConfigFile().Aliases - aliases := make([][2][]string, 0, len(aliasMap)) - - for k, v := range aliasMap { - if _, ok := allowedAliases[k]; !ok { - return args, osArgs, errors.Errorf("Not allowed to alias %q. Allowed aliases: %#v", k, allowedAliases) - } - if _, _, err := cmd.Find(strings.Split(v, " ")); err == nil { - return args, osArgs, errors.Errorf("Not allowed to alias with builtin %q as target", v) - } - aliases = append(aliases, [2][]string{{k}, {v}}) - } - - if v, ok := aliasMap["builder"]; ok { - aliases = append(aliases, - [2][]string{{"build"}, {v, "build"}}, - [2][]string{{"image", "build"}, {v, "build"}}, - ) - } - for _, al := range aliases { - var didChange bool - args, didChange = command.StringSliceReplaceAt(args, al[0], al[1], 0) - if didChange { - osArgs, _ = command.StringSliceReplaceAt(osArgs, al[0], al[1], -1) - break - } - } - - return args, osArgs, nil -} - func runDocker(dockerCli *command.DockerCli) error { tcmd := newDockerCommand(dockerCli) @@ -346,8 +310,6 @@ func hideSubcommandIf(subcmd *cobra.Command, condition func(string) bool, annota func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) error { var ( - buildKitDisabled = func(_ string) bool { v, _ := command.BuildKitEnabled(details.ServerInfo()); return !v } - buildKitEnabled = func(_ string) bool { v, _ := command.BuildKitEnabled(details.ServerInfo()); return v } notExperimental = func(_ string) bool { return !details.ServerInfo().HasExperimental } notOSType = func(v string) bool { return v != details.ServerInfo().OSType } versionOlderThan = func(v string) bool { return versions.LessThan(details.Client().ClientVersion(), v) } @@ -365,16 +327,12 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) error { } } - hideFlagIf(f, buildKitDisabled, "buildkit") - hideFlagIf(f, buildKitEnabled, "no-buildkit") hideFlagIf(f, notExperimental, "experimental") hideFlagIf(f, notOSType, "ostype") hideFlagIf(f, versionOlderThan, "version") }) for _, subcmd := range cmd.Commands() { - hideSubcommandIf(subcmd, buildKitDisabled, "buildkit") - hideSubcommandIf(subcmd, buildKitEnabled, "no-buildkit") hideSubcommandIf(subcmd, notExperimental, "experimental") hideSubcommandIf(subcmd, notOSType, "ostype") hideSubcommandIf(subcmd, versionOlderThan, "version") diff --git a/dockerfiles/Dockerfile.dev b/dockerfiles/Dockerfile.dev index fd7af1928570..3278ddfc9137 100644 --- a/dockerfiles/Dockerfile.dev +++ b/dockerfiles/Dockerfile.dev @@ -2,6 +2,9 @@ ARG GO_VERSION=1.16.8 +ARG BUILDX_VERSION=0.6.3 +FROM docker/buildx-bin:${BUILDX_VERSION} AS buildx + FROM golang:${GO_VERSION}-alpine AS golang ENV CGO_ENABLED=0 @@ -39,8 +42,9 @@ CMD bash ENV 
DISABLE_WARN_OUTSIDE_CONTAINER=1 ENV PATH=$PATH:/go/src/github.com/docker/cli/build -COPY --from=vndr /go/bin/* /go/bin/ -COPY --from=gotestsum /go/bin/* /go/bin/ +COPY --from=buildx /buildx /usr/libexec/docker/cli-plugins/docker-buildx +COPY --from=vndr /go/bin/* /go/bin/ +COPY --from=gotestsum /go/bin/* /go/bin/ COPY --from=goversioninfo /go/bin/* /go/bin/ WORKDIR /go/src/github.com/docker/cli diff --git a/dockerfiles/Dockerfile.e2e b/dockerfiles/Dockerfile.e2e index a70b484e2562..e7201db28d4b 100644 --- a/dockerfiles/Dockerfile.e2e +++ b/dockerfiles/Dockerfile.e2e @@ -1,5 +1,8 @@ ARG GO_VERSION=1.16.8 +ARG BUILDX_VERSION=0.6.3 +FROM docker/buildx-bin:${BUILDX_VERSION} AS buildx + # Use Debian based image as docker-compose requires glibc. FROM golang:${GO_VERSION}-buster @@ -10,6 +13,9 @@ RUN apt-get update && apt-get install -y \ openssh-client \ && rm -rf /var/lib/apt/lists/* +# install buildx +COPY --from=buildx /buildx /usr/libexec/docker/cli-plugins/docker-buildx + ARG COMPOSE_VERSION=1.29.2 RUN curl -fsSL https://github.com/docker/compose/releases/download/${COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` -o /usr/local/bin/docker-compose \ && chmod +x /usr/local/bin/docker-compose diff --git a/e2e/image/build_test.go b/e2e/image/build_test.go index c92c0b0d8959..0d4866454de1 100644 --- a/e2e/image/build_test.go +++ b/e2e/image/build_test.go @@ -12,12 +12,15 @@ import ( "github.com/docker/cli/internal/test/output" "gotest.tools/v3/assert" is "gotest.tools/v3/assert/cmp" + "gotest.tools/v3/env" "gotest.tools/v3/fs" "gotest.tools/v3/icmd" "gotest.tools/v3/skip" ) func TestBuildFromContextDirectoryWithTag(t *testing.T) { + defer env.Patch(t, "DOCKER_BUILDKIT", "0")() + dir := fs.NewDir(t, "test-build-context-dir", fs.WithFile("run", "echo running", fs.WithMode(0755)), fs.WithDir("data", fs.WithFile("one", "1111")), @@ -36,20 +39,22 @@ func TestBuildFromContextDirectoryWithTag(t *testing.T) { result.Assert(t, icmd.Expected{Err: icmd.None}) output.Assert(t, result.Stdout(), map[int]func(string) error{ - 0: output.Prefix("Sending build context to Docker daemon"), - 1: output.Suffix("Step 1/4 : FROM registry:5000/alpine:3.6"), - 3: output.Suffix("Step 2/4 : COPY run /usr/bin/run"), - 5: output.Suffix("Step 3/4 : RUN run"), - 7: output.Suffix("running"), - 8: output.Contains("Removing intermediate container"), - 10: output.Suffix("Step 4/4 : COPY data /data"), - 12: output.Contains("Successfully built "), - 13: output.Suffix("Successfully tagged myimage:latest"), + 0: output.Equals("WARNING: The legacy builder is in use and will build your image in an inefficient way."), + 2: output.Prefix("Sending build context to Docker daemon"), + 3: output.Suffix("Step 1/4 : FROM registry:5000/alpine:3.6"), + 5: output.Suffix("Step 2/4 : COPY run /usr/bin/run"), + 7: output.Suffix("Step 3/4 : RUN run"), + 9: output.Suffix("running"), + 10: output.Contains("Removing intermediate container"), + 12: output.Suffix("Step 4/4 : COPY data /data"), + 14: output.Contains("Successfully built "), + 15: output.Suffix("Successfully tagged myimage:latest"), }) } func TestTrustedBuild(t *testing.T) { skip.If(t, environment.RemoteDaemon()) + defer env.Patch(t, "DOCKER_BUILDKIT", "0")() dir := fixtures.SetupConfigFile(t) defer dir.Remove() @@ -84,6 +89,7 @@ func TestTrustedBuild(t *testing.T) { func TestTrustedBuildUntrustedImage(t *testing.T) { skip.If(t, environment.RemoteDaemon()) + defer env.Patch(t, "DOCKER_BUILDKIT", "0")() dir := fixtures.SetupConfigFile(t) defer dir.Remove() @@ -110,6 +116,8 @@ func 
TestTrustedBuildUntrustedImage(t *testing.T) { func TestBuildIidFileSquash(t *testing.T) { environment.SkipIfNotExperimentalDaemon(t) + defer env.Patch(t, "DOCKER_BUILDKIT", "0")() + dir := fs.NewDir(t, "test-iidfile-squash") defer dir.Remove() iidfile := filepath.Join(dir.Path(), "idsquash") diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE deleted file mode 100644 index 584149b6ee28..000000000000 --- a/vendor/github.com/containerd/console/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md deleted file mode 100644 index 580b461a73db..000000000000 --- a/vendor/github.com/containerd/console/README.md +++ /dev/null @@ -1,29 +0,0 @@ -# console - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/console)](https://pkg.go.dev/github.com/containerd/console) -[![Build Status](https://github.com/containerd/console/workflows/CI/badge.svg)](https://github.com/containerd/console/actions?query=workflow%3ACI) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/console)](https://goreportcard.com/report/github.com/containerd/console) - -Golang package for dealing with consoles. Light on deps and a simple API. - -## Modifying the current process - -```go -current := console.Current() -defer current.Reset() - -if err := current.SetRaw(); err != nil { -} -ws, err := current.Size() -current.Resize(ws) -``` - -## Project details - -console is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go deleted file mode 100644 index f989d28a41cd..000000000000 --- a/vendor/github.com/containerd/console/console.go +++ /dev/null @@ -1,87 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package console - -import ( - "errors" - "io" - "os" -) - -var ErrNotAConsole = errors.New("provided file is not a console") - -type File interface { - io.ReadWriteCloser - - // Fd returns its file descriptor - Fd() uintptr - // Name returns its file name - Name() string -} - -type Console interface { - File - - // Resize resizes the console to the provided window size - Resize(WinSize) error - // ResizeFrom resizes the calling console to the size of the - // provided console - ResizeFrom(Console) error - // SetRaw sets the console in raw mode - SetRaw() error - // DisableEcho disables echo on the console - DisableEcho() error - // Reset restores the console to its orignal state - Reset() error - // Size returns the window size of the console - Size() (WinSize, error) -} - -// WinSize specifies the window size of the console -type WinSize struct { - // Height of the console - Height uint16 - // Width of the console - Width uint16 - x uint16 - y uint16 -} - -// Current returns the current process' console -func Current() (c Console) { - var err error - // Usually all three streams (stdin, stdout, and stderr) - // are open to the same console, but some might be redirected, - // so try all three. - for _, s := range []*os.File{os.Stderr, os.Stdout, os.Stdin} { - if c, err = ConsoleFromFile(s); err == nil { - return c - } - } - // One of the std streams should always be a console - // for the design of this function. - panic(err) -} - -// ConsoleFromFile returns a console using the provided file -// nolint:golint -func ConsoleFromFile(f File) (Console, error) { - if err := checkConsole(f); err != nil { - return nil, err - } - return newMaster(f) -} diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go deleted file mode 100644 index c1c839ee3ae0..000000000000 --- a/vendor/github.com/containerd/console/console_linux.go +++ /dev/null @@ -1,280 +0,0 @@ -// +build linux - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "io" - "os" - "sync" - - "golang.org/x/sys/unix" -) - -const ( - maxEvents = 128 -) - -// Epoller manages multiple epoll consoles using edge-triggered epoll api so we -// dont have to deal with repeated wake-up of EPOLLER or EPOLLHUP. 
-// For more details, see: -// - https://github.com/systemd/systemd/pull/4262 -// - https://github.com/moby/moby/issues/27202 -// -// Example usage of Epoller and EpollConsole can be as follow: -// -// epoller, _ := NewEpoller() -// epollConsole, _ := epoller.Add(console) -// go epoller.Wait() -// var ( -// b bytes.Buffer -// wg sync.WaitGroup -// ) -// wg.Add(1) -// go func() { -// io.Copy(&b, epollConsole) -// wg.Done() -// }() -// // perform I/O on the console -// epollConsole.Shutdown(epoller.CloseConsole) -// wg.Wait() -// epollConsole.Close() -type Epoller struct { - efd int - mu sync.Mutex - fdMapping map[int]*EpollConsole - closeOnce sync.Once -} - -// NewEpoller returns an instance of epoller with a valid epoll fd. -func NewEpoller() (*Epoller, error) { - efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC) - if err != nil { - return nil, err - } - return &Epoller{ - efd: efd, - fdMapping: make(map[int]*EpollConsole), - }, nil -} - -// Add creates an epoll console based on the provided console. The console will -// be registered with EPOLLET (i.e. using edge-triggered notification) and its -// file descriptor will be set to non-blocking mode. After this, user should use -// the return console to perform I/O. -func (e *Epoller) Add(console Console) (*EpollConsole, error) { - sysfd := int(console.Fd()) - // Set sysfd to non-blocking mode - if err := unix.SetNonblock(sysfd, true); err != nil { - return nil, err - } - - ev := unix.EpollEvent{ - Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET, - Fd: int32(sysfd), - } - if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil { - return nil, err - } - ef := &EpollConsole{ - Console: console, - sysfd: sysfd, - readc: sync.NewCond(&sync.Mutex{}), - writec: sync.NewCond(&sync.Mutex{}), - } - e.mu.Lock() - e.fdMapping[sysfd] = ef - e.mu.Unlock() - return ef, nil -} - -// Wait starts the loop to wait for its consoles' notifications and signal -// appropriate console that it can perform I/O. -func (e *Epoller) Wait() error { - events := make([]unix.EpollEvent, maxEvents) - for { - n, err := unix.EpollWait(e.efd, events, -1) - if err != nil { - // EINTR: The call was interrupted by a signal handler before either - // any of the requested events occurred or the timeout expired - if err == unix.EINTR { - continue - } - return err - } - for i := 0; i < n; i++ { - ev := &events[i] - // the console is ready to be read from - if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 { - if epfile := e.getConsole(int(ev.Fd)); epfile != nil { - epfile.signalRead() - } - } - // the console is ready to be written to - if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 { - if epfile := e.getConsole(int(ev.Fd)); epfile != nil { - epfile.signalWrite() - } - } - } - } -} - -// CloseConsole unregisters the console's file descriptor from epoll interface -func (e *Epoller) CloseConsole(fd int) error { - e.mu.Lock() - defer e.mu.Unlock() - delete(e.fdMapping, fd) - return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{}) -} - -func (e *Epoller) getConsole(sysfd int) *EpollConsole { - e.mu.Lock() - f := e.fdMapping[sysfd] - e.mu.Unlock() - return f -} - -// Close closes the epoll fd -func (e *Epoller) Close() error { - closeErr := os.ErrClosed // default to "file already closed" - e.closeOnce.Do(func() { - closeErr = unix.Close(e.efd) - }) - return closeErr -} - -// EpollConsole acts like a console but registers its file descriptor with an -// epoll fd and uses epoll API to perform I/O. 
-type EpollConsole struct { - Console - readc *sync.Cond - writec *sync.Cond - sysfd int - closed bool -} - -// Read reads up to len(p) bytes into p. It returns the number of bytes read -// (0 <= n <= len(p)) and any error encountered. -// -// If the console's read returns EAGAIN or EIO, we assume that it's a -// temporary error because the other side went away and wait for the signal -// generated by epoll event to continue. -func (ec *EpollConsole) Read(p []byte) (n int, err error) { - var read int - ec.readc.L.Lock() - defer ec.readc.L.Unlock() - for { - read, err = ec.Console.Read(p[n:]) - n += read - if err != nil { - var hangup bool - if perr, ok := err.(*os.PathError); ok { - hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) - } else { - hangup = (err == unix.EAGAIN || err == unix.EIO) - } - // if the other end disappear, assume this is temporary and wait for the - // signal to continue again. Unless we didnt read anything and the - // console is already marked as closed then we should exit - if hangup && !(n == 0 && len(p) > 0 && ec.closed) { - ec.readc.Wait() - continue - } - } - break - } - // if we didnt read anything then return io.EOF to end gracefully - if n == 0 && len(p) > 0 && err == nil { - err = io.EOF - } - // signal for others that we finished the read - ec.readc.Signal() - return n, err -} - -// Writes len(p) bytes from p to the console. It returns the number of bytes -// written from p (0 <= n <= len(p)) and any error encountered that caused -// the write to stop early. -// -// If writes to the console returns EAGAIN or EIO, we assume that it's a -// temporary error because the other side went away and wait for the signal -// generated by epoll event to continue. -func (ec *EpollConsole) Write(p []byte) (n int, err error) { - var written int - ec.writec.L.Lock() - defer ec.writec.L.Unlock() - for { - written, err = ec.Console.Write(p[n:]) - n += written - if err != nil { - var hangup bool - if perr, ok := err.(*os.PathError); ok { - hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO) - } else { - hangup = (err == unix.EAGAIN || err == unix.EIO) - } - // if the other end disappears, assume this is temporary and wait for the - // signal to continue again. - if hangup { - ec.writec.Wait() - continue - } - } - // unrecoverable error, break the loop and return the error - break - } - if n < len(p) && err == nil { - err = io.ErrShortWrite - } - // signal for others that we finished the write - ec.writec.Signal() - return n, err -} - -// Shutdown closes the file descriptor and signals call waiters for this fd. -// It accepts a callback which will be called with the console's fd. The -// callback typically will be used to do further cleanup such as unregister the -// console's fd from the epoll interface. -// User should call Shutdown and wait for all I/O operation to be finished -// before closing the console. -func (ec *EpollConsole) Shutdown(close func(int) error) error { - ec.readc.L.Lock() - defer ec.readc.L.Unlock() - ec.writec.L.Lock() - defer ec.writec.L.Unlock() - - ec.readc.Broadcast() - ec.writec.Broadcast() - ec.closed = true - return close(ec.sysfd) -} - -// signalRead signals that the console is readable. -func (ec *EpollConsole) signalRead() { - ec.readc.L.Lock() - ec.readc.Signal() - ec.readc.L.Unlock() -} - -// signalWrite signals that the console is writable. 
-func (ec *EpollConsole) signalWrite() { - ec.writec.L.Lock() - ec.writec.Signal() - ec.writec.L.Unlock() -} diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go deleted file mode 100644 index a08117695e32..000000000000 --- a/vendor/github.com/containerd/console/console_unix.go +++ /dev/null @@ -1,156 +0,0 @@ -// +build darwin freebsd linux netbsd openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "golang.org/x/sys/unix" -) - -// NewPty creates a new pty pair -// The master is returned as the first console and a string -// with the path to the pty slave is returned as the second -func NewPty() (Console, string, error) { - f, err := openpt() - if err != nil { - return nil, "", err - } - slave, err := ptsname(f) - if err != nil { - return nil, "", err - } - if err := unlockpt(f); err != nil { - return nil, "", err - } - m, err := newMaster(f) - if err != nil { - return nil, "", err - } - return m, slave, nil -} - -type master struct { - f File - original *unix.Termios -} - -func (m *master) Read(b []byte) (int, error) { - return m.f.Read(b) -} - -func (m *master) Write(b []byte) (int, error) { - return m.f.Write(b) -} - -func (m *master) Close() error { - return m.f.Close() -} - -func (m *master) Resize(ws WinSize) error { - return tcswinsz(m.f.Fd(), ws) -} - -func (m *master) ResizeFrom(c Console) error { - ws, err := c.Size() - if err != nil { - return err - } - return m.Resize(ws) -} - -func (m *master) Reset() error { - if m.original == nil { - return nil - } - return tcset(m.f.Fd(), m.original) -} - -func (m *master) getCurrent() (unix.Termios, error) { - var termios unix.Termios - if err := tcget(m.f.Fd(), &termios); err != nil { - return unix.Termios{}, err - } - return termios, nil -} - -func (m *master) SetRaw() error { - rawState, err := m.getCurrent() - if err != nil { - return err - } - rawState = cfmakeraw(rawState) - rawState.Oflag = rawState.Oflag | unix.OPOST - return tcset(m.f.Fd(), &rawState) -} - -func (m *master) DisableEcho() error { - rawState, err := m.getCurrent() - if err != nil { - return err - } - rawState.Lflag = rawState.Lflag &^ unix.ECHO - return tcset(m.f.Fd(), &rawState) -} - -func (m *master) Size() (WinSize, error) { - return tcgwinsz(m.f.Fd()) -} - -func (m *master) Fd() uintptr { - return m.f.Fd() -} - -func (m *master) Name() string { - return m.f.Name() -} - -// checkConsole checks if the provided file is a console -func checkConsole(f File) error { - var termios unix.Termios - if tcget(f.Fd(), &termios) != nil { - return ErrNotAConsole - } - return nil -} - -func newMaster(f File) (Console, error) { - m := &master{ - f: f, - } - t, err := m.getCurrent() - if err != nil { - return nil, err - } - m.original = &t - return m, nil -} - -// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts normally. 
In particular, a not-very-well-known default of -// Linux unix98 ptys is that they have +onlcr by default. While this isn't a -// problem for terminal emulators, because we relay data from the terminal we -// also relay that funky line discipline. -func ClearONLCR(fd uintptr) error { - return setONLCR(fd, false) -} - -// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair -// created by us acts as intended for a terminal emulator. -func SetONLCR(fd uintptr) error { - return setONLCR(fd, true) -} diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go deleted file mode 100644 index 129a92883ddb..000000000000 --- a/vendor/github.com/containerd/console/console_windows.go +++ /dev/null @@ -1,216 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "github.com/pkg/errors" - "golang.org/x/sys/windows" -) - -var ( - vtInputSupported bool - ErrNotImplemented = errors.New("not implemented") -) - -func (m *master) initStdios() { - m.in = windows.Handle(os.Stdin.Fd()) - if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil { - // Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it. - if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - windows.SetConsoleMode(m.in, m.inMode) - } else { - fmt.Printf("failed to get console mode for stdin: %v\n", err) - } - - m.out = windows.Handle(os.Stdout.Fd()) - if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil { - if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - windows.SetConsoleMode(m.out, m.outMode) - } - } else { - fmt.Printf("failed to get console mode for stdout: %v\n", err) - } - - m.err = windows.Handle(os.Stderr.Fd()) - if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil { - if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil { - m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING - } else { - windows.SetConsoleMode(m.err, m.errMode) - } - } else { - fmt.Printf("failed to get console mode for stderr: %v\n", err) - } -} - -type master struct { - in windows.Handle - inMode uint32 - - out windows.Handle - outMode uint32 - - err windows.Handle - errMode uint32 -} - -func (m *master) SetRaw() error { - if err := makeInputRaw(m.in, m.inMode); err != nil { - return err - } - - // Set StdOut and StdErr to raw mode, we ignore failures since - // windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of - // Windows. 
- - windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - - windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN) - - return nil -} - -func (m *master) Reset() error { - for _, s := range []struct { - fd windows.Handle - mode uint32 - }{ - {m.in, m.inMode}, - {m.out, m.outMode}, - {m.err, m.errMode}, - } { - if err := windows.SetConsoleMode(s.fd, s.mode); err != nil { - return errors.Wrap(err, "unable to restore console mode") - } - } - - return nil -} - -func (m *master) Size() (WinSize, error) { - var info windows.ConsoleScreenBufferInfo - err := windows.GetConsoleScreenBufferInfo(m.out, &info) - if err != nil { - return WinSize{}, errors.Wrap(err, "unable to get console info") - } - - winsize := WinSize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -func (m *master) Resize(ws WinSize) error { - return ErrNotImplemented -} - -func (m *master) ResizeFrom(c Console) error { - return ErrNotImplemented -} - -func (m *master) DisableEcho() error { - mode := m.inMode &^ windows.ENABLE_ECHO_INPUT - mode |= windows.ENABLE_PROCESSED_INPUT - mode |= windows.ENABLE_LINE_INPUT - - if err := windows.SetConsoleMode(m.in, mode); err != nil { - return errors.Wrap(err, "unable to set console to disable echo") - } - - return nil -} - -func (m *master) Close() error { - return nil -} - -func (m *master) Read(b []byte) (int, error) { - return os.Stdin.Read(b) -} - -func (m *master) Write(b []byte) (int, error) { - return os.Stdout.Write(b) -} - -func (m *master) Fd() uintptr { - return uintptr(m.in) -} - -// on windows, console can only be made from os.Std{in,out,err}, hence there -// isnt a single name here we can use. Return a dummy "console" value in this -// case should be sufficient. 
-func (m *master) Name() string { - return "console" -} - -// makeInputRaw puts the terminal (Windows Console) connected to the given -// file descriptor into raw mode -func makeInputRaw(fd windows.Handle, mode uint32) error { - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= windows.ENABLE_ECHO_INPUT - mode &^= windows.ENABLE_LINE_INPUT - mode &^= windows.ENABLE_MOUSE_INPUT - mode &^= windows.ENABLE_WINDOW_INPUT - mode &^= windows.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= windows.ENABLE_EXTENDED_FLAGS - mode |= windows.ENABLE_INSERT_MODE - mode |= windows.ENABLE_QUICK_EDIT_MODE - - if vtInputSupported { - mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT - } - - if err := windows.SetConsoleMode(fd, mode); err != nil { - return errors.Wrap(err, "unable to set console to raw mode") - } - - return nil -} - -func checkConsole(f File) error { - var mode uint32 - if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil { - return err - } - return nil -} - -func newMaster(f File) (Console, error) { - if f != os.Stdin && f != os.Stdout && f != os.Stderr { - return nil, errors.New("creating a console from a file is not supported on windows") - } - m := &master{} - m.initStdios() - return m, nil -} diff --git a/vendor/github.com/containerd/console/go.mod b/vendor/github.com/containerd/console/go.mod deleted file mode 100644 index 7fca0a9a3ac1..000000000000 --- a/vendor/github.com/containerd/console/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/containerd/console - -go 1.13 - -require ( - github.com/pkg/errors v0.9.1 - golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c -) diff --git a/vendor/github.com/containerd/console/pty_freebsd_cgo.go b/vendor/github.com/containerd/console/pty_freebsd_cgo.go deleted file mode 100644 index cbd3cd7ea43d..000000000000 --- a/vendor/github.com/containerd/console/pty_freebsd_cgo.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build freebsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" -) - -/* -#include -#include -#include -*/ -import "C" - -// openpt allocates a new pseudo-terminal and establishes a connection with its -// control device. -func openpt() (*os.File, error) { - fd, err := C.posix_openpt(C.O_RDWR) - if err != nil { - return nil, fmt.Errorf("posix_openpt: %w", err) - } - if _, err := C.grantpt(fd); err != nil { - C.close(fd) - return nil, fmt.Errorf("grantpt: %w", err) - } - return os.NewFile(uintptr(fd), ""), nil -} diff --git a/vendor/github.com/containerd/console/pty_freebsd_nocgo.go b/vendor/github.com/containerd/console/pty_freebsd_nocgo.go deleted file mode 100644 index b5e43181d4f3..000000000000 --- a/vendor/github.com/containerd/console/pty_freebsd_nocgo.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build freebsd,!cgo - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" -) - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. -// - -func openpt() (*os.File, error) { - panic("openpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/pty_unix.go b/vendor/github.com/containerd/console/pty_unix.go deleted file mode 100644 index d5a6bd8ca2e8..000000000000 --- a/vendor/github.com/containerd/console/pty_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin linux netbsd openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// openpt allocates a new pseudo-terminal by opening the /dev/ptmx device -func openpt() (*os.File, error) { - return os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0) -} diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go deleted file mode 100644 index 787154580f6c..000000000000 --- a/vendor/github.com/containerd/console/tc_darwin.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. 
-func unlockpt(f *os.File) error { - return unix.IoctlSetPointerInt(int(f.Fd()), unix.TIOCPTYUNLK, 0) -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/containerd/console/tc_freebsd_cgo.go b/vendor/github.com/containerd/console/tc_freebsd_cgo.go deleted file mode 100644 index 0f3d27273094..000000000000 --- a/vendor/github.com/containerd/console/tc_freebsd_cgo.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build freebsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -/* -#include -#include -*/ -import "C" - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - fd := C.int(f.Fd()) - if _, err := C.unlockpt(fd); err != nil { - C.close(fd) - return fmt.Errorf("unlockpt: %w", err) - } - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/containerd/console/tc_freebsd_nocgo.go b/vendor/github.com/containerd/console/tc_freebsd_nocgo.go deleted file mode 100644 index 087fc158a169..000000000000 --- a/vendor/github.com/containerd/console/tc_freebsd_nocgo.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build freebsd,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. 
-// - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN) - if err != nil { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", n), nil -} diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go deleted file mode 100644 index 7d552ea4ba13..000000000000 --- a/vendor/github.com/containerd/console/tc_linux.go +++ /dev/null @@ -1,51 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "fmt" - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - var u int32 - // XXX do not use unix.IoctlSetPointerInt here, see commit dbd69c59b81. - if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u))); err != 0 { - return err - } - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - var u uint32 - // XXX do not use unix.IoctlGetInt here, see commit dbd69c59b81. - if _, _, err := unix.Syscall(unix.SYS_IOCTL, f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&u))); err != 0 { - return "", err - } - return fmt.Sprintf("/dev/pts/%d", u), nil -} diff --git a/vendor/github.com/containerd/console/tc_netbsd.go b/vendor/github.com/containerd/console/tc_netbsd.go deleted file mode 100644 index 71227aefdffd..000000000000 --- a/vendor/github.com/containerd/console/tc_netbsd.go +++ /dev/null @@ -1,45 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "bytes" - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. 
-// unlockpt should be called before opening the slave side of a pty. -// This does not exist on NetBSD, it does not allocate controlling terminals on open -func unlockpt(f *os.File) error { - return nil -} - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptm, err := unix.IoctlGetPtmget(int(f.Fd()), unix.TIOCPTSNAME) - if err != nil { - return "", err - } - return string(ptm.Sn[:bytes.IndexByte(ptm.Sn[:], 0)]), nil -} diff --git a/vendor/github.com/containerd/console/tc_openbsd_cgo.go b/vendor/github.com/containerd/console/tc_openbsd_cgo.go deleted file mode 100644 index f0cec06a72dc..000000000000 --- a/vendor/github.com/containerd/console/tc_openbsd_cgo.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build openbsd,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -//#include -import "C" - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptspath, err := C.ptsname(C.int(f.Fd())) - if err != nil { - return "", err - } - return C.GoString(ptspath), nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - if _, err := C.grantpt(C.int(f.Fd())); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go b/vendor/github.com/containerd/console/tc_openbsd_nocgo.go deleted file mode 100644 index daccce20585a..000000000000 --- a/vendor/github.com/containerd/console/tc_openbsd_nocgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build openbsd,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. 
-// - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TIOCGETA - cmdTcSet = unix.TIOCSETA -) - -func ptsname(f *os.File) (string, error) { - panic("ptsname() support requires cgo.") -} - -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go deleted file mode 100644 index e36a68edd1ed..000000000000 --- a/vendor/github.com/containerd/console/tc_solaris_cgo.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build solaris,cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -//#include -import "C" - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -// ptsname retrieves the name of the first available pts for the given master. -func ptsname(f *os.File) (string, error) { - ptspath, err := C.ptsname(C.int(f.Fd())) - if err != nil { - return "", err - } - return C.GoString(ptspath), nil -} - -// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f. -// unlockpt should be called before opening the slave side of a pty. -func unlockpt(f *os.File) error { - if _, err := C.grantpt(C.int(f.Fd())); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go deleted file mode 100644 index eb0bd2c36b83..000000000000 --- a/vendor/github.com/containerd/console/tc_solaris_nocgo.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build solaris,!cgo - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// -// Implementing the functions below requires cgo support. Non-cgo stubs -// versions are defined below to enable cross-compilation of source code -// that depends on these functions, but the resultant cross-compiled -// binaries cannot actually be used. If the stub function(s) below are -// actually invoked they will display an error message and cause the -// calling process to exit. 
-// - -package console - -import ( - "os" - - "golang.org/x/sys/unix" -) - -const ( - cmdTcGet = unix.TCGETS - cmdTcSet = unix.TCSETS -) - -func ptsname(f *os.File) (string, error) { - panic("ptsname() support requires cgo.") -} - -func unlockpt(f *os.File) error { - panic("unlockpt() support requires cgo.") -} diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go deleted file mode 100644 index 5cd4c550ce88..000000000000 --- a/vendor/github.com/containerd/console/tc_unix.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build darwin freebsd linux netbsd openbsd solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package console - -import ( - "golang.org/x/sys/unix" -) - -func tcget(fd uintptr, p *unix.Termios) error { - termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet) - if err != nil { - return err - } - *p = *termios - return nil -} - -func tcset(fd uintptr, p *unix.Termios) error { - return unix.IoctlSetTermios(int(fd), cmdTcSet, p) -} - -func tcgwinsz(fd uintptr) (WinSize, error) { - var ws WinSize - - uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) - if err != nil { - return ws, err - } - - // Translate from unix.Winsize to console.WinSize - ws.Height = uws.Row - ws.Width = uws.Col - ws.x = uws.Xpixel - ws.y = uws.Ypixel - return ws, nil -} - -func tcswinsz(fd uintptr, ws WinSize) error { - // Translate from console.WinSize to unix.Winsize - - var uws unix.Winsize - uws.Row = ws.Height - uws.Col = ws.Width - uws.Xpixel = ws.x - uws.Ypixel = ws.y - - return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws) -} - -func setONLCR(fd uintptr, enable bool) error { - var termios unix.Termios - if err := tcget(fd, &termios); err != nil { - return err - } - if enable { - // Set +onlcr so we can act like a real terminal - termios.Oflag |= unix.ONLCR - } else { - // Set -onlcr so we don't have to deal with \r. - termios.Oflag &^= unix.ONLCR - } - return tcset(fd, &termios) -} - -func cfmakeraw(t unix.Termios) unix.Termios { - t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) - t.Oflag &^= unix.OPOST - t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) - t.Cflag &^= (unix.CSIZE | unix.PARENB) - t.Cflag &^= unix.CS8 - t.Cc[unix.VMIN] = 1 - t.Cc[unix.VTIME] = 0 - - return t -} diff --git a/vendor/github.com/containerd/containerd/api/README.md b/vendor/github.com/containerd/containerd/api/README.md deleted file mode 100644 index f6eb28c6a578..000000000000 --- a/vendor/github.com/containerd/containerd/api/README.md +++ /dev/null @@ -1,18 +0,0 @@ -This directory contains the GRPC API definitions for containerd. - -All defined services and messages have been aggregated into `*.pb.txt` -descriptors files in this directory. Definitions present here are considered -frozen after the release. 
- -At release time, the current `next.pb.txt` file will be moved into place to -freeze the API changes for the minor version. For example, when 1.0.0 is -released, `next.pb.txt` should be moved to `1.0.txt`. Notice that we leave off -the patch number, since the API will be completely locked down for a given -patch series. - -We may find that by default, protobuf descriptors are too noisy to lock down -API changes. In that case, we may filter out certain fields in the descriptors, -possibly regenerating for old versions. - -This process is similar to the [process used to ensure backwards compatibility -in Go](https://github.com/golang/go/tree/master/api). diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go deleted file mode 100644 index 97c7d4a92b36..000000000000 --- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go +++ /dev/null @@ -1,5425 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: github.com/containerd/containerd/api/services/content/v1/content.proto - -package content - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/gogo/protobuf/types" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// WriteAction defines the behavior of a WriteRequest. -type WriteAction int32 - -const ( - // WriteActionStat instructs the writer to return the current status while - // holding the lock on the write. - WriteActionStat WriteAction = 0 - // WriteActionWrite sets the action for the write request to write data. - // - // Any data included will be written at the provided offset. The - // transaction will be left open for further writes. - // - // This is the default. - WriteActionWrite WriteAction = 1 - // WriteActionCommit will write any outstanding data in the message and - // commit the write, storing it under the digest. - // - // This can be used in a single message to send the data, verify it and - // commit it. - // - // This action will always terminate the write. - WriteActionCommit WriteAction = 2 -) - -var WriteAction_name = map[int32]string{ - 0: "STAT", - 1: "WRITE", - 2: "COMMIT", -} - -var WriteAction_value = map[string]int32{ - "STAT": 0, - "WRITE": 1, - "COMMIT": 2, -} - -func (x WriteAction) String() string { - return proto.EnumName(WriteAction_name, int32(x)) -} - -func (WriteAction) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{0} -} - -type Info struct { - // Digest is the hash identity of the blob. 
- Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - // Size is the total number of bytes in the blob. - Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"` - // CreatedAt provides the time at which the blob was committed. - CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3,stdtime" json:"created_at"` - // UpdatedAt provides the time the info was last updated. - UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - // Labels are arbitrary data on snapshots. - // - // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,5,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Info) Reset() { *m = Info{} } -func (*Info) ProtoMessage() {} -func (*Info) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{0} -} -func (m *Info) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Info.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Info) XXX_Merge(src proto.Message) { - xxx_messageInfo_Info.Merge(m, src) -} -func (m *Info) XXX_Size() int { - return m.Size() -} -func (m *Info) XXX_DiscardUnknown() { - xxx_messageInfo_Info.DiscardUnknown(m) -} - -var xxx_messageInfo_Info proto.InternalMessageInfo - -type InfoRequest struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InfoRequest) Reset() { *m = InfoRequest{} } -func (*InfoRequest) ProtoMessage() {} -func (*InfoRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{1} -} -func (m *InfoRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InfoRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoRequest.Merge(m, src) -} -func (m *InfoRequest) XXX_Size() int { - return m.Size() -} -func (m *InfoRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InfoRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InfoRequest proto.InternalMessageInfo - -type InfoResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InfoResponse) Reset() { *m = InfoResponse{} } -func (*InfoResponse) ProtoMessage() {} -func (*InfoResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{2} -} -func (m *InfoResponse) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *InfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InfoResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InfoResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_InfoResponse.Merge(m, src) -} -func (m *InfoResponse) XXX_Size() int { - return m.Size() -} -func (m *InfoResponse) XXX_DiscardUnknown() { - xxx_messageInfo_InfoResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_InfoResponse proto.InternalMessageInfo - -type UpdateRequest struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - // UpdateMask specifies which fields to perform the update on. If empty, - // the operation applies to all fields. - // - // In info, Digest, Size, and CreatedAt are immutable, - // other field may be updated using this mask. - // If no mask is provided, all mutable field are updated. - UpdateMask *types.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateRequest) Reset() { *m = UpdateRequest{} } -func (*UpdateRequest) ProtoMessage() {} -func (*UpdateRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{3} -} -func (m *UpdateRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UpdateRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateRequest.Merge(m, src) -} -func (m *UpdateRequest) XXX_Size() int { - return m.Size() -} -func (m *UpdateRequest) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateRequest proto.InternalMessageInfo - -type UpdateResponse struct { - Info Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UpdateResponse) Reset() { *m = UpdateResponse{} } -func (*UpdateResponse) ProtoMessage() {} -func (*UpdateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{4} -} -func (m *UpdateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UpdateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UpdateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateResponse.Merge(m, src) -} -func (m *UpdateResponse) XXX_Size() int { - return m.Size() -} -func (m *UpdateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_UpdateResponse proto.InternalMessageInfo - -type ListContentRequest struct { - // Filters contains one or more filters using the syntax defined in the - // containerd filter package. 
- // - // The returned result will be those that match any of the provided - // filters. Expanded, containers that match the following will be - // returned: - // - // filters[0] or filters[1] or ... or filters[n-1] or filters[n] - // - // If filters is zero-length or nil, all items will be returned. - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListContentRequest) Reset() { *m = ListContentRequest{} } -func (*ListContentRequest) ProtoMessage() {} -func (*ListContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{5} -} -func (m *ListContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContentRequest.Merge(m, src) -} -func (m *ListContentRequest) XXX_Size() int { - return m.Size() -} -func (m *ListContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListContentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListContentRequest proto.InternalMessageInfo - -type ListContentResponse struct { - Info []Info `protobuf:"bytes,1,rep,name=info,proto3" json:"info"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListContentResponse) Reset() { *m = ListContentResponse{} } -func (*ListContentResponse) ProtoMessage() {} -func (*ListContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{6} -} -func (m *ListContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListContentResponse.Merge(m, src) -} -func (m *ListContentResponse) XXX_Size() int { - return m.Size() -} -func (m *ListContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListContentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListContentResponse proto.InternalMessageInfo - -type DeleteContentRequest struct { - // Digest specifies which content to delete. 
- Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DeleteContentRequest) Reset() { *m = DeleteContentRequest{} } -func (*DeleteContentRequest) ProtoMessage() {} -func (*DeleteContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{7} -} -func (m *DeleteContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeleteContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeleteContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteContentRequest.Merge(m, src) -} -func (m *DeleteContentRequest) XXX_Size() int { - return m.Size() -} -func (m *DeleteContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteContentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DeleteContentRequest proto.InternalMessageInfo - -// ReadContentRequest defines the fields that make up a request to read a portion of -// data from a stored object. -type ReadContentRequest struct { - // Digest is the hash identity to read. - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - // Offset specifies the number of bytes from the start at which to begin - // the read. If zero or less, the read will be from the start. This uses - // standard zero-indexed semantics. - Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"` - // size is the total size of the read. If zero, the entire blob will be - // returned by the service. - Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadContentRequest) Reset() { *m = ReadContentRequest{} } -func (*ReadContentRequest) ProtoMessage() {} -func (*ReadContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{8} -} -func (m *ReadContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadContentRequest.Merge(m, src) -} -func (m *ReadContentRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadContentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadContentRequest proto.InternalMessageInfo - -// ReadContentResponse carries byte data for a read request. 
-type ReadContentResponse struct { - Offset int64 `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadContentResponse) Reset() { *m = ReadContentResponse{} } -func (*ReadContentResponse) ProtoMessage() {} -func (*ReadContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{9} -} -func (m *ReadContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadContentResponse.Merge(m, src) -} -func (m *ReadContentResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadContentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadContentResponse proto.InternalMessageInfo - -type Status struct { - StartedAt time.Time `protobuf:"bytes,1,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` - UpdatedAt time.Time `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - Ref string `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"` - Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{10} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return m.Size() -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -type StatusRequest struct { - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{11} -} -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if 
deterministic { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) -} -func (m *StatusRequest) XXX_Size() int { - return m.Size() -} -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo - -type StatusResponse struct { - Status *Status `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{12} -} -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) -} -func (m *StatusResponse) XXX_Size() int { - return m.Size() -} -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo - -type ListStatusesRequest struct { - Filters []string `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListStatusesRequest) Reset() { *m = ListStatusesRequest{} } -func (*ListStatusesRequest) ProtoMessage() {} -func (*ListStatusesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{13} -} -func (m *ListStatusesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListStatusesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListStatusesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListStatusesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListStatusesRequest.Merge(m, src) -} -func (m *ListStatusesRequest) XXX_Size() int { - return m.Size() -} -func (m *ListStatusesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListStatusesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListStatusesRequest proto.InternalMessageInfo - -type ListStatusesResponse struct { - Statuses []Status `protobuf:"bytes,1,rep,name=statuses,proto3" json:"statuses"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListStatusesResponse) Reset() { *m = ListStatusesResponse{} } -func (*ListStatusesResponse) ProtoMessage() {} -func (*ListStatusesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{14} -} -func (m *ListStatusesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*ListStatusesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListStatusesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListStatusesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListStatusesResponse.Merge(m, src) -} -func (m *ListStatusesResponse) XXX_Size() int { - return m.Size() -} -func (m *ListStatusesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListStatusesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListStatusesResponse proto.InternalMessageInfo - -// WriteContentRequest writes data to the request ref at offset. -type WriteContentRequest struct { - // Action sets the behavior of the write. - // - // When this is a write and the ref is not yet allocated, the ref will be - // allocated and the data will be written at offset. - // - // If the action is write and the ref is allocated, it will accept data to - // an offset that has not yet been written. - // - // If the action is write and there is no data, the current write status - // will be returned. This works differently from status because the stream - // holds a lock. - Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` - // Ref identifies the pre-commit object to write to. - Ref string `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"` - // Total can be set to have the service validate the total size of the - // committed content. - // - // The latest value before or with the commit action message will be use to - // validate the content. If the offset overflows total, the service may - // report an error. It is only required on one message for the write. - // - // If the value is zero or less, no validation of the final content will be - // performed. - Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"` - // Expected can be set to have the service validate the final content against - // the provided digest. - // - // If the digest is already present in the object store, an AlreadyExists - // error will be returned. - // - // Only the latest version will be used to check the content against the - // digest. It is only required to include it on a single message, before or - // with the commit action message. - Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"` - // Offset specifies the number of bytes from the start at which to begin - // the write. For most implementations, this means from the start of the - // file. This uses standard, zero-indexed semantics. - // - // If the action is write, the remote may remove all previously written - // data after the offset. Implementations may support arbitrary offsets but - // MUST support reseting this value to zero with a write. If an - // implementation does not support a write at a particular offset, an - // OutOfRange error must be returned. - Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"` - // Data is the actual bytes to be written. - // - // If this is empty and the message is not a commit, a response will be - // returned with the current write state. - Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"` - // Labels are arbitrary data on snapshots. 
- // - // The combined size of a key/value pair cannot exceed 4096 bytes. - Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WriteContentRequest) Reset() { *m = WriteContentRequest{} } -func (*WriteContentRequest) ProtoMessage() {} -func (*WriteContentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{15} -} -func (m *WriteContentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteContentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteContentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteContentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteContentRequest.Merge(m, src) -} -func (m *WriteContentRequest) XXX_Size() int { - return m.Size() -} -func (m *WriteContentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_WriteContentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteContentRequest proto.InternalMessageInfo - -// WriteContentResponse is returned on the culmination of a write call. -type WriteContentResponse struct { - // Action contains the action for the final message of the stream. A writer - // should confirm that they match the intended result. - Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"` - // StartedAt provides the time at which the write began. - // - // This must be set for stat and commit write actions. All other write - // actions may omit this. - StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,proto3,stdtime" json:"started_at"` - // UpdatedAt provides the last time of a successful write. - // - // This must be set for stat and commit write actions. All other write - // actions may omit this. - UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,proto3,stdtime" json:"updated_at"` - // Offset is the current committed size for the write. - Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"` - // Total provides the current, expected total size of the write. - // - // We include this to provide consistency with the Status structure on the - // client writer. - // - // This is only valid on the Stat and Commit response. - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - // Digest, if present, includes the digest up to the currently committed - // bytes. If action is commit, this field will be set. It is implementation - // defined if this is set for other actions. 
- Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WriteContentResponse) Reset() { *m = WriteContentResponse{} } -func (*WriteContentResponse) ProtoMessage() {} -func (*WriteContentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{16} -} -func (m *WriteContentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WriteContentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WriteContentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WriteContentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_WriteContentResponse.Merge(m, src) -} -func (m *WriteContentResponse) XXX_Size() int { - return m.Size() -} -func (m *WriteContentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_WriteContentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_WriteContentResponse proto.InternalMessageInfo - -type AbortRequest struct { - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *AbortRequest) Reset() { *m = AbortRequest{} } -func (*AbortRequest) ProtoMessage() {} -func (*AbortRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_468430ba3e400391, []int{17} -} -func (m *AbortRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AbortRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AbortRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AbortRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_AbortRequest.Merge(m, src) -} -func (m *AbortRequest) XXX_Size() int { - return m.Size() -} -func (m *AbortRequest) XXX_DiscardUnknown() { - xxx_messageInfo_AbortRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_AbortRequest proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value) - proto.RegisterType((*Info)(nil), "containerd.services.content.v1.Info") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.Info.LabelsEntry") - proto.RegisterType((*InfoRequest)(nil), "containerd.services.content.v1.InfoRequest") - proto.RegisterType((*InfoResponse)(nil), "containerd.services.content.v1.InfoResponse") - proto.RegisterType((*UpdateRequest)(nil), "containerd.services.content.v1.UpdateRequest") - proto.RegisterType((*UpdateResponse)(nil), "containerd.services.content.v1.UpdateResponse") - proto.RegisterType((*ListContentRequest)(nil), "containerd.services.content.v1.ListContentRequest") - proto.RegisterType((*ListContentResponse)(nil), "containerd.services.content.v1.ListContentResponse") - proto.RegisterType((*DeleteContentRequest)(nil), "containerd.services.content.v1.DeleteContentRequest") - proto.RegisterType((*ReadContentRequest)(nil), "containerd.services.content.v1.ReadContentRequest") - 
proto.RegisterType((*ReadContentResponse)(nil), "containerd.services.content.v1.ReadContentResponse") - proto.RegisterType((*Status)(nil), "containerd.services.content.v1.Status") - proto.RegisterType((*StatusRequest)(nil), "containerd.services.content.v1.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "containerd.services.content.v1.StatusResponse") - proto.RegisterType((*ListStatusesRequest)(nil), "containerd.services.content.v1.ListStatusesRequest") - proto.RegisterType((*ListStatusesResponse)(nil), "containerd.services.content.v1.ListStatusesResponse") - proto.RegisterType((*WriteContentRequest)(nil), "containerd.services.content.v1.WriteContentRequest") - proto.RegisterMapType((map[string]string)(nil), "containerd.services.content.v1.WriteContentRequest.LabelsEntry") - proto.RegisterType((*WriteContentResponse)(nil), "containerd.services.content.v1.WriteContentResponse") - proto.RegisterType((*AbortRequest)(nil), "containerd.services.content.v1.AbortRequest") -} - -func init() { - proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptor_468430ba3e400391) -} - -var fileDescriptor_468430ba3e400391 = []byte{ - // 1081 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x67, 0xbb, 0x42, - 0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b, - 0x97, 0x40, 0x2f, 0x65, 0x6d, 0x8f, 0xcd, 0x2a, 0xb6, 0xd7, 0xdd, 0x19, 0x5b, 0x84, 0x13, 0x17, - 0x24, 0x14, 0xf5, 0x80, 0xb8, 0xe7, 0x02, 0xfc, 0x15, 0x1c, 0x38, 0xe7, 0xc8, 0x11, 0x71, 0x68, - 0x69, 0xfe, 0x07, 0xee, 0x68, 0x66, 0x67, 0xed, 0xf5, 0x47, 0x58, 0xdb, 0x31, 0x27, 0xbf, 0x99, - 0x7d, 0xbf, 0xf7, 0xfd, 0x31, 0x86, 0x7b, 0x4d, 0x87, 0x7d, 0xdd, 0xab, 0x9a, 0x35, 0xb7, 0x5d, - 0xa8, 0xb9, 0x1d, 0x66, 0x3b, 0x1d, 0xe2, 0xd5, 0xc3, 0xa4, 0xdd, 0x75, 0x0a, 0x94, 0x78, 0x7d, - 0xa7, 0x46, 0xa8, 0xb8, 0x27, 0x1d, 0x56, 0xe8, 0xdf, 0x0a, 0x48, 0xb3, 0xeb, 0xb9, 0xcc, 0xc5, - 0xb9, 0x21, 0xc2, 0x0c, 0xb8, 0xcd, 0x80, 0xa5, 0x7f, 0x4b, 0xcb, 0x34, 0xdd, 0xa6, 0x2b, 0x58, - 0x0b, 0x9c, 0xf2, 0x51, 0x9a, 0xde, 0x74, 0xdd, 0x66, 0x8b, 0x14, 0xc4, 0xa9, 0xda, 0x6b, 0x14, - 0x1a, 0x0e, 0x69, 0xd5, 0x9f, 0xb6, 0x6d, 0x7a, 0x24, 0x39, 0x36, 0xc7, 0x39, 0x98, 0xd3, 0x26, - 0x94, 0xd9, 0xed, 0xae, 0x64, 0x78, 0x73, 0x9c, 0x81, 0xb4, 0xbb, 0xec, 0xd8, 0xff, 0x68, 0xfc, - 0x13, 0x87, 0xc4, 0x7e, 0xa7, 0xe1, 0xe2, 0x4f, 0x40, 0xad, 0x3b, 0x4d, 0x42, 0x59, 0x16, 0xe9, - 0x28, 0xbf, 0xb6, 0x5b, 0x3c, 0x7b, 0xb1, 0x19, 0xfb, 0xeb, 0xc5, 0xe6, 0x8d, 0x90, 0xfb, 0x6e, - 0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09, - 0x18, 0x43, 0x82, 0x3a, 0xdf, 0x92, 0x6c, 0x5c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4, - 0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0xac, 0xa2, 0xa3, 0x7c, 0xaa, 0xa8, 0x99, 0xbe, 0x69, - 0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe5, 0xfa, 0x7f, 0x7c, 0xb9, 0x89, 0xac, 0x35, - 0x89, 0x2b, 0x31, 0x2e, 0xa4, 0xd7, 0xad, 0x07, 0x42, 0x12, 0xf3, 0x08, 0x91, 0xb8, 0x12, 0xc3, - 0xf7, 0x41, 0x6d, 0xd9, 0x55, 0xd2, 0xa2, 0xd9, 0xa4, 0xae, 0xe4, 0x53, 0xc5, 0x6d, 0xf3, 0xbf, - 0x33, 0x63, 0xf2, 0xf8, 0x98, 0x0f, 0x04, 0x64, 0xaf, 0xc3, 0xbc, 0x63, 0x4b, 0xe2, 0xb5, 0xf7, - 0x21, 0x15, 0xba, 0xc6, 0x69, 0x50, 0x8e, 0xc8, 0xb1, 0x1f, 0x3f, 0x8b, 0x93, 0x38, 0x03, 0xc9, - 0xbe, 0xdd, 0xea, 0xf9, 0x91, 0x58, 0xb3, 0xfc, 0xc3, 
0x07, 0xf1, 0xf7, 0x90, 0xf1, 0x25, 0xa4, - 0xb8, 0x58, 0x8b, 0x3c, 0xeb, 0xf1, 0x88, 0x2d, 0x31, 0xfa, 0xc6, 0x43, 0x58, 0xf7, 0x45, 0xd3, - 0xae, 0xdb, 0xa1, 0x04, 0x7f, 0x08, 0x09, 0xa7, 0xd3, 0x70, 0x85, 0xe4, 0x54, 0xf1, 0xed, 0x59, - 0xbc, 0xdd, 0x4d, 0x70, 0xfd, 0x96, 0xc0, 0x19, 0xcf, 0x11, 0x5c, 0x79, 0x2c, 0xa2, 0x17, 0x58, - 0x7b, 0x49, 0x89, 0xf8, 0x0e, 0xa4, 0xfc, 0x74, 0x88, 0x3a, 0x16, 0xc1, 0x99, 0x96, 0xc7, 0x7b, - 0xbc, 0xd4, 0x0f, 0x6c, 0x7a, 0x64, 0xc9, 0xac, 0x73, 0xda, 0xf8, 0x0c, 0xae, 0x06, 0xd6, 0x2c, - 0xc9, 0x41, 0x13, 0xf0, 0x03, 0x87, 0xb2, 0xb2, 0xcf, 0x12, 0x38, 0x99, 0x85, 0x95, 0x86, 0xd3, - 0x62, 0xc4, 0xa3, 0x59, 0xa4, 0x2b, 0xf9, 0x35, 0x2b, 0x38, 0x1a, 0x8f, 0x61, 0x63, 0x84, 0x7f, - 0xc2, 0x0c, 0x65, 0x21, 0x33, 0xaa, 0x90, 0xb9, 0x4b, 0x5a, 0x84, 0x91, 0x31, 0x43, 0x96, 0x59, - 0x1b, 0xcf, 0x11, 0x60, 0x8b, 0xd8, 0xf5, 0xff, 0x4f, 0x05, 0xbe, 0x06, 0xaa, 0xdb, 0x68, 0x50, - 0xc2, 0x64, 0xfb, 0xcb, 0xd3, 0x60, 0x28, 0x28, 0xc3, 0xa1, 0x60, 0x94, 0x60, 0x63, 0xc4, 0x1a, - 0x19, 0xc9, 0xa1, 0x08, 0x34, 0x2e, 0xa2, 0x6e, 0x33, 0x5b, 0x08, 0x5e, 0xb7, 0x04, 0x6d, 0xfc, - 0x1c, 0x07, 0xf5, 0x11, 0xb3, 0x59, 0x8f, 0xf2, 0xe9, 0x40, 0x99, 0xed, 0xc9, 0xe9, 0x80, 0xe6, - 0x99, 0x0e, 0x12, 0x37, 0x31, 0x62, 0xe2, 0x8b, 0x8d, 0x98, 0x34, 0x28, 0x1e, 0x69, 0x08, 0x57, - 0xd7, 0x2c, 0x4e, 0x86, 0x5c, 0x4a, 0x8c, 0xb8, 0x94, 0x81, 0x24, 0x73, 0x99, 0xdd, 0xca, 0x26, - 0xc5, 0xb5, 0x7f, 0xc0, 0x0f, 0x61, 0x95, 0x7c, 0xd3, 0x25, 0x35, 0x46, 0xea, 0x59, 0x75, 0xe1, - 0x8c, 0x0c, 0x64, 0x18, 0xd7, 0xe1, 0x8a, 0x1f, 0xa3, 0x20, 0xe1, 0xd2, 0x40, 0x34, 0x30, 0x90, - 0xb7, 0x55, 0xc0, 0x32, 0xa8, 0x67, 0x95, 0x8a, 0x1b, 0x19, 0xca, 0x77, 0xa2, 0x2a, 0x5a, 0xe2, - 0x25, 0xca, 0x28, 0xf8, 0x6d, 0xe2, 0xdf, 0x12, 0x1a, 0xdd, 0x57, 0x5f, 0x41, 0x66, 0x14, 0x20, - 0x0d, 0xb9, 0x0f, 0xab, 0x54, 0xde, 0xc9, 0xe6, 0x9a, 0xd1, 0x14, 0xd9, 0x5e, 0x03, 0xb4, 0xf1, - 0x93, 0x02, 0x1b, 0x87, 0x9e, 0x33, 0xd1, 0x62, 0x65, 0x50, 0xed, 0x1a, 0x73, 0xdc, 0x8e, 0x70, - 0xf5, 0x6a, 0xf1, 0x66, 0x94, 0x7c, 0x21, 0xa4, 0x24, 0x20, 0x96, 0x84, 0x06, 0x31, 0x8d, 0x0f, - 0x93, 0x3e, 0x48, 0xae, 0x72, 0x51, 0x72, 0x13, 0x97, 0x4f, 0x6e, 0xa8, 0xb4, 0x92, 0x53, 0xbb, - 0x45, 0x1d, 0x76, 0x0b, 0x3e, 0x1c, 0xec, 0xbe, 0x15, 0x11, 0xc8, 0x8f, 0x66, 0x72, 0x74, 0x34, - 0x5a, 0xcb, 0x5e, 0x85, 0x2f, 0xe3, 0x90, 0x19, 0x55, 0x23, 0xf3, 0xbe, 0x94, 0xac, 0x8c, 0x0e, - 0x85, 0xf8, 0x32, 0x86, 0x82, 0xb2, 0xd8, 0x50, 0x98, 0x6f, 0x04, 0x0c, 0x47, 0xb2, 0x7a, 0xe9, - 0xa9, 0xaf, 0xc3, 0x7a, 0xa9, 0xea, 0x7a, 0xec, 0xc2, 0xee, 0xbf, 0xf1, 0x3d, 0x82, 0x54, 0x28, - 0x7a, 0xf8, 0x2d, 0x48, 0x3c, 0xaa, 0x94, 0x2a, 0xe9, 0x98, 0xb6, 0x71, 0x72, 0xaa, 0xbf, 0x16, - 0xfa, 0xc4, 0x3b, 0x0b, 0x6f, 0x42, 0xf2, 0xd0, 0xda, 0xaf, 0xec, 0xa5, 0x91, 0x96, 0x39, 0x39, - 0xd5, 0xd3, 0xa1, 0xef, 0x82, 0xc4, 0xd7, 0x41, 0x2d, 0x7f, 0x7a, 0x70, 0xb0, 0x5f, 0x49, 0xc7, - 0xb5, 0x37, 0x4e, 0x4e, 0xf5, 0xd7, 0x43, 0x1c, 0x65, 0xb7, 0xdd, 0x76, 0x98, 0xb6, 0xf1, 0xc3, - 0x2f, 0xb9, 0xd8, 0x6f, 0xbf, 0xe6, 0xc2, 0x7a, 0x8b, 0xbf, 0xaf, 0xc0, 0x8a, 0x2c, 0x03, 0x6c, - 0xcb, 0x97, 0xe9, 0xcd, 0x59, 0x36, 0xa9, 0x74, 0x4d, 0x7b, 0x77, 0x36, 0x66, 0x59, 0x61, 0x4d, - 0x50, 0xfd, 0xb7, 0x04, 0xde, 0x8a, 0xc2, 0x8d, 0xbc, 0x80, 0x34, 0x73, 0x56, 0x76, 0xa9, 0xe8, - 0x19, 0x24, 0xf8, 0x68, 0xc3, 0xc5, 0x28, 0xdc, 0xe4, 0x43, 0x44, 0xdb, 0x99, 0x0b, 0xe3, 0x2b, - 0xdc, 0x46, 0xf8, 0x73, 0x50, 0xfd, 0xe7, 0x04, 0xbe, 0x1d, 0x25, 0x60, 0xda, 0xb3, 0x43, 0xbb, - 0x36, 0x51, 0xdf, 0x7b, 0xfc, 0x7f, 0x03, 0x77, 0x85, 0xef, 0xec, 0x68, 0x57, 
0x26, 0xdf, 0x19, - 0xd1, 0xae, 0x4c, 0x79, 0x0d, 0x6c, 0x23, 0x9e, 0x26, 0xb9, 0xe2, 0xb7, 0x66, 0xdc, 0x41, 0xb3, - 0xa6, 0x69, 0x6c, 0xe5, 0x1d, 0xc3, 0x7a, 0x78, 0x03, 0xe1, 0x99, 0x42, 0x3f, 0xb6, 0xe0, 0xb4, - 0xdb, 0xf3, 0x81, 0xa4, 0xea, 0x3e, 0x24, 0xfd, 0xd6, 0xd9, 0x59, 0x60, 0x24, 0x47, 0xeb, 0x9c, - 0x36, 0x60, 0xf3, 0x68, 0x1b, 0xe1, 0x03, 0x48, 0x8a, 0xd9, 0x80, 0x23, 0x3b, 0x27, 0x3c, 0x42, - 0x2e, 0xaa, 0x8e, 0xdd, 0x27, 0x67, 0xaf, 0x72, 0xb1, 0x3f, 0x5f, 0xe5, 0x62, 0xdf, 0x9d, 0xe7, - 0xd0, 0xd9, 0x79, 0x0e, 0xfd, 0x71, 0x9e, 0x43, 0x7f, 0x9f, 0xe7, 0xd0, 0x93, 0x8f, 0x17, 0xfd, - 0x1f, 0x7d, 0x47, 0x92, 0x5f, 0xc4, 0xaa, 0xaa, 0xd0, 0xb6, 0xf3, 0x6f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xc0, 0xc2, 0x35, 0xb1, 0x94, 0x0f, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ContentClient is the client API for Content service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ContentClient interface { - // Info returns information about a committed object. - // - // This call can be used for getting the size of content and checking for - // existence. - Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) - // Update updates content metadata. - // - // This call can be used to manage the mutable content labels. The - // immutable metadata such as digest, size, and committed at cannot - // be updated. - Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) - // List streams the entire set of content as Info objects and closes the - // stream. - // - // Typically, this will yield a large response, chunked into messages. - // Clients should make provisions to ensure they can handle the entire data - // set. - List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) - // Delete will delete the referenced object. - Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) - // Read allows one to read an object based on the offset into the content. - // - // The requested data may be returned in one or more messages. - Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) - // Status returns the status for a single reference. - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) - // ListStatuses returns the status of ongoing object ingestions, started via - // Write. - // - // Only those matching the regular expression will be provided in the - // response. If the provided regular expression is empty, all ingestions - // will be provided. - ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) - // Write begins or resumes writes to a resource identified by a unique ref. - // Only one active stream may exist at a time for each ref. - // - // Once a write stream has started, it may only write to a single ref, thus - // once a stream is started, the ref may be omitted on subsequent writes. 
- // - // For any write transaction represented by a ref, only a single write may - // be made to a given offset. If overlapping writes occur, it is an error. - // Writes should be sequential and implementations may throw an error if - // this is required. - // - // If expected_digest is set and already part of the content store, the - // write will fail. - // - // When completed, the commit flag should be set to true. If expected size - // or digest is set, the content will be validated against those values. - Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) - // Abort cancels the ongoing write named in the request. Any resources - // associated with the write will be collected. - Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) -} - -type contentClient struct { - cc *grpc.ClientConn -} - -func NewContentClient(cc *grpc.ClientConn) ContentClient { - return &contentClient{cc} -} - -func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) { - out := new(InfoResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) { - out := new(UpdateResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[0], "/containerd.services.content.v1.Content/List", opts...) - if err != nil { - return nil, err - } - x := &contentListClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Content_ListClient interface { - Recv() (*ListContentResponse, error) - grpc.ClientStream -} - -type contentListClient struct { - grpc.ClientStream -} - -func (x *contentListClient) Recv() (*ListContentResponse, error) { - m := new(ListContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[1], "/containerd.services.content.v1.Content/Read", opts...) 
- if err != nil { - return nil, err - } - x := &contentReadClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Content_ReadClient interface { - Recv() (*ReadContentResponse, error) - grpc.ClientStream -} - -type contentReadClient struct { - grpc.ClientStream -} - -func (x *contentReadClient) Recv() (*ReadContentResponse, error) { - m := new(ReadContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) { - out := new(ListStatusesResponse) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) { - stream, err := c.cc.NewStream(ctx, &_Content_serviceDesc.Streams[2], "/containerd.services.content.v1.Content/Write", opts...) - if err != nil { - return nil, err - } - x := &contentWriteClient{stream} - return x, nil -} - -type Content_WriteClient interface { - Send(*WriteContentRequest) error - Recv() (*WriteContentResponse, error) - grpc.ClientStream -} - -type contentWriteClient struct { - grpc.ClientStream -} - -func (x *contentWriteClient) Send(m *WriteContentRequest) error { - return x.ClientStream.SendMsg(m) -} - -func (x *contentWriteClient) Recv() (*WriteContentResponse, error) { - m := new(WriteContentResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*types.Empty, error) { - out := new(types.Empty) - err := c.cc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ContentServer is the server API for Content service. -type ContentServer interface { - // Info returns information about a committed object. - // - // This call can be used for getting the size of content and checking for - // existence. - Info(context.Context, *InfoRequest) (*InfoResponse, error) - // Update updates content metadata. - // - // This call can be used to manage the mutable content labels. The - // immutable metadata such as digest, size, and committed at cannot - // be updated. - Update(context.Context, *UpdateRequest) (*UpdateResponse, error) - // List streams the entire set of content as Info objects and closes the - // stream. - // - // Typically, this will yield a large response, chunked into messages. - // Clients should make provisions to ensure they can handle the entire data - // set. - List(*ListContentRequest, Content_ListServer) error - // Delete will delete the referenced object. - Delete(context.Context, *DeleteContentRequest) (*types.Empty, error) - // Read allows one to read an object based on the offset into the content. - // - // The requested data may be returned in one or more messages. 
- Read(*ReadContentRequest, Content_ReadServer) error - // Status returns the status for a single reference. - Status(context.Context, *StatusRequest) (*StatusResponse, error) - // ListStatuses returns the status of ongoing object ingestions, started via - // Write. - // - // Only those matching the regular expression will be provided in the - // response. If the provided regular expression is empty, all ingestions - // will be provided. - ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error) - // Write begins or resumes writes to a resource identified by a unique ref. - // Only one active stream may exist at a time for each ref. - // - // Once a write stream has started, it may only write to a single ref, thus - // once a stream is started, the ref may be omitted on subsequent writes. - // - // For any write transaction represented by a ref, only a single write may - // be made to a given offset. If overlapping writes occur, it is an error. - // Writes should be sequential and implementations may throw an error if - // this is required. - // - // If expected_digest is set and already part of the content store, the - // write will fail. - // - // When completed, the commit flag should be set to true. If expected size - // or digest is set, the content will be validated against those values. - Write(Content_WriteServer) error - // Abort cancels the ongoing write named in the request. Any resources - // associated with the write will be collected. - Abort(context.Context, *AbortRequest) (*types.Empty, error) -} - -// UnimplementedContentServer can be embedded to have forward compatible implementations. -type UnimplementedContentServer struct { -} - -func (*UnimplementedContentServer) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Info not implemented") -} -func (*UnimplementedContentServer) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Update not implemented") -} -func (*UnimplementedContentServer) List(req *ListContentRequest, srv Content_ListServer) error { - return status.Errorf(codes.Unimplemented, "method List not implemented") -} -func (*UnimplementedContentServer) Delete(ctx context.Context, req *DeleteContentRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") -} -func (*UnimplementedContentServer) Read(req *ReadContentRequest, srv Content_ReadServer) error { - return status.Errorf(codes.Unimplemented, "method Read not implemented") -} -func (*UnimplementedContentServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedContentServer) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListStatuses not implemented") -} -func (*UnimplementedContentServer) Write(srv Content_WriteServer) error { - return status.Errorf(codes.Unimplemented, "method Write not implemented") -} -func (*UnimplementedContentServer) Abort(ctx context.Context, req *AbortRequest) (*types.Empty, error) { - return nil, status.Errorf(codes.Unimplemented, "method Abort not implemented") -} - -func RegisterContentServer(s *grpc.Server, srv ContentServer) { - s.RegisterService(&_Content_serviceDesc, srv) -} - -func _Content_Info_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InfoRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Info(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Info", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Info(ctx, req.(*InfoRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Update(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Update", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Update(ctx, req.(*UpdateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ListContentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ContentServer).List(m, &contentListServer{stream}) -} - -type Content_ListServer interface { - Send(*ListContentResponse) error - grpc.ServerStream -} - -type contentListServer struct { - grpc.ServerStream -} - -func (x *contentListServer) Send(m *ListContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteContentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Delete(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Delete", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(ReadContentRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ContentServer).Read(m, &contentReadServer{stream}) -} - -type Content_ReadServer interface { - Send(*ReadContentResponse) error - grpc.ServerStream -} - -type contentReadServer struct { - grpc.ServerStream -} - -func (x *contentReadServer) Send(m *ReadContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatusRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Status(ctx, req.(*StatusRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_ListStatuses_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListStatusesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).ListStatuses(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/ListStatuses", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ContentServer).Write(&contentWriteServer{stream}) -} - -type Content_WriteServer interface { - Send(*WriteContentResponse) error - Recv() (*WriteContentRequest, error) - grpc.ServerStream -} - -type contentWriteServer struct { - grpc.ServerStream -} - -func (x *contentWriteServer) Send(m *WriteContentResponse) error { - return x.ServerStream.SendMsg(m) -} - -func (x *contentWriteServer) Recv() (*WriteContentRequest, error) { - m := new(WriteContentRequest) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AbortRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ContentServer).Abort(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/containerd.services.content.v1.Content/Abort", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ContentServer).Abort(ctx, req.(*AbortRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Content_serviceDesc = grpc.ServiceDesc{ - ServiceName: "containerd.services.content.v1.Content", - HandlerType: (*ContentServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Info", - Handler: _Content_Info_Handler, - }, - { - MethodName: "Update", - Handler: _Content_Update_Handler, - }, - { - MethodName: "Delete", - Handler: _Content_Delete_Handler, - }, - { - MethodName: "Status", - Handler: _Content_Status_Handler, - }, - { - MethodName: "ListStatuses", - Handler: _Content_ListStatuses_Handler, - }, - { - MethodName: "Abort", - Handler: _Content_Abort_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "List", - Handler: _Content_List_Handler, - ServerStreams: true, - }, - { - StreamName: "Read", - Handler: _Content_Read_Handler, - ServerStreams: true, - }, - { - StreamName: "Write", - Handler: _Content_Write_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto", -} - -func (m *Info) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Info) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - 
copy(dAtA[i:], v) - i = encodeVarintContent(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContent(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContent(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintContent(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x22 - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintContent(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x1a - if m.Size_ != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x10 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *InfoRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InfoRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *InfoResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InfoResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.UpdateMask != nil { - { - size, err := m.UpdateMask.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i 
-= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *UpdateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UpdateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ListContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Info) > 0 { - for iNdEx := len(m.Info) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Info[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadContentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Size_ != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x18 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x10 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Status) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Status) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Expected) > 0 { - i -= len(m.Expected) - copy(dAtA[i:], m.Expected) - i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) - i-- - dAtA[i] = 0x32 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x28 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x20 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x1a - } - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintContent(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x12 - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintContent(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Status != nil { - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListStatusesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListStatusesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListStatusesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintContent(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListStatusesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListStatusesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Statuses) > 0 { - for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintContent(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteContentRequest) MarshalTo(dAtA []byte) (int, 
error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteContentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintContent(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintContent(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintContent(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x3a - } - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintContent(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x32 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x28 - } - if len(m.Expected) > 0 { - i -= len(m.Expected) - copy(dAtA[i:], m.Expected) - i = encodeVarintContent(dAtA, i, uint64(len(m.Expected))) - i-- - dAtA[i] = 0x22 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x18 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0x12 - } - if m.Action != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WriteContentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WriteContentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintContent(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0x32 - } - if m.Total != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x28 - } - if m.Offset != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x20 - } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintContent(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x1a - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt):]) - if err11 != nil { - return 0, err11 - } - i -= n11 - i = encodeVarintContent(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0x12 - if m.Action != 0 { - i = encodeVarintContent(dAtA, i, uint64(m.Action)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *AbortRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AbortRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := 
len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintContent(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintContent(dAtA []byte, offset int, v uint64) int { - offset -= sovContent(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Info) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Size_ != 0 { - n += 1 + sovContent(uint64(m.Size_)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) - n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InfoResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.UpdateMask != nil { - l = m.UpdateMask.Size() - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UpdateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Info.Size() - n += 1 + l + sovContent(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Info) > 0 { - for _, e := range m.Info { - l = e.Size() - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DeleteContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Size_ != 0 { - n += 1 + sovContent(uint64(m.Size_)) - } - if m.XXX_unrecognized != nil { - n += 
len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Status) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Expected) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Status != nil { - l = m.Status.Size() - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListStatusesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListStatusesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Statuses) > 0 { - for _, e := range m.Statuses { - l = e.Size() - n += 1 + l + sovContent(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WriteContentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovContent(uint64(m.Action)) - } - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Expected) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v))) - n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *WriteContentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Action != 0 { - n += 1 + sovContent(uint64(m.Action)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt) - n += 1 + l + sovContent(uint64(l)) - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt) - n += 1 + l + sovContent(uint64(l)) - if m.Offset != 0 { - n += 1 + sovContent(uint64(m.Offset)) - } - if m.Total != 0 { - n += 1 + sovContent(uint64(m.Total)) - } - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized 
!= nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *AbortRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovContent(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovContent(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozContent(x uint64) (n int) { - return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Info) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&Info{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `CreatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *InfoRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *InfoResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InfoResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateRequest{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "types.FieldMask", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *UpdateResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&UpdateResponse{`, - `Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListContentRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListContentResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForInfo := "[]Info{" - for _, f := range this.Info { - repeatedStringForInfo += strings.Replace(strings.Replace(f.String(), "Info", "Info", 1), `&`, ``, 1) + "," - } - repeatedStringForInfo += "}" - s := 
strings.Join([]string{`&ListContentResponse{`, - `Info:` + repeatedStringForInfo + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DeleteContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeleteContentRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ReadContentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadContentRequest{`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ReadContentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReadContentResponse{`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Status) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Status{`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatusRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusRequest{`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *StatusResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatusResponse{`, - `Status:` + strings.Replace(this.Status.String(), "Status", "Status", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListStatusesRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ListStatusesRequest{`, - `Filters:` + fmt.Sprintf("%v", this.Filters) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ListStatusesResponse) String() string { - if this == nil { - return "nil" - } - repeatedStringForStatuses := "[]Status{" - for _, f := range this.Statuses { - repeatedStringForStatuses += strings.Replace(strings.Replace(f.String(), "Status", "Status", 1), `&`, ``, 1) + "," - } - repeatedStringForStatuses += "}" - s := strings.Join([]string{`&ListStatusesResponse{`, - `Statuses:` + repeatedStringForStatuses + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WriteContentRequest) String() string { - if this == nil { - return "nil" - } - keysForLabels := make([]string, 0, len(this.Labels)) - for k, _ := range this.Labels { - 
keysForLabels = append(keysForLabels, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - mapStringForLabels := "map[string]string{" - for _, k := range keysForLabels { - mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) - } - mapStringForLabels += "}" - s := strings.Join([]string{`&WriteContentRequest{`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Expected:` + fmt.Sprintf("%v", this.Expected) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `Labels:` + mapStringForLabels + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *WriteContentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WriteContentResponse{`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `UpdatedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "types.Timestamp", 1), `&`, ``, 1) + `,`, - `Offset:` + fmt.Sprintf("%v", this.Offset) + `,`, - `Total:` + fmt.Sprintf("%v", this.Total) + `,`, - `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *AbortRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AbortRequest{`, - `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringContent(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Info) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InfoResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.UpdateMask == nil { - m.UpdateMask = &types.FieldMask{} - } - if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UpdateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UpdateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Info = append(m.Info, Info{}) - if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReadContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReadContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReadContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= 
int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Status == nil { - m.Status = &Status{} - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListStatusesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListStatusesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListStatusesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListStatusesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListStatusesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListStatusesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Statuses = append(m.Statuses, Status{}) - if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteContentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteContentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= WriteAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthContent - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthContent - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WriteContentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WriteContentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WriteContentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - m.Action = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Action |= WriteAction(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType) - } - m.Offset = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Offset |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AbortRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AbortRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AbortRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowContent - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthContent - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthContent - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipContent(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthContent - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
-      iNdEx += skippy
-    }
-  }
-
-  if iNdEx > l {
-    return io.ErrUnexpectedEOF
-  }
-  return nil
-}
-func skipContent(dAtA []byte) (n int, err error) {
-  l := len(dAtA)
-  iNdEx := 0
-  depth := 0
-  for iNdEx < l {
-    var wire uint64
-    for shift := uint(0); ; shift += 7 {
-      if shift >= 64 {
-        return 0, ErrIntOverflowContent
-      }
-      if iNdEx >= l {
-        return 0, io.ErrUnexpectedEOF
-      }
-      b := dAtA[iNdEx]
-      iNdEx++
-      wire |= (uint64(b) & 0x7F) << shift
-      if b < 0x80 {
-        break
-      }
-    }
-    wireType := int(wire & 0x7)
-    switch wireType {
-    case 0:
-      for shift := uint(0); ; shift += 7 {
-        if shift >= 64 {
-          return 0, ErrIntOverflowContent
-        }
-        if iNdEx >= l {
-          return 0, io.ErrUnexpectedEOF
-        }
-        iNdEx++
-        if dAtA[iNdEx-1] < 0x80 {
-          break
-        }
-      }
-    case 1:
-      iNdEx += 8
-    case 2:
-      var length int
-      for shift := uint(0); ; shift += 7 {
-        if shift >= 64 {
-          return 0, ErrIntOverflowContent
-        }
-        if iNdEx >= l {
-          return 0, io.ErrUnexpectedEOF
-        }
-        b := dAtA[iNdEx]
-        iNdEx++
-        length |= (int(b) & 0x7F) << shift
-        if b < 0x80 {
-          break
-        }
-      }
-      if length < 0 {
-        return 0, ErrInvalidLengthContent
-      }
-      iNdEx += length
-    case 3:
-      depth++
-    case 4:
-      if depth == 0 {
-        return 0, ErrUnexpectedEndOfGroupContent
-      }
-      depth--
-    case 5:
-      iNdEx += 4
-    default:
-      return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-    }
-    if iNdEx < 0 {
-      return 0, ErrInvalidLengthContent
-    }
-    if depth == 0 {
-      return iNdEx, nil
-    }
-  }
-  return 0, io.ErrUnexpectedEOF
-}
-
-var (
-  ErrInvalidLengthContent        = fmt.Errorf("proto: negative length found during unmarshaling")
-  ErrIntOverflowContent          = fmt.Errorf("proto: integer overflow")
-  ErrUnexpectedEndOfGroupContent = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
deleted file mode 100644
index b33ea5b2e8c4..000000000000
--- a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
+++ /dev/null
@@ -1,334 +0,0 @@
-/*
-  Copyright The containerd Authors.
-
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
-*/
-
-syntax = "proto3";
-
-package containerd.services.content.v1;
-
-import weak "gogoproto/gogo.proto";
-import "google/protobuf/field_mask.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/empty.proto";
-
-option go_package = "github.com/containerd/containerd/api/services/content/v1;content";
-
-// Content provides access to a content addressable storage system.
-service Content {
-  // Info returns information about a committed object.
-  //
-  // This call can be used for getting the size of content and checking for
-  // existence.
-  rpc Info(InfoRequest) returns (InfoResponse);
-
-  // Update updates content metadata.
-  //
-  // This call can be used to manage the mutable content labels. The
-  // immutable metadata such as digest, size, and committed at cannot
-  // be updated.
-  rpc Update(UpdateRequest) returns (UpdateResponse);
-
-  // List streams the entire set of content as Info objects and closes the
-  // stream.
-  //
-  // Typically, this will yield a large response, chunked into messages.
-  // Clients should make provisions to ensure they can handle the entire data
-  // set.
-  rpc List(ListContentRequest) returns (stream ListContentResponse);
-
-  // Delete will delete the referenced object.
-  rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
-
-  // Read allows one to read an object based on the offset into the content.
-  //
-  // The requested data may be returned in one or more messages.
-  rpc Read(ReadContentRequest) returns (stream ReadContentResponse);
-
-  // Status returns the status for a single reference.
-  rpc Status(StatusRequest) returns (StatusResponse);
-
-  // ListStatuses returns the status of ongoing object ingestions, started via
-  // Write.
-  //
-  // Only those matching the regular expression will be provided in the
-  // response. If the provided regular expression is empty, all ingestions
-  // will be provided.
-  rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);
-
-  // Write begins or resumes writes to a resource identified by a unique ref.
-  // Only one active stream may exist at a time for each ref.
-  //
-  // Once a write stream has started, it may only write to a single ref, thus
-  // once a stream is started, the ref may be omitted on subsequent writes.
-  //
-  // For any write transaction represented by a ref, only a single write may
-  // be made to a given offset. If overlapping writes occur, it is an error.
-  // Writes should be sequential and implementations may throw an error if
-  // this is required.
-  //
-  // If expected_digest is set and already part of the content store, the
-  // write will fail.
-  //
-  // When completed, the commit flag should be set to true. If expected size
-  // or digest is set, the content will be validated against those values.
-  rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
-
-  // Abort cancels the ongoing write named in the request. Any resources
-  // associated with the write will be collected.
-  rpc Abort(AbortRequest) returns (google.protobuf.Empty);
-}
-
-message Info {
-  // Digest is the hash identity of the blob.
-  string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-
-  // Size is the total number of bytes in the blob.
-  int64 size = 2;
-
-  // CreatedAt provides the time at which the blob was committed.
-  google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-
-  // UpdatedAt provides the time the info was last updated.
-  google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-
-  // Labels are arbitrary data on snapshots.
-  //
-  // The combined size of a key/value pair cannot exceed 4096 bytes.
-  map<string, string> labels = 5;
-}
-
-message InfoRequest {
-  string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-}
-
-message InfoResponse {
-  Info info = 1 [(gogoproto.nullable) = false];
-}
-
-message UpdateRequest {
-  Info info = 1 [(gogoproto.nullable) = false];
-
-  // UpdateMask specifies which fields to perform the update on. If empty,
-  // the operation applies to all fields.
-  //
-  // In info, Digest, Size, and CreatedAt are immutable,
-  // other field may be updated using this mask.
-  // If no mask is provided, all mutable field are updated.
-  google.protobuf.FieldMask update_mask = 2;
-}
-
-message UpdateResponse {
-  Info info = 1 [(gogoproto.nullable) = false];
-}
-
-message ListContentRequest {
-  // Filters contains one or more filters using the syntax defined in the
-  // containerd filter package.
-  //
-  // The returned result will be those that match any of the provided
-  // filters. Expanded, containers that match the following will be
-  // returned:
-  //
-  //   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
-  //
-  // If filters is zero-length or nil, all items will be returned.
-  repeated string filters = 1;
-}
-
-message ListContentResponse {
-  repeated Info info = 1 [(gogoproto.nullable) = false];
-}
-
-message DeleteContentRequest {
-  // Digest specifies which content to delete.
-  string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-}
-
-// ReadContentRequest defines the fields that make up a request to read a portion of
-// data from a stored object.
-message ReadContentRequest {
-  // Digest is the hash identity to read.
-  string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-
-  // Offset specifies the number of bytes from the start at which to begin
-  // the read. If zero or less, the read will be from the start. This uses
-  // standard zero-indexed semantics.
-  int64 offset = 2;
-
-  // size is the total size of the read. If zero, the entire blob will be
-  // returned by the service.
-  int64 size = 3;
-}
-
-// ReadContentResponse carries byte data for a read request.
-message ReadContentResponse {
-  int64 offset = 1; // offset of the returned data
-  bytes data = 2; // actual data
-}
-
-message Status {
-  google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-  string ref = 3;
-  int64 offset = 4;
-  int64 total = 5;
-  string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-}
-
-
-message StatusRequest {
-  string ref = 1;
-}
-
-message StatusResponse {
-  Status status = 1;
-}
-
-message ListStatusesRequest {
-  repeated string filters = 1;
-}
-
-message ListStatusesResponse {
-  repeated Status statuses = 1 [(gogoproto.nullable) = false];
-}
-
-// WriteAction defines the behavior of a WriteRequest.
-enum WriteAction {
-  option (gogoproto.goproto_enum_prefix) = false;
-  option (gogoproto.enum_customname) = "WriteAction";
-
-  // WriteActionStat instructs the writer to return the current status while
-  // holding the lock on the write.
-  STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"];
-
-  // WriteActionWrite sets the action for the write request to write data.
-  //
-  // Any data included will be written at the provided offset. The
-  // transaction will be left open for further writes.
-  //
-  // This is the default.
-  WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"];
-
-  // WriteActionCommit will write any outstanding data in the message and
-  // commit the write, storing it under the digest.
-  //
-  // This can be used in a single message to send the data, verify it and
-  // commit it.
-  //
-  // This action will always terminate the write.
-  COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
-}
-
-// WriteContentRequest writes data to the request ref at offset.
-message WriteContentRequest {
-  // Action sets the behavior of the write.
-  //
-  // When this is a write and the ref is not yet allocated, the ref will be
-  // allocated and the data will be written at offset.
-  //
-  // If the action is write and the ref is allocated, it will accept data to
-  // an offset that has not yet been written.
-  //
-  // If the action is write and there is no data, the current write status
-  // will be returned. This works differently from status because the stream
-  // holds a lock.
-  WriteAction action = 1;
-
-  // Ref identifies the pre-commit object to write to.
-  string ref = 2;
-
-  // Total can be set to have the service validate the total size of the
-  // committed content.
-  //
-  // The latest value before or with the commit action message will be use to
-  // validate the content. If the offset overflows total, the service may
-  // report an error. It is only required on one message for the write.
-  //
-  // If the value is zero or less, no validation of the final content will be
-  // performed.
-  int64 total = 3;
-
-  // Expected can be set to have the service validate the final content against
-  // the provided digest.
-  //
-  // If the digest is already present in the object store, an AlreadyExists
-  // error will be returned.
-  //
-  // Only the latest version will be used to check the content against the
-  // digest. It is only required to include it on a single message, before or
-  // with the commit action message.
-  string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-
-  // Offset specifies the number of bytes from the start at which to begin
-  // the write. For most implementations, this means from the start of the
-  // file. This uses standard, zero-indexed semantics.
-  //
-  // If the action is write, the remote may remove all previously written
-  // data after the offset. Implementations may support arbitrary offsets but
-  // MUST support reseting this value to zero with a write. If an
-  // implementation does not support a write at a particular offset, an
-  // OutOfRange error must be returned.
-  int64 offset = 5;
-
-  // Data is the actual bytes to be written.
-  //
-  // If this is empty and the message is not a commit, a response will be
-  // returned with the current write state.
-  bytes data = 6;
-
-  // Labels are arbitrary data on snapshots.
-  //
-  // The combined size of a key/value pair cannot exceed 4096 bytes.
-  map<string, string> labels = 7;
-}
-
-// WriteContentResponse is returned on the culmination of a write call.
-message WriteContentResponse {
-  // Action contains the action for the final message of the stream. A writer
-  // should confirm that they match the intended result.
-  WriteAction action = 1;
-
-  // StartedAt provides the time at which the write began.
-  //
-  // This must be set for stat and commit write actions. All other write
-  // actions may omit this.
-  google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-
-  // UpdatedAt provides the last time of a successful write.
-  //
-  // This must be set for stat and commit write actions. All other write
-  // actions may omit this.
-  google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-
-  // Offset is the current committed size for the write.
- int64 offset = 4; - - // Total provides the current, expected total size of the write. - // - // We include this to provide consistency with the Status structure on the - // client writer. - // - // This is only valid on the Stat and Commit response. - int64 total = 5; - - // Digest, if present, includes the digest up to the currently committed - // bytes. If action is commit, this field will be set. It is implementation - // defined if this is set for other actions. - string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; -} - -message AbortRequest { - string ref = 1; -} diff --git a/vendor/github.com/containerd/containerd/content/adaptor.go b/vendor/github.com/containerd/containerd/content/adaptor.go deleted file mode 100644 index 88bad2610e8a..000000000000 --- a/vendor/github.com/containerd/containerd/content/adaptor.go +++ /dev/null @@ -1,52 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "strings" - - "github.com/containerd/containerd/filters" -) - -// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. -func AdaptInfo(info Info) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - - switch fieldpath[0] { - case "digest": - return info.Digest.String(), true - case "size": - // TODO: support size based filtering - case "labels": - return checkMap(fieldpath[1:], info.Labels) - } - - return "", false - }) -} - -func checkMap(fieldpath []string, m map[string]string) (string, bool) { - if len(m) == 0 { - return "", false - } - - value, ok := m[strings.Join(fieldpath, ".")] - return value, ok -} diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go deleted file mode 100644 index ff17a8417b39..000000000000 --- a/vendor/github.com/containerd/containerd/content/content.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package content - -import ( - "context" - "io" - "time" - - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer -type ReaderAt interface { - io.ReaderAt - io.Closer - Size() int64 -} - -// Provider provides a reader interface for specific content -type Provider interface { - // ReaderAt only requires desc.Digest to be set. - // Other fields in the descriptor may be used internally for resolving - // the location of the actual data. - ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) -} - -// Ingester writes content -type Ingester interface { - // Some implementations require WithRef to be included in opts. - Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) -} - -// Info holds content specific information -// -// TODO(stevvooe): Consider a very different name for this struct. Info is way -// to general. It also reads very weird in certain context, like pluralization. -type Info struct { - Digest digest.Digest - Size int64 - CreatedAt time.Time - UpdatedAt time.Time - Labels map[string]string -} - -// Status of a content operation -type Status struct { - Ref string - Offset int64 - Total int64 - Expected digest.Digest - StartedAt time.Time - UpdatedAt time.Time -} - -// WalkFunc defines the callback for a blob walk. -type WalkFunc func(Info) error - -// Manager provides methods for inspecting, listing and removing content. -type Manager interface { - // Info will return metadata about content available in the content store. - // - // If the content is not present, ErrNotFound will be returned. - Info(ctx context.Context, dgst digest.Digest) (Info, error) - - // Update updates mutable information related to content. - // If one or more fieldpaths are provided, only those - // fields will be updated. - // Mutable fields: - // labels.* - Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) - - // Walk will call fn for each item in the content store which - // match the provided filters. If no filters are given all - // items will be walked. - Walk(ctx context.Context, fn WalkFunc, filters ...string) error - - // Delete removes the content from the store. - Delete(ctx context.Context, dgst digest.Digest) error -} - -// IngestManager provides methods for managing ingests. -type IngestManager interface { - // Status returns the status of the provided ref. - Status(ctx context.Context, ref string) (Status, error) - - // ListStatuses returns the status of any active ingestions whose ref match the - // provided regular expression. If empty, all active ingestions will be - // returned. - ListStatuses(ctx context.Context, filters ...string) ([]Status, error) - - // Abort completely cancels the ingest operation targeted by ref. - Abort(ctx context.Context, ref string) error -} - -// Writer handles the write of content into a content store -type Writer interface { - // Close closes the writer, if the writer has not been - // committed this allows resuming or aborting. - // Calling Close on a closed writer will not error. - io.WriteCloser - - // Digest may return empty digest or panics until committed. - Digest() digest.Digest - - // Commit commits the blob (but no roll-back is guaranteed on an error). - // size and expected can be zero-value when unknown. - // Commit always closes the writer, even on error. - // ErrAlreadyExists aborts the writer. 
- Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error - - // Status returns the current state of write - Status() (Status, error) - - // Truncate updates the size of the target blob - Truncate(size int64) error -} - -// Store combines the methods of content-oriented interfaces into a set that -// are commonly provided by complete implementations. -type Store interface { - Manager - Provider - IngestManager - Ingester -} - -// Opt is used to alter the mutable properties of content -type Opt func(*Info) error - -// WithLabels allows labels to be set on content -func WithLabels(labels map[string]string) Opt { - return func(info *Info) error { - info.Labels = labels - return nil - } -} - -// WriterOpts is internally used by WriterOpt. -type WriterOpts struct { - Ref string - Desc ocispec.Descriptor -} - -// WriterOpt is used for passing options to Ingester.Writer. -type WriterOpt func(*WriterOpts) error - -// WithDescriptor specifies an OCI descriptor. -// Writer may optionally use the descriptor internally for resolving -// the location of the actual data. -// Write does not require any field of desc to be set. -// If the data size is unknown, desc.Size should be set to 0. -// Some implementations may also accept negative values as "unknown". -func WithDescriptor(desc ocispec.Descriptor) WriterOpt { - return func(opts *WriterOpts) error { - opts.Desc = desc - return nil - } -} - -// WithRef specifies a ref string. -func WithRef(ref string) WriterOpt { - return func(opts *WriterOpts) error { - opts.Ref = ref - return nil - } -} diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go deleted file mode 100644 index 4c4a35308e67..000000000000 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ /dev/null @@ -1,275 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package content - -import ( - "context" - "io" - "io/ioutil" - "math/rand" - "sync" - "time" - - "github.com/containerd/containerd/errdefs" - "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// NewReader returns a io.Reader from a ReaderAt -func NewReader(ra ReaderAt) io.Reader { - rd := io.NewSectionReader(ra, 0, ra.Size()) - return rd -} - -// ReadBlob retrieves the entire contents of the blob from the provider. -// -// Avoid using this for large blobs, such as layers. 
-func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { - ra, err := provider.ReaderAt(ctx, desc) - if err != nil { - return nil, err - } - defer ra.Close() - - p := make([]byte, ra.Size()) - - n, err := ra.ReadAt(p, 0) - if err == io.EOF { - if int64(n) != ra.Size() { - err = io.ErrUnexpectedEOF - } else { - err = nil - } - } - return p, err -} - -// WriteBlob writes data with the expected digest into the content store. If -// expected already exists, the method returns immediately and the reader will -// not be consumed. -// -// This is useful when the digest and size are known beforehand. -// -// Copy is buffered, so no need to wrap reader in buffered io. -func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { - cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) - if err != nil { - if !errdefs.IsAlreadyExists(err) { - return errors.Wrap(err, "failed to open writer") - } - - return nil // all ready present - } - defer cw.Close() - - return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) -} - -// OpenWriter opens a new writer for the given reference, retrying if the writer -// is locked until the reference is available or returns an error. -func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { - var ( - cw Writer - err error - retry = 16 - ) - for { - cw, err = cs.Writer(ctx, opts...) - if err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - // TODO: Check status to determine if the writer is active, - // continue waiting while active, otherwise return lock - // error or abort. Requires asserting for an ingest manager - - select { - case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))): - if retry < 2048 { - retry = retry << 1 - } - continue - case <-ctx.Done(): - // Propagate lock error - return nil, err - } - - } - break - } - - return cw, err -} - -// Copy copies data with the expected digest from the reader into the -// provided content store writer. This copy commits the writer. -// -// This is useful when the digest and size are known beforehand. When -// the size or digest is unknown, these values may be empty. -// -// Copy is buffered, so no need to wrap reader in buffered io. -func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error { - ws, err := cw.Status() - if err != nil { - return errors.Wrap(err, "failed to get status") - } - - if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, size) - if err != nil { - return errors.Wrapf(err, "unable to resume write to %v", ws.Ref) - } - } - - if _, err := copyWithBuffer(cw, r); err != nil { - return errors.Wrap(err, "failed to copy") - } - - if err := cw.Commit(ctx, size, expected, opts...); err != nil { - if !errdefs.IsAlreadyExists(err) { - return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) - } - } - - return nil -} - -// CopyReaderAt copies to a writer from a given reader at for the given -// number of bytes. This copy does not commit the writer. -func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { - ws, err := cw.Status() - if err != nil { - return err - } - - _, err = copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) - return err -} - -// CopyReader copies to a writer from a given reader, returning -// the number of bytes copied. 
-// Note: if the writer has a non-zero offset, the total number -// of bytes read may be greater than those copied if the reader -// is not an io.Seeker. -// This copy does not commit the writer. -func CopyReader(cw Writer, r io.Reader) (int64, error) { - ws, err := cw.Status() - if err != nil { - return 0, errors.Wrap(err, "failed to get status") - } - - if ws.Offset > 0 { - r, err = seekReader(r, ws.Offset, 0) - if err != nil { - return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) - } - } - - return copyWithBuffer(cw, r) -} - -// seekReader attempts to seek the reader to the given offset, either by -// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding -// up to the given offset. -func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { - // attempt to resolve r as a seeker and setup the offset. - seeker, ok := r.(io.Seeker) - if ok { - nn, err := seeker.Seek(offset, io.SeekStart) - if nn != offset { - return nil, errors.Wrapf(err, "failed to seek to offset %v", offset) - } - - if err != nil { - return nil, err - } - - return r, nil - } - - // ok, let's try io.ReaderAt! - readerAt, ok := r.(io.ReaderAt) - if ok && size > offset { - sr := io.NewSectionReader(readerAt, offset, size) - return sr, nil - } - - // well then, let's just discard up to the offset - n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset)) - if err != nil { - return nil, errors.Wrap(err, "failed to discard to offset") - } - if n != offset { - return nil, errors.Errorf("unable to discard to offset") - } - - return r, nil -} - -// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer -// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have -// a full buffer before we do a write operation to dst to reduce overheads associated -// with the write operations of small buffers. -func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { - // If the reader has a WriteTo method, use it to do the copy. - // Avoids an allocation and a copy. - if wt, ok := src.(io.WriterTo); ok { - return wt.WriteTo(dst) - } - // Similarly, if the writer has a ReadFrom method, use it to do the copy. - if rt, ok := dst.(io.ReaderFrom); ok { - return rt.ReadFrom(src) - } - bufRef := bufPool.Get().(*[]byte) - defer bufPool.Put(bufRef) - buf := *bufRef - for { - nr, er := io.ReadAtLeast(src, buf, len(buf)) - if nr > 0 { - nw, ew := dst.Write(buf[0:nr]) - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er != nil { - // If an EOF happens after reading fewer than the requested bytes, - // ReadAtLeast returns ErrUnexpectedEOF. - if er != io.EOF && er != io.ErrUnexpectedEOF { - err = er - } - break - } - } - return -} diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go deleted file mode 100644 index d1d2d564df24..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/locks.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "sync" - "time" - - "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" -) - -// Handles locking references - -type lock struct { - since time.Time -} - -var ( - // locks lets us lock in process - locks = make(map[string]*lock) - locksMu sync.Mutex -) - -func tryLock(ref string) error { - locksMu.Lock() - defer locksMu.Unlock() - - if v, ok := locks[ref]; ok { - return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since) - } - - locks[ref] = &lock{time.Now()} - return nil -} - -func unlock(ref string) { - locksMu.Lock() - defer locksMu.Unlock() - - delete(locks, ref) -} diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go deleted file mode 100644 index 5d3ae0390382..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/readerat.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - - "github.com/pkg/errors" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" -) - -// readerat implements io.ReaderAt in a completely stateless manner by opening -// the referenced file for each call to ReadAt. -type sizeReaderAt struct { - size int64 - fp *os.File -} - -// OpenReader creates ReaderAt from a file -func OpenReader(p string) (content.ReaderAt, error) { - fi, err := os.Stat(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") - } - - fp, err := os.Open(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") - } - - return sizeReaderAt{size: fi.Size(), fp: fp}, nil -} - -func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { - return ra.fp.ReadAt(p, offset) -} - -func (ra sizeReaderAt) Size() int64 { - return ra.size -} - -func (ra sizeReaderAt) Close() error { - return ra.fp.Close() -} diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go deleted file mode 100644 index 314d913673bb..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store.go +++ /dev/null @@ -1,701 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "context" - "fmt" - "io" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/filters" - "github.com/containerd/containerd/log" - "github.com/sirupsen/logrus" - - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// LabelStore is used to store mutable labels for digests -type LabelStore interface { - // Get returns all the labels for the given digest - Get(digest.Digest) (map[string]string, error) - - // Set sets all the labels for a given digest - Set(digest.Digest, map[string]string) error - - // Update replaces the given labels for a digest, - // a key with an empty value removes a label. - Update(digest.Digest, map[string]string) (map[string]string, error) -} - -// Store is digest-keyed store for content. All data written into the store is -// stored under a verifiable digest. -// -// Store can generally support multi-reader, single-writer ingest of data, -// including resumable ingest. -type store struct { - root string - ls LabelStore -} - -// NewStore returns a local content store -func NewStore(root string) (content.Store, error) { - return NewLabeledStore(root, nil) -} - -// NewLabeledStore returns a new content store using the provided label store -// -// Note: content stores which are used underneath a metadata store may not -// require labels and should use `NewStore`. `NewLabeledStore` is primarily -// useful for tests or standalone implementations. -func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { - if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil { - return nil, err - } - - return &store{ - root: root, - ls: ls, - }, nil -} - -func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - p, err := s.blobPath(dgst) - if err != nil { - return content.Info{}, errors.Wrapf(err, "calculating blob info path") - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) - } - - return content.Info{}, err - } - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return content.Info{}, err - } - } - return s.info(dgst, fi, labels), nil -} - -func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { - return content.Info{ - Digest: dgst, - Size: fi.Size(), - CreatedAt: fi.ModTime(), - UpdatedAt: getATime(fi), - Labels: labels, - } -} - -// ReaderAt returns an io.ReaderAt for the blob. 
-func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - p, err := s.blobPath(desc.Digest) - if err != nil { - return nil, errors.Wrapf(err, "calculating blob path for ReaderAt") - } - - reader, err := OpenReader(p) - if err != nil { - return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p) - } - - return reader, nil -} - -// Delete removes a blob by its digest. -// -// While this is safe to do concurrently, safe exist-removal logic must hold -// some global lock on the store. -func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { - bp, err := s.blobPath(dgst) - if err != nil { - return errors.Wrapf(err, "calculating blob path for delete") - } - - if err := os.RemoveAll(bp); err != nil { - if !os.IsNotExist(err) { - return err - } - - return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) - } - - return nil -} - -func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - if s.ls == nil { - return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") - } - - p, err := s.blobPath(info.Digest) - if err != nil { - return content.Info{}, errors.Wrapf(err, "calculating blob path for update") - } - - fi, err := os.Stat(p) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest) - } - - return content.Info{}, err - } - - var ( - all bool - labels map[string]string - ) - if len(fieldpaths) > 0 { - for _, path := range fieldpaths { - if strings.HasPrefix(path, "labels.") { - if labels == nil { - labels = map[string]string{} - } - - key := strings.TrimPrefix(path, "labels.") - labels[key] = info.Labels[key] - continue - } - - switch path { - case "labels": - all = true - labels = info.Labels - default: - return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) - } - } - } else { - all = true - labels = info.Labels - } - - if all { - err = s.ls.Set(info.Digest, labels) - } else { - labels, err = s.ls.Update(info.Digest, labels) - } - if err != nil { - return content.Info{}, err - } - - info = s.info(info.Digest, fi, labels) - info.UpdatedAt = time.Now() - - if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { - log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) - } - - return info, nil -} - -func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { - root := filepath.Join(s.root, "blobs") - - filter, err := filters.ParseAll(fs...) - if err != nil { - return err - } - - var alg digest.Algorithm - return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - if !fi.IsDir() && !alg.Available() { - return nil - } - - // TODO(stevvooe): There are few more cases with subdirs that should be - // handled in case the layout gets corrupted. This isn't strict enough - // and may spew bad data. 
- - if path == root { - return nil - } - if filepath.Dir(path) == root { - alg = digest.Algorithm(filepath.Base(path)) - - if !alg.Available() { - alg = "" - return filepath.SkipDir - } - - // descending into a hash directory - return nil - } - - dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path)) - if err := dgst.Validate(); err != nil { - // log error but don't report - log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") - // if we see this, it could mean some sort of corruption of the - // store or extra paths not expected previously. - } - - var labels map[string]string - if s.ls != nil { - labels, err = s.ls.Get(dgst) - if err != nil { - return err - } - } - - info := s.info(dgst, fi, labels) - if !filter.Match(content.AdaptInfo(info)) { - return nil - } - return fn(info) - }) -} - -func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { - return s.status(s.ingestRoot(ref)) -} - -func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - return nil, err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return nil, err - } - - filter, err := filters.ParseAll(fs...) - if err != nil { - return nil, err - } - - var active []content.Status - for _, fi := range fis { - p := filepath.Join(s.root, "ingest", fi.Name()) - stat, err := s.status(p) - if err != nil { - if !os.IsNotExist(err) { - return nil, err - } - - // TODO(stevvooe): This is a common error if uploads are being - // completed while making this listing. Need to consider taking a - // lock on the whole store to coordinate this aspect. - // - // Another option is to cleanup downloads asynchronously and - // coordinate this method with the cleanup process. - // - // For now, we just skip them, as they really don't exist. - continue - } - - if filter.Match(adaptStatus(stat)) { - active = append(active, stat) - } - } - - return active, nil -} - -// WalkStatusRefs is used to walk all status references -// Failed status reads will be logged and ignored, if -// this function is called while references are being altered, -// these error messages may be produced. -func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { - fp, err := os.Open(filepath.Join(s.root, "ingest")) - if err != nil { - return err - } - - defer fp.Close() - - fis, err := fp.Readdir(-1) - if err != nil { - return err - } - - for _, fi := range fis { - rf := filepath.Join(s.root, "ingest", fi.Name(), "ref") - - ref, err := readFileString(rf) - if err != nil { - log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") - continue - } - - if err := fn(ref); err != nil { - return err - } - } - - return nil -} - -// status works like stat above except uses the path to the ingest. 
-func (s *store) status(ingestPath string) (content.Status, error) { - dp := filepath.Join(ingestPath, "data") - fi, err := os.Stat(dp) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) - } - return content.Status{}, err - } - - ref, err := readFileString(filepath.Join(ingestPath, "ref")) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) - } - return content.Status{}, err - } - - startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) - if err != nil { - return content.Status{}, errors.Wrapf(err, "could not read startedat") - } - - updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) - if err != nil { - return content.Status{}, errors.Wrapf(err, "could not read updatedat") - } - - // because we don't write updatedat on every write, the mod time may - // actually be more up to date. - if fi.ModTime().After(updatedAt) { - updatedAt = fi.ModTime() - } - - return content.Status{ - Ref: ref, - Offset: fi.Size(), - Total: s.total(ingestPath), - UpdatedAt: updatedAt, - StartedAt: startedAt, - }, nil -} - -func adaptStatus(status content.Status) filters.Adaptor { - return filters.AdapterFunc(func(fieldpath []string) (string, bool) { - if len(fieldpath) == 0 { - return "", false - } - switch fieldpath[0] { - case "ref": - return status.Ref, true - } - - return "", false - }) -} - -// total attempts to resolve the total expected size for the write. -func (s *store) total(ingestPath string) int64 { - totalS, err := readFileString(filepath.Join(ingestPath, "total")) - if err != nil { - return 0 - } - - total, err := strconv.ParseInt(totalS, 10, 64) - if err != nil { - // represents a corrupted file, should probably remove. - return 0 - } - - return total -} - -// Writer begins or resumes the active writer identified by ref. If the writer -// is already in use, an error is returned. Only one writer may be in use per -// ref at a time. -// -// The argument `ref` is used to uniquely identify a long-lived writer transaction. -func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - // TODO(AkihiroSuda): we could create a random string or one calculated based on the context - // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 - if wOpts.Ref == "" { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") - } - var lockErr error - for count := uint64(0); count < 10; count++ { - if err := tryLock(wOpts.Ref); err != nil { - if !errdefs.IsUnavailable(err) { - return nil, err - } - - lockErr = err - } else { - lockErr = nil - break - } - time.Sleep(time.Millisecond * time.Duration(rand.Intn(1< 0 && status.Total > 0 && total != status.Total { - return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total) - } - - // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes - fp, err := os.Open(data) - if err != nil { - return status, err - } - - p := bufPool.Get().(*[]byte) - status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p) - bufPool.Put(p) - fp.Close() - return status, err -} - -// writer provides the main implementation of the Writer method. The caller -// must hold the lock correctly and release on error if there is a problem. 
-func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { - // TODO(stevvooe): Need to actually store expected here. We have - // code in the service that shouldn't be dealing with this. - if expected != "" { - p, err := s.blobPath(expected) - if err != nil { - return nil, errors.Wrap(err, "calculating expected blob path for writer") - } - if _, err := os.Stat(p); err == nil { - return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected) - } - } - - path, refp, data := s.ingestPaths(ref) - - var ( - digester = digest.Canonical.Digester() - offset int64 - startedAt time.Time - updatedAt time.Time - ) - - foundValidIngest := false - // ensure that the ingest path has been created. - if err := os.Mkdir(path, 0755); err != nil { - if !os.IsExist(err) { - return nil, err - } - status, err := s.resumeStatus(ref, total, digester) - if err == nil { - foundValidIngest = true - updatedAt = status.UpdatedAt - startedAt = status.StartedAt - total = status.Total - offset = status.Offset - } else { - logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) - } - } - - if !foundValidIngest { - startedAt = time.Now() - updatedAt = startedAt - - // the ingest is new, we need to setup the target location. - // write the ref to a file for later use - if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { - return nil, err - } - - if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { - return nil, err - } - - if total > 0 { - if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { - return nil, err - } - } - } - - fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) - if err != nil { - return nil, errors.Wrap(err, "failed to open data file") - } - - if _, err := fp.Seek(offset, io.SeekStart); err != nil { - return nil, errors.Wrap(err, "could not seek to current write offset") - } - - return &writer{ - s: s, - fp: fp, - ref: ref, - path: path, - offset: offset, - total: total, - digester: digester, - startedAt: startedAt, - updatedAt: updatedAt, - }, nil -} - -// Abort an active transaction keyed by ref. If the ingest is active, it will -// be cancelled. Any resources associated with the ingest will be cleaned. -func (s *store) Abort(ctx context.Context, ref string) error { - root := s.ingestRoot(ref) - if err := os.RemoveAll(root); err != nil { - if os.IsNotExist(err) { - return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref) - } - - return err - } - - return nil -} - -func (s *store) blobPath(dgst digest.Digest) (string, error) { - if err := dgst.Validate(); err != nil { - return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err) - } - - return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil -} - -func (s *store) ingestRoot(ref string) string { - // we take a digest of the ref to keep the ingest paths constant length. - // Note that this is not the current or potential digest of incoming content. - dgst := digest.FromString(ref) - return filepath.Join(s.root, "ingest", dgst.Hex()) -} - -// ingestPaths are returned. 
The paths are the following: -// -// - root: entire ingest directory -// - ref: name of the starting ref, must be unique -// - data: file where data is written -// -func (s *store) ingestPaths(ref string) (string, string, string) { - var ( - fp = s.ingestRoot(ref) - rp = filepath.Join(fp, "ref") - dp = filepath.Join(fp, "data") - ) - - return fp, rp, dp -} - -func readFileString(path string) (string, error) { - p, err := ioutil.ReadFile(path) - return string(p), err -} - -// readFileTimestamp reads a file with just a timestamp present. -func readFileTimestamp(p string) (time.Time, error) { - b, err := ioutil.ReadFile(p) - if err != nil { - if os.IsNotExist(err) { - err = errors.Wrap(errdefs.ErrNotFound, err.Error()) - } - return time.Time{}, err - } - - var t time.Time - if err := t.UnmarshalText(b); err != nil { - return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p) - } - - return t, nil -} - -func writeTimestampFile(p string, t time.Time) error { - b, err := t.MarshalText() - if err != nil { - return err - } - return atomicWrite(p, b, 0666) -} - -func atomicWrite(path string, data []byte, mode os.FileMode) error { - tmp := fmt.Sprintf("%s.tmp", path) - f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) - if err != nil { - return errors.Wrap(err, "create tmp file") - } - _, err = f.Write(data) - f.Close() - if err != nil { - return errors.Wrap(err, "write atomic data") - } - return os.Rename(tmp, path) -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_bsd.go b/vendor/github.com/containerd/containerd/content/local/store_bsd.go deleted file mode 100644 index da149a2fdae1..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_bsd.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build darwin freebsd netbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. - } - - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go deleted file mode 100644 index f34f0dad2b98..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build openbsd - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. - } - - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go deleted file mode 100644 index 69a74bab0eab..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build linux solaris - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "syscall" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - if st, ok := fi.Sys().(*syscall.Stat_t); ok { - return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. - } - - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go deleted file mode 100644 index bce849979099..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/store_windows.go +++ /dev/null @@ -1,26 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "os" - "time" -) - -func getATime(fi os.FileInfo) time.Time { - return fi.ModTime() -} diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go deleted file mode 100644 index 0a11f4d912ea..000000000000 --- a/vendor/github.com/containerd/containerd/content/local/writer.go +++ /dev/null @@ -1,207 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package local - -import ( - "context" - "io" - "os" - "path/filepath" - "runtime" - "time" - - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// writer represents a write transaction against the blob store. -type writer struct { - s *store - fp *os.File // opened data file - path string // path to writer dir - ref string // ref key - offset int64 - total int64 - digester digest.Digester - startedAt time.Time - updatedAt time.Time -} - -func (w *writer) Status() (content.Status, error) { - return content.Status{ - Ref: w.ref, - Offset: w.offset, - Total: w.total, - StartedAt: w.startedAt, - UpdatedAt: w.updatedAt, - }, nil -} - -// Digest returns the current digest of the content, up to the current write. -// -// Cannot be called concurrently with `Write`. -func (w *writer) Digest() digest.Digest { - return w.digester.Digest() -} - -// Write p to the transaction. -// -// Note that writes are unbuffered to the backing file. When writing, it is -// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. -func (w *writer) Write(p []byte) (n int, err error) { - n, err = w.fp.Write(p) - w.digester.Hash().Write(p[:n]) - w.offset += int64(len(p)) - w.updatedAt = time.Now() - return n, err -} - -func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { - // Ensure even on error the writer is fully closed - defer unlock(w.ref) - - var base content.Info - for _, opt := range opts { - if err := opt(&base); err != nil { - return err - } - } - - fp := w.fp - w.fp = nil - - if fp == nil { - return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") - } - - if err := fp.Sync(); err != nil { - fp.Close() - return errors.Wrap(err, "sync failed") - } - - fi, err := fp.Stat() - closeErr := fp.Close() - if err != nil { - return errors.Wrap(err, "stat on ingest file failed") - } - if closeErr != nil { - return errors.Wrap(err, "failed to close ingest file") - } - - if size > 0 && size != fi.Size() { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size) - } - - dgst := w.digester.Digest() - if expected != "" && expected != dgst { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) - } - - var ( - ingest = filepath.Join(w.path, "data") - target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst - ) - - // make sure parent directories of blob exist - if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { - return err - } - - if _, err := os.Stat(target); err == nil { - // collision with the target file! 
- if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") - } - return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst) - } - - if err := os.Rename(ingest, target); err != nil { - return err - } - - // Ingest has now been made available in the content store, attempt to complete - // setting metadata but errors should only be logged and not returned since - // the content store cannot be cleanly rolled back. - - commitTime := time.Now() - if err := os.Chtimes(target, commitTime, commitTime); err != nil { - log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time") - } - - // clean up!! - if err := os.RemoveAll(w.path); err != nil { - log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") - } - - if w.s.ls != nil && base.Labels != nil { - if err := w.s.ls.Set(dgst, base.Labels); err != nil { - log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels") - } - } - - // change to readonly, more important for read, but provides _some_ - // protection from this point on. We use the existing perms with a mask - // only allowing reads honoring the umask on creation. - // - // This removes write and exec, only allowing read per the creation umask. - // - // NOTE: Windows does not support this operation - if runtime.GOOS != "windows" { - if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { - log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly") - } - } - - return nil -} - -// Close the writer, flushing any unwritten data and leaving the progress in -// tact. -// -// If one needs to resume the transaction, a new writer can be obtained from -// `Ingester.Writer` using the same key. The write can then be continued -// from it was left off. -// -// To abandon a transaction completely, first call close then `IngestManager.Abort` to -// clean up the associated resources. -func (w *writer) Close() (err error) { - if w.fp != nil { - w.fp.Sync() - err = w.fp.Close() - writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) - w.fp = nil - unlock(w.ref) - return - } - - return nil -} - -func (w *writer) Truncate(size int64) error { - if size != 0 { - return errors.New("Truncate: unsupported size") - } - w.offset = 0 - w.digester.Hash().Reset() - if _, err := w.fp.Seek(0, io.SeekStart); err != nil { - return err - } - return w.fp.Truncate(0) -} diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_reader.go b/vendor/github.com/containerd/containerd/content/proxy/content_reader.go deleted file mode 100644 index 2947a7c8217d..000000000000 --- a/vendor/github.com/containerd/containerd/content/proxy/content_reader.go +++ /dev/null @@ -1,71 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package proxy - -import ( - "context" - - contentapi "github.com/containerd/containerd/api/services/content/v1" - digest "github.com/opencontainers/go-digest" -) - -type remoteReaderAt struct { - ctx context.Context - digest digest.Digest - size int64 - client contentapi.ContentClient -} - -func (ra *remoteReaderAt) Size() int64 { - return ra.size -} - -func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) { - rr := &contentapi.ReadContentRequest{ - Digest: ra.digest, - Offset: off, - Size_: int64(len(p)), - } - // we need a child context with cancel, or the eventually called - // grpc.NewStream will leak the goroutine until the whole thing is cleared. - // See comment at https://godoc.org/google.golang.org/grpc#ClientConn.NewStream - childCtx, cancel := context.WithCancel(ra.ctx) - // we MUST cancel the child context; see comment above - defer cancel() - rc, err := ra.client.Read(childCtx, rr) - if err != nil { - return 0, err - } - - for len(p) > 0 { - var resp *contentapi.ReadContentResponse - // fill our buffer up until we can fill p. - resp, err = rc.Recv() - if err != nil { - return n, err - } - - copied := copy(p, resp.Data) - n += copied - p = p[copied:] - } - return n, nil -} - -func (ra *remoteReaderAt) Close() error { - return nil -} diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_store.go b/vendor/github.com/containerd/containerd/content/proxy/content_store.go deleted file mode 100644 index 217b746516a1..000000000000 --- a/vendor/github.com/containerd/containerd/content/proxy/content_store.go +++ /dev/null @@ -1,234 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package proxy - -import ( - "context" - "io" - - contentapi "github.com/containerd/containerd/api/services/content/v1" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - protobuftypes "github.com/gogo/protobuf/types" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" -) - -type proxyContentStore struct { - client contentapi.ContentClient -} - -// NewContentStore returns a new content store which communicates over a GRPC -// connection using the containerd content GRPC API. 
-func NewContentStore(client contentapi.ContentClient) content.Store { - return &proxyContentStore{ - client: client, - } -} - -func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - resp, err := pcs.client.Info(ctx, &contentapi.InfoRequest{ - Digest: dgst, - }) - if err != nil { - return content.Info{}, errdefs.FromGRPC(err) - } - - return infoFromGRPC(resp.Info), nil -} - -func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { - session, err := pcs.client.List(ctx, &contentapi.ListContentRequest{ - Filters: filters, - }) - if err != nil { - return errdefs.FromGRPC(err) - } - - for { - msg, err := session.Recv() - if err != nil { - if err != io.EOF { - return errdefs.FromGRPC(err) - } - - break - } - - for _, info := range msg.Info { - if err := fn(infoFromGRPC(info)); err != nil { - return err - } - } - } - - return nil -} - -func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) error { - if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{ - Digest: dgst, - }); err != nil { - return errdefs.FromGRPC(err) - } - - return nil -} - -// ReaderAt ignores MediaType. -func (pcs *proxyContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - i, err := pcs.Info(ctx, desc.Digest) - if err != nil { - return nil, err - } - - return &remoteReaderAt{ - ctx: ctx, - digest: desc.Digest, - size: i.Size, - client: pcs.client, - }, nil -} - -func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.Status, error) { - resp, err := pcs.client.Status(ctx, &contentapi.StatusRequest{ - Ref: ref, - }) - if err != nil { - return content.Status{}, errdefs.FromGRPC(err) - } - - status := resp.Status - return content.Status{ - Ref: status.Ref, - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, - Offset: status.Offset, - Total: status.Total, - Expected: status.Expected, - }, nil -} - -func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - resp, err := pcs.client.Update(ctx, &contentapi.UpdateRequest{ - Info: infoToGRPC(info), - UpdateMask: &protobuftypes.FieldMask{ - Paths: fieldpaths, - }, - }) - if err != nil { - return content.Info{}, errdefs.FromGRPC(err) - } - return infoFromGRPC(resp.Info), nil -} - -func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) { - resp, err := pcs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{ - Filters: filters, - }) - if err != nil { - return nil, errdefs.FromGRPC(err) - } - - var statuses []content.Status - for _, status := range resp.Statuses { - statuses = append(statuses, content.Status{ - Ref: status.Ref, - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, - Offset: status.Offset, - Total: status.Total, - Expected: status.Expected, - }) - } - - return statuses, nil -} - -// Writer ignores MediaType. -func (pcs *proxyContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - var wOpts content.WriterOpts - for _, opt := range opts { - if err := opt(&wOpts); err != nil { - return nil, err - } - } - wrclient, offset, err := pcs.negotiate(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest) - if err != nil { - return nil, errdefs.FromGRPC(err) - } - - return &remoteWriter{ - ref: wOpts.Ref, - client: wrclient, - offset: offset, - }, nil -} - -// Abort implements asynchronous abort. 
It starts a new write session on the ref l -func (pcs *proxyContentStore) Abort(ctx context.Context, ref string) error { - if _, err := pcs.client.Abort(ctx, &contentapi.AbortRequest{ - Ref: ref, - }); err != nil { - return errdefs.FromGRPC(err) - } - - return nil -} - -func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) { - wrclient, err := pcs.client.Write(ctx) - if err != nil { - return nil, 0, err - } - - if err := wrclient.Send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionStat, - Ref: ref, - Total: size, - Expected: expected, - }); err != nil { - return nil, 0, err - } - - resp, err := wrclient.Recv() - if err != nil { - return nil, 0, err - } - - return wrclient, resp.Offset, nil -} - -func infoToGRPC(info content.Info) contentapi.Info { - return contentapi.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, - Labels: info.Labels, - } -} - -func infoFromGRPC(info contentapi.Info) content.Info { - return content.Info{ - Digest: info.Digest, - Size: info.Size_, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, - Labels: info.Labels, - } -} diff --git a/vendor/github.com/containerd/containerd/content/proxy/content_writer.go b/vendor/github.com/containerd/containerd/content/proxy/content_writer.go deleted file mode 100644 index 842333598878..000000000000 --- a/vendor/github.com/containerd/containerd/content/proxy/content_writer.go +++ /dev/null @@ -1,146 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package proxy - -import ( - "context" - "io" - - contentapi "github.com/containerd/containerd/api/services/content/v1" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type remoteWriter struct { - ref string - client contentapi.Content_WriteClient - offset int64 - digest digest.Digest -} - -// send performs a synchronous req-resp cycle on the client. 
-func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) { - if err := rw.client.Send(req); err != nil { - return nil, err - } - - resp, err := rw.client.Recv() - - if err == nil { - // try to keep these in sync - if resp.Digest != "" { - rw.digest = resp.Digest - } - } - - return resp, err -} - -func (rw *remoteWriter) Status() (content.Status, error) { - resp, err := rw.send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionStat, - }) - if err != nil { - return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status") - } - - return content.Status{ - Ref: rw.ref, - Offset: resp.Offset, - Total: resp.Total, - StartedAt: resp.StartedAt, - UpdatedAt: resp.UpdatedAt, - }, nil -} - -func (rw *remoteWriter) Digest() digest.Digest { - return rw.digest -} - -func (rw *remoteWriter) Write(p []byte) (n int, err error) { - offset := rw.offset - - resp, err := rw.send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionWrite, - Offset: offset, - Data: p, - }) - if err != nil { - return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write") - } - - n = int(resp.Offset - offset) - if n < len(p) { - err = io.ErrShortWrite - } - - rw.offset += int64(n) - if resp.Digest != "" { - rw.digest = resp.Digest - } - return -} - -func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) (err error) { - defer func() { - err1 := rw.Close() - if err == nil { - err = err1 - } - }() - - var base content.Info - for _, opt := range opts { - if err := opt(&base); err != nil { - return err - } - } - resp, err := rw.send(&contentapi.WriteContentRequest{ - Action: contentapi.WriteActionCommit, - Total: size, - Offset: rw.offset, - Expected: expected, - Labels: base.Labels, - }) - if err != nil { - return errors.Wrap(errdefs.FromGRPC(err), "commit failed") - } - - if size != 0 && resp.Offset != size { - return errors.Errorf("unexpected size: %v != %v", resp.Offset, size) - } - - if expected != "" && resp.Digest != expected { - return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected) - } - - rw.digest = resp.Digest - rw.offset = resp.Offset - return nil -} - -func (rw *remoteWriter) Truncate(size int64) error { - // This truncation won't actually be validated until a write is issued. - rw.offset = size - return nil -} - -func (rw *remoteWriter) Close() error { - return rw.client.CloseSend() -} diff --git a/vendor/github.com/containerd/containerd/defaults/defaults.go b/vendor/github.com/containerd/containerd/defaults/defaults.go deleted file mode 100644 index 6f5b122ecf93..000000000000 --- a/vendor/github.com/containerd/containerd/defaults/defaults.go +++ /dev/null @@ -1,32 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package defaults - -const ( - // DefaultMaxRecvMsgSize defines the default maximum message size for - // receiving protobufs passed over the GRPC API. 
- DefaultMaxRecvMsgSize = 16 << 20 - // DefaultMaxSendMsgSize defines the default maximum message size for - // sending protobufs passed over the GRPC API. - DefaultMaxSendMsgSize = 16 << 20 - // DefaultRuntimeNSLabel defines the namespace label to check for the - // default runtime - DefaultRuntimeNSLabel = "containerd.io/defaults/runtime" - // DefaultSnapshotterNSLabel defines the namespace label to check for the - // default snapshotter - DefaultSnapshotterNSLabel = "containerd.io/defaults/snapshotter" -) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go b/vendor/github.com/containerd/containerd/defaults/defaults_unix.go deleted file mode 100644 index 6b69cd06b9b8..000000000000 --- a/vendor/github.com/containerd/containerd/defaults/defaults_unix.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package defaults - -const ( - // DefaultRootDir is the default location used by containerd to store - // persistent data - DefaultRootDir = "/var/lib/containerd" - // DefaultStateDir is the default location used by containerd to store - // transient data - DefaultStateDir = "/run/containerd" - // DefaultAddress is the default unix socket address - DefaultAddress = "/run/containerd/containerd.sock" - // DefaultDebugAddress is the default unix socket address for pprof data - DefaultDebugAddress = "/run/containerd/debug.sock" - // DefaultFIFODir is the default location used by client-side cio library - // to store FIFOs. - DefaultFIFODir = "/run/containerd/fifo" - // DefaultRuntime is the default linux runtime - DefaultRuntime = "io.containerd.runc.v2" - // DefaultConfigDir is the default location for config files. - DefaultConfigDir = "/etc/containerd" -) diff --git a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go b/vendor/github.com/containerd/containerd/defaults/defaults_windows.go deleted file mode 100644 index a80700075f99..000000000000 --- a/vendor/github.com/containerd/containerd/defaults/defaults_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build windows - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package defaults - -import ( - "os" - "path/filepath" -) - -var ( - // DefaultRootDir is the default location used by containerd to store - // persistent data - DefaultRootDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "root") - // DefaultStateDir is the default location used by containerd to store - // transient data - DefaultStateDir = filepath.Join(os.Getenv("ProgramData"), "containerd", "state") - - // DefaultConfigDir is the default location for config files. - DefaultConfigDir = filepath.Join(os.Getenv("programfiles"), "containerd") -) - -const ( - // DefaultAddress is the default winpipe address - DefaultAddress = `\\.\pipe\containerd-containerd` - // DefaultDebugAddress is the default winpipe address for pprof data - DefaultDebugAddress = `\\.\pipe\containerd-debug` - // DefaultFIFODir is the default location used by client-side cio library - // to store FIFOs. Unused on Windows. - DefaultFIFODir = "" - // DefaultRuntime is the default windows runtime - DefaultRuntime = "io.containerd.runhcs.v1" -) diff --git a/vendor/github.com/containerd/containerd/defaults/doc.go b/vendor/github.com/containerd/containerd/defaults/doc.go deleted file mode 100644 index 6da863ce2e89..000000000000 --- a/vendor/github.com/containerd/containerd/defaults/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package defaults provides several common defaults for interacting with -// containerd. These can be used on the client-side or server-side. -package defaults diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go deleted file mode 100644 index 5a9c559c1e0c..000000000000 --- a/vendor/github.com/containerd/containerd/filters/adaptor.go +++ /dev/null @@ -1,33 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -// Adaptor specifies the mapping of fieldpaths to a type. For the given field -// path, the value and whether it is present should be returned. The mapping of -// the fieldpath to a field is deferred to the adaptor implementation, but -// should generally follow protobuf field path/mask semantics. 
-type Adaptor interface { - Field(fieldpath []string) (value string, present bool) -} - -// AdapterFunc allows implementation specific matching of fieldpaths -type AdapterFunc func(fieldpath []string) (string, bool) - -// Field returns the field name and true if it exists -func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { - return fn(fieldpath) -} diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go deleted file mode 100644 index cf09d8d9e4fd..000000000000 --- a/vendor/github.com/containerd/containerd/filters/filter.go +++ /dev/null @@ -1,179 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -// Package filters defines a syntax and parser that can be used for the -// filtration of items across the containerd API. The core is built on the -// concept of protobuf field paths, with quoting. Several operators allow the -// user to flexibly select items based on field presence, equality, inequality -// and regular expressions. Flexible adaptors support working with any type. -// -// The syntax is fairly familiar, if you've used container ecosystem -// projects. At the core, we base it on the concept of protobuf field -// paths, augmenting with the ability to quote portions of the field path -// to match arbitrary labels. These "selectors" come in the following -// syntax: -// -// ``` -// [] -// ``` -// -// A basic example is as follows: -// -// ``` -// name==foo -// ``` -// -// This would match all objects that have a field `name` with the value -// `foo`. If we only want to test if the field is present, we can omit the -// operator. This is most useful for matching labels in containerd. The -// following will match objects that have the field "labels" and have the -// label "foo" defined: -// -// ``` -// labels.foo -// ``` -// -// We also allow for quoting of parts of the field path to allow matching -// of arbitrary items: -// -// ``` -// labels."very complex label"==something -// ``` -// -// We also define `!=` and `~=` as operators. The `!=` will match all -// objects that don't match the value for a field and `~=` will compile the -// target value as a regular expression and match the field value against that. -// -// Selectors can be combined using a comma, such that the resulting -// selector will require all selectors are matched for the object to match. 
-// The following example will match objects that are named `foo` and have -// the label `bar`: -// -// ``` -// name==foo,labels.bar -// ``` -// -package filters - -import ( - "regexp" - - "github.com/containerd/containerd/log" -) - -// Filter matches specific resources based the provided filter -type Filter interface { - Match(adaptor Adaptor) bool -} - -// FilterFunc is a function that handles matching with an adaptor -type FilterFunc func(Adaptor) bool - -// Match matches the FilterFunc returning true if the object matches the filter -func (fn FilterFunc) Match(adaptor Adaptor) bool { - return fn(adaptor) -} - -// Always is a filter that always returns true for any type of object -var Always FilterFunc = func(adaptor Adaptor) bool { - return true -} - -// Any allows multiple filters to be matched against the object -type Any []Filter - -// Match returns true if any of the provided filters are true -func (m Any) Match(adaptor Adaptor) bool { - for _, m := range m { - if m.Match(adaptor) { - return true - } - } - - return false -} - -// All allows multiple filters to be matched against the object -type All []Filter - -// Match only returns true if all filters match the object -func (m All) Match(adaptor Adaptor) bool { - for _, m := range m { - if !m.Match(adaptor) { - return false - } - } - - return true -} - -type operator int - -const ( - operatorPresent = iota - operatorEqual - operatorNotEqual - operatorMatches -) - -func (op operator) String() string { - switch op { - case operatorPresent: - return "?" - case operatorEqual: - return "==" - case operatorNotEqual: - return "!=" - case operatorMatches: - return "~=" - } - - return "unknown" -} - -type selector struct { - fieldpath []string - operator operator - value string - re *regexp.Regexp -} - -func (m selector) Match(adaptor Adaptor) bool { - value, present := adaptor.Field(m.fieldpath) - - switch m.operator { - case operatorPresent: - return present - case operatorEqual: - return present && value == m.value - case operatorNotEqual: - return value != m.value - case operatorMatches: - if m.re == nil { - r, err := regexp.Compile(m.value) - if err != nil { - log.L.Errorf("error compiling regexp %q", m.value) - return false - } - - m.re = r - } - - return m.re.MatchString(value) - default: - return false - } -} diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go deleted file mode 100644 index 0825d668caf0..000000000000 --- a/vendor/github.com/containerd/containerd/filters/parser.go +++ /dev/null @@ -1,292 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "fmt" - "io" - - "github.com/containerd/containerd/errdefs" - "github.com/pkg/errors" -) - -/* -Parse the strings into a filter that may be used with an adaptor. - -The filter is made up of zero or more selectors. - -The format is a comma separated list of expressions, in the form of -``, known as selectors. 
All selectors must match the -target object for the filter to be true. - -We define the operators "==" for equality, "!=" for not equal and "~=" for a -regular expression. If the operator and value are not present, the matcher will -test for the presence of a value, as defined by the target object. - -The formal grammar is as follows: - -selectors := selector ("," selector)* -selector := fieldpath (operator value) -fieldpath := field ('.' field)* -field := quoted | [A-Za-z] [A-Za-z0-9_]+ -operator := "==" | "!=" | "~=" -value := quoted | [^\s,]+ -quoted := - -*/ -func Parse(s string) (Filter, error) { - // special case empty to match all - if s == "" { - return Always, nil - } - - p := parser{input: s} - return p.parse() -} - -// ParseAll parses each filter in ss and returns a filter that will return true -// if any filter matches the expression. -// -// If no filters are provided, the filter will match anything. -func ParseAll(ss ...string) (Filter, error) { - if len(ss) == 0 { - return Always, nil - } - - var fs []Filter - for _, s := range ss { - f, err := Parse(s) - if err != nil { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) - } - - fs = append(fs, f) - } - - return Any(fs), nil -} - -type parser struct { - input string - scanner scanner -} - -func (p *parser) parse() (Filter, error) { - p.scanner.init(p.input) - - ss, err := p.selectors() - if err != nil { - return nil, errors.Wrap(err, "filters") - } - - return ss, nil -} - -func (p *parser) selectors() (Filter, error) { - s, err := p.selector() - if err != nil { - return nil, err - } - - ss := All{s} - -loop: - for { - tok := p.scanner.peek() - switch tok { - case ',': - pos, tok, _ := p.scanner.scan() - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a separator") - } - - s, err := p.selector() - if err != nil { - return nil, err - } - - ss = append(ss, s) - case tokenEOF: - break loop - default: - return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok)) - } - } - - return ss, nil -} - -func (p *parser) selector() (selector, error) { - fieldpath, err := p.fieldpath() - if err != nil { - return selector{}, err - } - - switch p.scanner.peek() { - case ',', tokenSeparator, tokenEOF: - return selector{ - fieldpath: fieldpath, - operator: operatorPresent, - }, nil - } - - op, err := p.operator() - if err != nil { - return selector{}, err - } - - var allowAltQuotes bool - if op == operatorMatches { - allowAltQuotes = true - } - - value, err := p.value(allowAltQuotes) - if err != nil { - if err == io.EOF { - return selector{}, io.ErrUnexpectedEOF - } - return selector{}, err - } - - return selector{ - fieldpath: fieldpath, - value: value, - operator: op, - }, nil -} - -func (p *parser) fieldpath() ([]string, error) { - f, err := p.field() - if err != nil { - return nil, err - } - - fs := []string{f} -loop: - for { - tok := p.scanner.peek() // lookahead to consume field separator - - switch tok { - case '.': - pos, tok, _ := p.scanner.scan() // consume separator - if tok != tokenSeparator { - return nil, p.mkerr(pos, "expected a field separator (`.`)") - } - - f, err := p.field() - if err != nil { - return nil, err - } - - fs = append(fs, f) - default: - // let the layer above handle the other bad cases. 
- break loop - } - } - - return fs, nil -} - -func (p *parser) field() (string, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, false) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected field or quoted") -} - -func (p *parser) operator() (operator, error) { - pos, tok, s := p.scanner.scan() - switch tok { - case tokenOperator: - switch s { - case "==": - return operatorEqual, nil - case "!=": - return operatorNotEqual, nil - case "~=": - return operatorMatches, nil - default: - return 0, p.mkerr(pos, "unsupported operator %q", s) - } - case tokenIllegal: - return 0, p.mkerr(pos, p.scanner.err) - } - - return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) -} - -func (p *parser) value(allowAltQuotes bool) (string, error) { - pos, tok, s := p.scanner.scan() - - switch tok { - case tokenValue, tokenField: - return s, nil - case tokenQuoted: - return p.unquote(pos, s, allowAltQuotes) - case tokenIllegal: - return "", p.mkerr(pos, p.scanner.err) - } - - return "", p.mkerr(pos, "expected value or quoted") -} - -func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { - if !allowAlts && s[0] != '\'' && s[0] != '"' { - return "", p.mkerr(pos, "invalid quote encountered") - } - - uq, err := unquote(s) - if err != nil { - return "", p.mkerr(pos, "unquoting failed: %v", err) - } - - return uq, nil -} - -type parseError struct { - input string - pos int - msg string -} - -func (pe parseError) Error() string { - if pe.pos < len(pe.input) { - before := pe.input[:pe.pos] - location := pe.input[pe.pos : pe.pos+1] // need to handle end - after := pe.input[pe.pos+1:] - - return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) - } - - return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) -} - -func (p *parser) mkerr(pos int, format string, args ...interface{}) error { - return errors.Wrap(parseError{ - input: p.input, - pos: pos, - msg: fmt.Sprintf(format, args...), - }, "parse error") -} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go deleted file mode 100644 index 2d64e23a300f..000000000000 --- a/vendor/github.com/containerd/containerd/filters/quote.go +++ /dev/null @@ -1,253 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package filters - -import ( - "unicode/utf8" - - "github.com/pkg/errors" -) - -// NOTE(stevvooe): Most of this code in this file is copied from the stdlib -// strconv package and modified to be able to handle quoting with `/` and `|` -// as delimiters. The copyright is held by the Go authors. - -var errQuoteSyntax = errors.New("quote syntax error") - -// UnquoteChar decodes the first character or byte in the escaped string -// or character literal represented by the string s. 
-// It returns four values: -// -// 1) value, the decoded Unicode code point or byte value; -// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; -// 3) tail, the remainder of the string after the character; and -// 4) an error that will be nil if the character is syntactically valid. -// -// The second argument, quote, specifies the type of literal being parsed -// and therefore which escaped quote character is permitted. -// If set to a single quote, it permits the sequence \' and disallows unescaped '. -// If set to a double quote, it permits \" and disallows unescaped ". -// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. -// -// This is from Go strconv package, modified to support `|` and `/` as double -// quotes for use with regular expressions. -func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { - // easy cases - switch c := s[0]; { - case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): - err = errQuoteSyntax - return - case c >= utf8.RuneSelf: - r, size := utf8.DecodeRuneInString(s) - return r, true, s[size:], nil - case c != '\\': - return rune(s[0]), false, s[1:], nil - } - - // hard case: c is backslash - if len(s) <= 1 { - err = errQuoteSyntax - return - } - c := s[1] - s = s[2:] - - switch c { - case 'a': - value = '\a' - case 'b': - value = '\b' - case 'f': - value = '\f' - case 'n': - value = '\n' - case 'r': - value = '\r' - case 't': - value = '\t' - case 'v': - value = '\v' - case 'x', 'u', 'U': - n := 0 - switch c { - case 'x': - n = 2 - case 'u': - n = 4 - case 'U': - n = 8 - } - var v rune - if len(s) < n { - err = errQuoteSyntax - return - } - for j := 0; j < n; j++ { - x, ok := unhex(s[j]) - if !ok { - err = errQuoteSyntax - return - } - v = v<<4 | x - } - s = s[n:] - if c == 'x' { - // single-byte string, possibly not UTF-8 - value = v - break - } - if v > utf8.MaxRune { - err = errQuoteSyntax - return - } - value = v - multibyte = true - case '0', '1', '2', '3', '4', '5', '6', '7': - v := rune(c) - '0' - if len(s) < 2 { - err = errQuoteSyntax - return - } - for j := 0; j < 2; j++ { // one digit already; two more - x := rune(s[j]) - '0' - if x < 0 || x > 7 { - err = errQuoteSyntax - return - } - v = (v << 3) | x - } - s = s[2:] - if v > 255 { - err = errQuoteSyntax - return - } - value = v - case '\\': - value = '\\' - case '\'', '"', '|', '/': - if c != quote { - err = errQuoteSyntax - return - } - value = rune(c) - default: - err = errQuoteSyntax - return - } - tail = s - return -} - -// unquote interprets s as a single-quoted, double-quoted, -// or backquoted Go string literal, returning the string value -// that s quotes. (If s is single-quoted, it would be a Go -// character literal; Unquote returns the corresponding -// one-character string.) -// -// This is modified from the standard library to support `|` and `/` as quote -// characters for use with regular expressions. -func unquote(s string) (string, error) { - n := len(s) - if n < 2 { - return "", errQuoteSyntax - } - quote := s[0] - if quote != s[n-1] { - return "", errQuoteSyntax - } - s = s[1 : n-1] - - if quote == '`' { - if contains(s, '`') { - return "", errQuoteSyntax - } - if contains(s, '\r') { - // -1 because we know there is at least one \r to remove. 
- buf := make([]byte, 0, len(s)-1) - for i := 0; i < len(s); i++ { - if s[i] != '\r' { - buf = append(buf, s[i]) - } - } - return string(buf), nil - } - return s, nil - } - if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { - return "", errQuoteSyntax - } - if contains(s, '\n') { - return "", errQuoteSyntax - } - - // Is it trivial? Avoid allocation. - if !contains(s, '\\') && !contains(s, quote) { - switch quote { - case '"', '/', '|': // pipe and slash are treated like double quote - return s, nil - case '\'': - r, size := utf8.DecodeRuneInString(s) - if size == len(s) && (r != utf8.RuneError || size != 1) { - return s, nil - } - } - } - - var runeTmp [utf8.UTFMax]byte - buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. - for len(s) > 0 { - c, multibyte, ss, err := unquoteChar(s, quote) - if err != nil { - return "", err - } - s = ss - if c < utf8.RuneSelf || !multibyte { - buf = append(buf, byte(c)) - } else { - n := utf8.EncodeRune(runeTmp[:], c) - buf = append(buf, runeTmp[:n]...) - } - if quote == '\'' && len(s) != 0 { - // single-quoted must be single character - return "", errQuoteSyntax - } - } - return string(buf), nil -} - -// contains reports whether the string contains the byte c. -func contains(s string, c byte) bool { - for i := 0; i < len(s); i++ { - if s[i] == c { - return true - } - } - return false -} - -func unhex(b byte) (v rune, ok bool) { - c := rune(b) - switch { - case '0' <= c && c <= '9': - return c - '0', true - case 'a' <= c && c <= 'f': - return c - 'a' + 10, true - case 'A' <= c && c <= 'F': - return c - 'A' + 10, true - } - return -} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go deleted file mode 100644 index 6a485467b8a0..000000000000 --- a/vendor/github.com/containerd/containerd/filters/scanner.go +++ /dev/null @@ -1,297 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package filters - -import ( - "unicode" - "unicode/utf8" -) - -const ( - tokenEOF = -(iota + 1) - tokenQuoted - tokenValue - tokenField - tokenSeparator - tokenOperator - tokenIllegal -) - -type token rune - -func (t token) String() string { - switch t { - case tokenEOF: - return "EOF" - case tokenQuoted: - return "Quoted" - case tokenValue: - return "Value" - case tokenField: - return "Field" - case tokenSeparator: - return "Separator" - case tokenOperator: - return "Operator" - case tokenIllegal: - return "Illegal" - } - - return string(t) -} - -func (t token) GoString() string { - return "token" + t.String() -} - -type scanner struct { - input string - pos int - ppos int // bounds the current rune in the string - value bool - err string -} - -func (s *scanner) init(input string) { - s.input = input - s.pos = 0 - s.ppos = 0 -} - -func (s *scanner) next() rune { - if s.pos >= len(s.input) { - return tokenEOF - } - s.pos = s.ppos - - r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) - s.ppos += w - if r == utf8.RuneError { - if w > 0 { - s.error("rune error") - return tokenIllegal - } - return tokenEOF - } - - if r == 0 { - s.error("unexpected null") - return tokenIllegal - } - - return r -} - -func (s *scanner) peek() rune { - pos := s.pos - ppos := s.ppos - ch := s.next() - s.pos = pos - s.ppos = ppos - return ch -} - -func (s *scanner) scan() (nextp int, tk token, text string) { - var ( - ch = s.next() - pos = s.pos - ) - -chomp: - switch { - case ch == tokenEOF: - case ch == tokenIllegal: - case isQuoteRune(ch): - if !s.scanQuoted(ch) { - return pos, tokenIllegal, s.input[pos:s.ppos] - } - return pos, tokenQuoted, s.input[pos:s.ppos] - case isSeparatorRune(ch): - s.value = false - return pos, tokenSeparator, s.input[pos:s.ppos] - case isOperatorRune(ch): - s.scanOperator() - s.value = true - return pos, tokenOperator, s.input[pos:s.ppos] - case unicode.IsSpace(ch): - // chomp - ch = s.next() - pos = s.pos - goto chomp - case s.value: - s.scanValue() - s.value = false - return pos, tokenValue, s.input[pos:s.ppos] - case isFieldRune(ch): - s.scanField() - return pos, tokenField, s.input[pos:s.ppos] - } - - return s.pos, token(ch), "" -} - -func (s *scanner) scanField() { - for { - ch := s.peek() - if !isFieldRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanOperator() { - for { - ch := s.peek() - switch ch { - case '=', '!', '~': - s.next() - default: - return - } - } -} - -func (s *scanner) scanValue() { - for { - ch := s.peek() - if !isValueRune(ch) { - break - } - s.next() - } -} - -func (s *scanner) scanQuoted(quote rune) bool { - var illegal bool - ch := s.next() // read character after quote - for ch != quote { - if ch == '\n' || ch < 0 { - s.error("quoted literal not terminated") - return false - } - if ch == '\\' { - var legal bool - ch, legal = s.scanEscape(quote) - if !legal { - illegal = true - } - } else { - ch = s.next() - } - } - return !illegal -} - -func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { - ch = s.next() // read character after '/' - switch ch { - case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: - // nothing to do - ch = s.next() - legal = true - case '0', '1', '2', '3', '4', '5', '6', '7': - ch, legal = s.scanDigits(ch, 8, 3) - case 'x': - ch, legal = s.scanDigits(s.next(), 16, 2) - case 'u': - ch, legal = s.scanDigits(s.next(), 16, 4) - case 'U': - ch, legal = s.scanDigits(s.next(), 16, 8) - default: - s.error("illegal escape sequence") - } - return -} - -func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ - for n > 0 && digitVal(ch) < base { - ch = s.next() - n-- - } - if n > 0 { - s.error("illegal numeric escape sequence") - return ch, false - } - return ch, true -} - -func (s *scanner) error(msg string) { - if s.err == "" { - s.err = msg - } -} - -func digitVal(ch rune) int { - switch { - case '0' <= ch && ch <= '9': - return int(ch - '0') - case 'a' <= ch && ch <= 'f': - return int(ch - 'a' + 10) - case 'A' <= ch && ch <= 'F': - return int(ch - 'A' + 10) - } - return 16 // larger than any legal digit val -} - -func isFieldRune(r rune) bool { - return (r == '_' || isAlphaRune(r) || isDigitRune(r)) -} - -func isAlphaRune(r rune) bool { - return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' -} - -func isDigitRune(r rune) bool { - return r >= '0' && r <= '9' -} - -func isOperatorRune(r rune) bool { - switch r { - case '=', '!', '~': - return true - } - - return false -} - -func isQuoteRune(r rune) bool { - switch r { - case '/', '|', '"': // maybe add single quoting? - return true - } - - return false -} - -func isSeparatorRune(r rune) bool { - switch r { - case ',', '.': - return true - } - - return false -} - -func isValueRune(r rune) bool { - return r != ',' && !unicode.IsSpace(r) && - (unicode.IsLetter(r) || - unicode.IsDigit(r) || - unicode.IsNumber(r) || - unicode.IsGraphic(r) || - unicode.IsPunct(r)) -} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go deleted file mode 100644 index 8b0a87e755fc..000000000000 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go +++ /dev/null @@ -1,209 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package auth - -import ( - "context" - "encoding/json" - "net/http" - "net/url" - "strings" - "time" - - "github.com/containerd/containerd/log" - remoteserrors "github.com/containerd/containerd/remotes/errors" - "github.com/containerd/containerd/version" - "github.com/pkg/errors" - "golang.org/x/net/context/ctxhttp" -) - -var ( - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. 
- ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -// GenerateTokenOptions generates options for fetching a token based on a challenge -func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { - realm, ok := c.Parameters["realm"] - if !ok { - return TokenOptions{}, errors.New("no realm specified for token auth challenge") - } - - realmURL, err := url.Parse(realm) - if err != nil { - return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") - } - - to := TokenOptions{ - Realm: realmURL.String(), - Service: c.Parameters["service"], - Username: username, - Secret: secret, - } - - scope, ok := c.Parameters["scope"] - if ok { - to.Scopes = append(to.Scopes, scope) - } else { - log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") - } - - return to, nil -} - -// TokenOptions are options for requesting a token -type TokenOptions struct { - Realm string - Service string - Scopes []string - Username string - Secret string -} - -// OAuthTokenResponse is response from fetching token with a OAuth POST request -type OAuthTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -// FetchTokenWithOAuth fetches a token using a POST request -func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { - form := url.Values{} - if len(to.Scopes) > 0 { - form.Set("scope", strings.Join(to.Scopes, " ")) - } - form.Set("service", to.Service) - form.Set("client_id", clientID) - - if to.Username == "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", to.Secret) - } else { - form.Set("grant_type", "password") - form.Set("username", to.Username) - form.Set("password", to.Secret) - } - - req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") - for k, v := range headers { - req.Header[k] = append(req.Header[k], v...) 
- } - if len(req.Header.Get("User-Agent")) == 0 { - req.Header.Set("User-Agent", "containerd/"+version.Version) - } - - resp, err := ctxhttp.Do(ctx, client, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) - } - - decoder := json.NewDecoder(resp.Body) - - var tr OAuthTokenResponse - if err = decoder.Decode(&tr); err != nil { - return nil, errors.Wrap(err, "unable to decode token response") - } - - if tr.AccessToken == "" { - return nil, errors.WithStack(ErrNoToken) - } - - return &tr, nil -} - -// FetchTokenResponse is response from fetching token with GET request -type FetchTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -// FetchToken fetches a token using a GET request -func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { - req, err := http.NewRequest("GET", to.Realm, nil) - if err != nil { - return nil, err - } - - for k, v := range headers { - req.Header[k] = append(req.Header[k], v...) - } - if len(req.Header.Get("User-Agent")) == 0 { - req.Header.Set("User-Agent", "containerd/"+version.Version) - } - - reqParams := req.URL.Query() - - if to.Service != "" { - reqParams.Add("service", to.Service) - } - - for _, scope := range to.Scopes { - reqParams.Add("scope", scope) - } - - if to.Secret != "" { - req.SetBasicAuth(to.Username, to.Secret) - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := ctxhttp.Do(ctx, client, req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) - } - - decoder := json.NewDecoder(resp.Body) - - var tr FetchTokenResponse - if err = decoder.Decode(&tr); err != nil { - return nil, errors.Wrap(err, "unable to decode token response") - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return nil, errors.WithStack(ErrNoToken) - } - - return &tr, nil -} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go deleted file mode 100644 index 223fa2d0524f..000000000000 --- a/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go +++ /dev/null @@ -1,203 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package auth - -import ( - "net/http" - "sort" - "strings" -) - -// AuthenticationScheme defines scheme of the authentication method -type AuthenticationScheme byte - -const ( - // BasicAuth is scheme for Basic HTTP Authentication RFC 7617 - BasicAuth AuthenticationScheme = 1 << iota - // DigestAuth is scheme for HTTP Digest Access Authentication RFC 7616 - DigestAuth - // BearerAuth is scheme for OAuth 2.0 Bearer Tokens RFC 6750 - BearerAuth -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // scheme is the auth-scheme according to RFC 2617 - Scheme AuthenticationScheme - - // parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -type byScheme []Challenge - -func (bs byScheme) Len() int { return len(bs) } -func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] } - -// Sort in priority order: token > digest > basic -func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme } - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ParseAuthHeader parses challenges from WWW-Authenticate header -func ParseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - var s AuthenticationScheme - switch v { - case "basic": - s = BasicAuth - case "digest": - s = DigestAuth - case "bearer": - s = BearerAuth - default: - continue - } - challenges = append(challenges, Challenge{Scheme: s, Parameters: p}) - } - sort.Stable(byScheme(challenges)) - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - for { - var pkey string - pkey, s = expectToken(skipSpace(s)) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - if !strings.HasPrefix(s, ",") { - return - } - s = s[1:] - } -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case 
'\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/remotes/errors/errors.go deleted file mode 100644 index 519dbac105f2..000000000000 --- a/vendor/github.com/containerd/containerd/remotes/errors/errors.go +++ /dev/null @@ -1,56 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package errors - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" -) - -var _ error = ErrUnexpectedStatus{} - -// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status -type ErrUnexpectedStatus struct { - Status string - StatusCode int - Body []byte - RequestURL, RequestMethod string -} - -func (e ErrUnexpectedStatus) Error() string { - return fmt.Sprintf("unexpected status: %s", e.Status) -} - -// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response -func NewUnexpectedStatusErr(resp *http.Response) error { - var b []byte - if resp.Body != nil { - b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB - } - err := ErrUnexpectedStatus{ - Body: b, - Status: resp.Status, - StatusCode: resp.StatusCode, - RequestMethod: resp.Request.Method, - } - if resp.Request.URL != nil { - err.RequestURL = resp.Request.URL.String() - } - return err -} diff --git a/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go b/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go deleted file mode 100644 index 7b6efdb3a538..000000000000 --- a/vendor/github.com/containerd/containerd/services/content/contentserver/contentserver.go +++ /dev/null @@ -1,463 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
-*/ - -package contentserver - -import ( - "context" - "io" - "sync" - - api "github.com/containerd/containerd/api/services/content/v1" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" - ptypes "github.com/gogo/protobuf/types" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type service struct { - store content.Store -} - -var bufPool = sync.Pool{ - New: func() interface{} { - buffer := make([]byte, 1<<20) - return &buffer - }, -} - -// New returns the content GRPC server -func New(cs content.Store) api.ContentServer { - return &service{store: cs} -} - -func (s *service) Register(server *grpc.Server) error { - api.RegisterContentServer(server, s) - return nil -} - -func (s *service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) { - if err := req.Digest.Validate(); err != nil { - return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest) - } - - bi, err := s.store.Info(ctx, req.Digest) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &api.InfoResponse{ - Info: infoToGRPC(bi), - }, nil -} - -func (s *service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) { - if err := req.Info.Digest.Validate(); err != nil { - return nil, status.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest) - } - - info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &api.UpdateResponse{ - Info: infoToGRPC(info), - }, nil -} - -func (s *service) List(req *api.ListContentRequest, session api.Content_ListServer) error { - var ( - buffer []api.Info - sendBlock = func(block []api.Info) error { - // send last block - return session.Send(&api.ListContentResponse{ - Info: block, - }) - } - ) - - if err := s.store.Walk(session.Context(), func(info content.Info) error { - buffer = append(buffer, api.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, - Labels: info.Labels, - }) - - if len(buffer) >= 100 { - if err := sendBlock(buffer); err != nil { - return err - } - - buffer = buffer[:0] - } - - return nil - }, req.Filters...); err != nil { - return errdefs.ToGRPC(err) - } - - if len(buffer) > 0 { - // send last block - if err := sendBlock(buffer); err != nil { - return err - } - } - - return nil -} - -func (s *service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*ptypes.Empty, error) { - log.G(ctx).WithField("digest", req.Digest).Debugf("delete content") - if err := req.Digest.Validate(); err != nil { - return nil, status.Errorf(codes.InvalidArgument, err.Error()) - } - - if err := s.store.Delete(ctx, req.Digest); err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &ptypes.Empty{}, nil -} - -func (s *service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error { - if err := req.Digest.Validate(); err != nil { - return status.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err) - } - - oi, err := s.store.Info(session.Context(), req.Digest) - if err != nil { - return errdefs.ToGRPC(err) - } - - ra, err := s.store.ReaderAt(session.Context(), ocispec.Descriptor{Digest: req.Digest}) - if err != nil { - return errdefs.ToGRPC(err) - } - 
defer ra.Close() - - var ( - offset = req.Offset - // size is read size, not the expected size of the blob (oi.Size), which the caller might not be aware of. - // offset+size can be larger than oi.Size. - size = req.Size_ - - // TODO(stevvooe): Using the global buffer pool. At 32KB, it is probably - // little inefficient for work over a fast network. We can tune this later. - p = bufPool.Get().(*[]byte) - ) - defer bufPool.Put(p) - - if offset < 0 { - offset = 0 - } - - if offset > oi.Size { - return status.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size) - } - - if size <= 0 || offset+size > oi.Size { - size = oi.Size - offset - } - - _, err = io.CopyBuffer( - &readResponseWriter{session: session}, - io.NewSectionReader(ra, offset, size), *p) - return errdefs.ToGRPC(err) -} - -// readResponseWriter is a writer that places the output into ReadContentRequest messages. -// -// This allows io.CopyBuffer to do the heavy lifting of chunking the responses -// into the buffer size. -type readResponseWriter struct { - offset int64 - session api.Content_ReadServer -} - -func (rw *readResponseWriter) Write(p []byte) (n int, err error) { - if err := rw.session.Send(&api.ReadContentResponse{ - Offset: rw.offset, - Data: p, - }); err != nil { - return 0, err - } - - rw.offset += int64(len(p)) - return len(p), nil -} - -func (s *service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) { - status, err := s.store.Status(ctx, req.Ref) - if err != nil { - return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref) - } - - var resp api.StatusResponse - resp.Status = &api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, - Ref: status.Ref, - Offset: status.Offset, - Total: status.Total, - Expected: status.Expected, - } - - return &resp, nil -} - -func (s *service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) { - statuses, err := s.store.ListStatuses(ctx, req.Filters...) - if err != nil { - return nil, errdefs.ToGRPC(err) - } - - var resp api.ListStatusesResponse - for _, status := range statuses { - resp.Statuses = append(resp.Statuses, api.Status{ - StartedAt: status.StartedAt, - UpdatedAt: status.UpdatedAt, - Ref: status.Ref, - Offset: status.Offset, - Total: status.Total, - Expected: status.Expected, - }) - } - - return &resp, nil -} - -func (s *service) Write(session api.Content_WriteServer) (err error) { - var ( - ctx = session.Context() - msg api.WriteContentResponse - req *api.WriteContentRequest - ref string - total int64 - expected digest.Digest - ) - - defer func(msg *api.WriteContentResponse) { - // pump through the last message if no error was encountered - if err != nil { - if s, ok := status.FromError(err); ok && s.Code() != codes.AlreadyExists { - // TODO(stevvooe): Really need a log line here to track which - // errors are actually causing failure on the server side. May want - // to configure the service with an interceptor to make this work - // identically across all GRPC methods. - // - // This is pretty noisy, so we can remove it but leave it for now. - log.G(ctx).WithError(err).Error("(*service).Write failed") - } - - return - } - - err = session.Send(msg) - }(&msg) - - // handle the very first request! 
- req, err = session.Recv() - if err != nil { - return err - } - - ref = req.Ref - - if ref == "" { - return status.Errorf(codes.InvalidArgument, "first message must have a reference") - } - - fields := logrus.Fields{ - "ref": ref, - } - total = req.Total - expected = req.Expected - if total > 0 { - fields["total"] = total - } - - if expected != "" { - fields["expected"] = expected - } - - ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields)) - - log.G(ctx).Debug("(*service).Write started") - // this action locks the writer for the session. - wr, err := s.store.Writer(ctx, - content.WithRef(ref), - content.WithDescriptor(ocispec.Descriptor{Size: total, Digest: expected})) - if err != nil { - return errdefs.ToGRPC(err) - } - defer wr.Close() - - for { - msg.Action = req.Action - ws, err := wr.Status() - if err != nil { - return errdefs.ToGRPC(err) - } - - msg.Offset = ws.Offset // always set the offset. - - // NOTE(stevvooe): In general, there are two cases underwhich a remote - // writer is used. - // - // For pull, we almost always have this before fetching large content, - // through descriptors. We allow predeclaration of the expected size - // and digest. - // - // For push, it is more complex. If we want to cut through content into - // storage, we may have no expectation until we are done processing the - // content. The case here is the following: - // - // 1. Start writing content. - // 2. Compress inline. - // 3. Validate digest and size (maybe). - // - // Supporting these two paths is quite awkward but it lets both API - // users use the same writer style for each with a minimum of overhead. - if req.Expected != "" { - if expected != "" && expected != req.Expected { - log.G(ctx).Debugf("commit digest differs from writer digest: %v != %v", req.Expected, expected) - } - expected = req.Expected - - if _, err := s.store.Info(session.Context(), req.Expected); err == nil { - if err := wr.Close(); err != nil { - log.G(ctx).WithError(err).Error("failed to close writer") - } - if err := s.store.Abort(session.Context(), ref); err != nil { - log.G(ctx).WithError(err).Error("failed to abort write") - } - - return status.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected) - } - } - - if req.Total > 0 { - // Update the expected total. Typically, this could be seen at - // negotiation time or on a commit message. - if total > 0 && req.Total != total { - log.G(ctx).Debugf("commit size differs from writer size: %v != %v", req.Total, total) - } - total = req.Total - } - - switch req.Action { - case api.WriteActionStat: - msg.Digest = wr.Digest() - msg.StartedAt = ws.StartedAt - msg.UpdatedAt = ws.UpdatedAt - msg.Total = total - case api.WriteActionWrite, api.WriteActionCommit: - if req.Offset > 0 { - // validate the offset if provided - if req.Offset != ws.Offset { - return status.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset) - } - } - - if req.Offset == 0 && ws.Offset > 0 { - if err := wr.Truncate(req.Offset); err != nil { - return errors.Wrapf(err, "truncate failed") - } - msg.Offset = req.Offset - } - - // issue the write if we actually have data. - if len(req.Data) > 0 { - // While this looks like we could use io.WriterAt here, because we - // maintain the offset as append only, we just issue the write. - n, err := wr.Write(req.Data) - if err != nil { - return errdefs.ToGRPC(err) - } - - if n != len(req.Data) { - // TODO(stevvooe): Perhaps, we can recover this by including it - // in the offset on the write return. 
- return status.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data)) - } - - msg.Offset += int64(n) - } - - if req.Action == api.WriteActionCommit { - var opts []content.Opt - if req.Labels != nil { - opts = append(opts, content.WithLabels(req.Labels)) - } - if err := wr.Commit(ctx, total, expected, opts...); err != nil { - return errdefs.ToGRPC(err) - } - } - - msg.Digest = wr.Digest() - } - - if err := session.Send(&msg); err != nil { - return err - } - - req, err = session.Recv() - if err != nil { - if err == io.EOF { - return nil - } - - return err - } - } -} - -func (s *service) Abort(ctx context.Context, req *api.AbortRequest) (*ptypes.Empty, error) { - if err := s.store.Abort(ctx, req.Ref); err != nil { - return nil, errdefs.ToGRPC(err) - } - - return &ptypes.Empty{}, nil -} - -func infoToGRPC(info content.Info) api.Info { - return api.Info{ - Digest: info.Digest, - Size_: info.Size, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, - Labels: info.Labels, - } -} - -func infoFromGRPC(info api.Info) content.Info { - return content.Info{ - Digest: info.Digest, - Size: info.Size_, - CreatedAt: info.CreatedAt, - UpdatedAt: info.UpdatedAt, - Labels: info.Labels, - } -} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go deleted file mode 100644 index 9b53cb13e97e..000000000000 --- a/vendor/github.com/containerd/containerd/version/version.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package version - -import "runtime" - -var ( - // Package is filled at linking time - Package = "github.com/containerd/containerd" - - // Version holds the complete version number. Filled in at linking time. - Version = "1.5.5+unknown" - - // Revision is filled with the VCS (e.g. git) revision being used to build - // the program at linking time. - Revision = "" - - // GoVersion is Go tree's version. - GoVersion = runtime.Version() -) diff --git a/vendor/github.com/containerd/continuity/LICENSE b/vendor/github.com/containerd/continuity/LICENSE deleted file mode 100644 index 584149b6ee28..000000000000 --- a/vendor/github.com/containerd/continuity/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md deleted file mode 100644 index 6fa50e159c70..000000000000 --- a/vendor/github.com/containerd/continuity/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# continuity - -[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity) -[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity) - -A transport-agnostic, filesystem metadata manifest system - -This project is a staging area for experiments in providing transport agnostic -metadata storage. - -Please see https://github.com/opencontainers/specs/issues/11 for more details. - -## Manifest Format - -A continuity manifest encodes filesystem metadata in Protocol Buffers. -Please refer to [proto/manifest.proto](proto/manifest.proto). - -## Usage - -Build: - -```console -$ make -``` - -Create a manifest (of this repo itself): - -```console -$ ./bin/continuity build . > /tmp/a.pb -``` - -Dump a manifest: - -```console -$ ./bin/continuity ls /tmp/a.pb -... --rw-rw-r-- 270 B /.gitignore --rw-rw-r-- 88 B /.mailmap --rw-rw-r-- 187 B /.travis.yml --rw-rw-r-- 359 B /AUTHORS --rw-rw-r-- 11 kB /LICENSE --rw-rw-r-- 1.5 kB /Makefile -... --rw-rw-r-- 986 B /testutil_test.go -drwxrwxr-x 0 B /version --rw-rw-r-- 478 B /version/version.go -``` - -Verify a manifest: - -```console -$ ./bin/continuity verify . /tmp/a.pb -``` - -Break the directory and restore using the manifest: -```console -$ chmod 777 Makefile -$ ./bin/continuity verify . /tmp/a.pb -2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r-- -$ ./bin/continuity apply . /tmp/a.pb -$ stat -c %a Makefile -664 -$ ./bin/continuity verify . /tmp/a.pb -``` - -## Platforms - -continuity primarily targets Linux. continuity may compile for and work on -other operating systems, but those platforms are not tested. - -## Contribution Guide -### Building Proto Package - -If you change the proto file you will need to rebuild the generated Go with `go generate`. - -```console -$ go generate ./proto -``` - -## Project details - -continuity is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. 
diff --git a/vendor/github.com/containerd/continuity/go.mod b/vendor/github.com/containerd/continuity/go.mod deleted file mode 100644 index 35cea6ae8d00..000000000000 --- a/vendor/github.com/containerd/continuity/go.mod +++ /dev/null @@ -1,15 +0,0 @@ -module github.com/containerd/continuity - -go 1.13 - -require ( - bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898 - github.com/dustin/go-humanize v1.0.0 - github.com/golang/protobuf v1.3.5 - github.com/opencontainers/go-digest v1.0.0 - github.com/pkg/errors v0.9.1 - github.com/sirupsen/logrus v1.7.0 - github.com/spf13/cobra v1.0.0 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c -) diff --git a/vendor/github.com/containerd/continuity/sysx/README.md b/vendor/github.com/containerd/continuity/sysx/README.md deleted file mode 100644 index ad7aee533168..000000000000 --- a/vendor/github.com/containerd/continuity/sysx/README.md +++ /dev/null @@ -1,3 +0,0 @@ -This package is for internal use only. It is intended to only have -temporary changes before they are upstreamed to golang.org/x/sys/ -(a.k.a. https://github.com/golang/sys). diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go deleted file mode 100644 index 28ce5d8de331..000000000000 --- a/vendor/github.com/containerd/continuity/sysx/nodata_linux.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "syscall" -) - -const ENODATA = syscall.ENODATA diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go deleted file mode 100644 index e0575f4468ef..000000000000 --- a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "syscall" -) - -// This should actually be a set that contains ENOENT and EPERM -const ENODATA = syscall.ENOENT diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go deleted file mode 100644 index de4b3d50ceda..000000000000 --- a/vendor/github.com/containerd/continuity/sysx/nodata_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build darwin freebsd openbsd - -/* - Copyright The containerd Authors. 
- - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "syscall" -) - -const ENODATA = syscall.ENOATTR diff --git a/vendor/github.com/containerd/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go deleted file mode 100644 index db6fe70fe927..000000000000 --- a/vendor/github.com/containerd/continuity/sysx/xattr.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build linux darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "bytes" - - "golang.org/x/sys/unix" -) - -// Listxattr calls syscall listxattr and reads all content -// and returns a string array -func Listxattr(path string) ([]string, error) { - return listxattrAll(path, unix.Listxattr) -} - -// Removexattr calls syscall removexattr -func Removexattr(path string, attr string) (err error) { - return unix.Removexattr(path, attr) -} - -// Setxattr calls syscall setxattr -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - return unix.Setxattr(path, attr, data, flags) -} - -// Getxattr calls syscall getxattr -func Getxattr(path, attr string) ([]byte, error) { - return getxattrAll(path, attr, unix.Getxattr) -} - -// LListxattr lists xattrs, not following symlinks -func LListxattr(path string) ([]string, error) { - return listxattrAll(path, unix.Llistxattr) -} - -// LRemovexattr removes an xattr, not following symlinks -func LRemovexattr(path string, attr string) (err error) { - return unix.Lremovexattr(path, attr) -} - -// LSetxattr sets an xattr, not following symlinks -func LSetxattr(path string, attr string, data []byte, flags int) (err error) { - return unix.Lsetxattr(path, attr, data, flags) -} - -// LGetxattr gets an xattr, not following symlinks -func LGetxattr(path, attr string) ([]byte, error) { - return getxattrAll(path, attr, unix.Lgetxattr) -} - -const defaultXattrBufferSize = 128 - -type listxattrFunc func(path string, dest []byte) (int, error) - -func listxattrAll(path string, listFunc listxattrFunc) ([]string, error) { - buf := make([]byte, defaultXattrBufferSize) - n, err := listFunc(path, buf) - for err == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - n, err = listFunc(path, []byte{}) - if err != nil { - return nil, err - } - buf = make([]byte, n) - n, err = listFunc(path, buf) - } - if err != nil { - return nil, err - } - - ps := bytes.Split(bytes.TrimSuffix(buf[:n], []byte{0}), []byte{0}) - var entries []string - for _, p := range ps { - if len(p) > 0 { - entries 
= append(entries, string(p)) - } - } - - return entries, nil -} - -type getxattrFunc func(string, string, []byte) (int, error) - -func getxattrAll(path, attr string, getFunc getxattrFunc) ([]byte, error) { - buf := make([]byte, defaultXattrBufferSize) - n, err := getFunc(path, attr, buf) - for err == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - n, err = getFunc(path, attr, []byte{}) - if err != nil { - return nil, err - } - buf = make([]byte, n) - n, err = getFunc(path, attr, buf) - } - if err != nil { - return nil, err - } - return buf[:n], nil -} diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go deleted file mode 100644 index f8fa8c63fbae..000000000000 --- a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go +++ /dev/null @@ -1,67 +0,0 @@ -// +build !linux,!darwin - -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package sysx - -import ( - "errors" - "runtime" -) - -var errUnsupported = errors.New("extended attributes unsupported on " + runtime.GOOS) - -// Listxattr calls syscall listxattr and reads all content -// and returns a string array -func Listxattr(path string) ([]string, error) { - return []string{}, nil -} - -// Removexattr calls syscall removexattr -func Removexattr(path string, attr string) (err error) { - return errUnsupported -} - -// Setxattr calls syscall setxattr -func Setxattr(path string, attr string, data []byte, flags int) (err error) { - return errUnsupported -} - -// Getxattr calls syscall getxattr -func Getxattr(path, attr string) ([]byte, error) { - return []byte{}, errUnsupported -} - -// LListxattr lists xattrs, not following symlinks -func LListxattr(path string) ([]string, error) { - return []string{}, nil -} - -// LRemovexattr removes an xattr, not following symlinks -func LRemovexattr(path string, attr string) (err error) { - return errUnsupported -} - -// LSetxattr sets an xattr, not following symlinks -func LSetxattr(path string, attr string, data []byte, flags int) (err error) { - return errUnsupported -} - -// LGetxattr gets an xattr, not following symlinks -func LGetxattr(path, attr string) ([]byte, error) { - return []byte{}, nil -} diff --git a/vendor/github.com/containerd/typeurl/LICENSE b/vendor/github.com/containerd/typeurl/LICENSE deleted file mode 100644 index 584149b6ee28..000000000000 --- a/vendor/github.com/containerd/typeurl/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright The containerd Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md deleted file mode 100644 index d021e9672497..000000000000 --- a/vendor/github.com/containerd/typeurl/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# typeurl - -[![PkgGoDev](https://pkg.go.dev/badge/github.com/containerd/typeurl)](https://pkg.go.dev/github.com/containerd/typeurl) -[![Build Status](https://github.com/containerd/typeurl/workflows/CI/badge.svg)](https://github.com/containerd/typeurl/actions?query=workflow%3ACI) -[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl) -[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/typeurl)](https://goreportcard.com/report/github.com/containerd/typeurl) - -A Go package for managing the registration, marshaling, and unmarshaling of encoded types. - -This package helps when types are sent over a GRPC API and marshaled as a [protobuf.Any](https://github.com/gogo/protobuf/blob/master/protobuf/google/protobuf/any.proto). - -## Project details - -**typeurl** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE). -As a containerd sub-project, you will find the: - * [Project governance](https://github.com/containerd/project/blob/master/GOVERNANCE.md), - * [Maintainers](https://github.com/containerd/project/blob/master/MAINTAINERS), - * and [Contributing guidelines](https://github.com/containerd/project/blob/master/CONTRIBUTING.md) - -information in our [`containerd/project`](https://github.com/containerd/project) repository. diff --git a/vendor/github.com/containerd/typeurl/doc.go b/vendor/github.com/containerd/typeurl/doc.go deleted file mode 100644 index c0d0fd205333..000000000000 --- a/vendor/github.com/containerd/typeurl/doc.go +++ /dev/null @@ -1,83 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -// Package typeurl assists with managing the registration, marshaling, and -// unmarshaling of types encoded as protobuf.Any. -// -// A protobuf.Any is a proto message that can contain any arbitrary data. It -// consists of two components, a TypeUrl and a Value, and its proto definition -// looks like this: -// -// message Any { -// string type_url = 1; -// bytes value = 2; -// } -// -// The TypeUrl is used to distinguish the contents from other proto.Any -// messages. This typeurl library manages these URLs to enable automagic -// marshaling and unmarshaling of the contents. -// -// For example, consider this go struct: -// -// type Foo struct { -// Field1 string -// Field2 string -// } -// -// To use typeurl, types must first be registered. This is typically done in -// the init function -// -// func init() { -// typeurl.Register(&Foo{}, "Foo") -// } -// -// This will register the type Foo with the url path "Foo". The arguments to -// Register are variadic, and are used to construct a url path. 
Consider this -// example, from the github.com/containerd/containerd/client package: -// -// func init() { -// const prefix = "types.containerd.io" -// // register TypeUrls for commonly marshaled external types -// major := strconv.Itoa(specs.VersionMajor) -// typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec") -// // this function has more Register calls, which are elided. -// } -// -// This registers several types under a more complex url, which ends up mapping -// to `types.containerd.io/opencontainers/runtime-spec/1/Spec` (or some other -// value for major). -// -// Once a type is registered, it can be marshaled to a proto.Any message simply -// by calling `MarshalAny`, like this: -// -// foo := &Foo{Field1: "value1", Field2: "value2"} -// anyFoo, err := typeurl.MarshalAny(foo) -// -// MarshalAny will resolve the correct URL for the type. If the type in -// question implements the proto.Message interface, then it will be marshaled -// as a proto message. Otherwise, it will be marshaled as json. This means that -// typeurl will work on any arbitrary data, whether or not it has a proto -// definition, as long as it can be serialized to json. -// -// To unmarshal, the process is simply inverse: -// -// iface, err := typeurl.UnmarshalAny(anyFoo) -// foo := iface.(*Foo) -// -// The correct type is automatically chosen from the type registry, and the -// returned interface can be cast straight to that type. diff --git a/vendor/github.com/containerd/typeurl/go.mod b/vendor/github.com/containerd/typeurl/go.mod deleted file mode 100644 index 77e171e57bcb..000000000000 --- a/vendor/github.com/containerd/typeurl/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module github.com/containerd/typeurl - -go 1.13 - -require ( - github.com/gogo/protobuf v1.3.2 - github.com/pkg/errors v0.9.1 -) diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go deleted file mode 100644 index 647d419a293d..000000000000 --- a/vendor/github.com/containerd/typeurl/types.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - Copyright The containerd Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. -*/ - -package typeurl - -import ( - "encoding/json" - "path" - "reflect" - "sync" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/types" - "github.com/pkg/errors" -) - -var ( - mu sync.RWMutex - registry = make(map[reflect.Type]string) -) - -// Definitions of common error types used throughout typeurl. -// -// These error types are used with errors.Wrap and errors.Wrapf to add context -// to an error. -// -// To detect an error class, use errors.Is() functions to tell whether an -// error is of this type. -var ( - ErrNotFound = errors.New("not found") -) - -// Register a type with a base URL for JSON marshaling. When the MarshalAny and -// UnmarshalAny functions are called they will treat the Any type value as JSON. -// To use protocol buffers for handling the Any value the proto.Register -// function should be used instead of this function. 
-func Register(v interface{}, args ...string) { - var ( - t = tryDereference(v) - p = path.Join(args...) - ) - mu.Lock() - defer mu.Unlock() - if et, ok := registry[t]; ok { - if et != p { - panic(errors.Errorf("type registered with alternate path %q != %q", et, p)) - } - return - } - registry[t] = p -} - -// TypeURL returns the type url for a registered type. -func TypeURL(v interface{}) (string, error) { - mu.RLock() - u, ok := registry[tryDereference(v)] - mu.RUnlock() - if !ok { - // fallback to the proto registry if it is a proto message - pb, ok := v.(proto.Message) - if !ok { - return "", errors.Wrapf(ErrNotFound, "type %s", reflect.TypeOf(v)) - } - return proto.MessageName(pb), nil - } - return u, nil -} - -// Is returns true if the type of the Any is the same as v. -func Is(any *types.Any, v interface{}) bool { - // call to check that v is a pointer - tryDereference(v) - url, err := TypeURL(v) - if err != nil { - return false - } - return any.TypeUrl == url -} - -// MarshalAny marshals the value v into an any with the correct TypeUrl. -// If the provided object is already a proto.Any message, then it will be -// returned verbatim. If it is of type proto.Message, it will be marshaled as a -// protocol buffer. Otherwise, the object will be marshaled to json. -func MarshalAny(v interface{}) (*types.Any, error) { - var marshal func(v interface{}) ([]byte, error) - switch t := v.(type) { - case *types.Any: - // avoid reserializing the type if we have an any. - return t, nil - case proto.Message: - marshal = func(v interface{}) ([]byte, error) { - return proto.Marshal(t) - } - default: - marshal = json.Marshal - } - - url, err := TypeURL(v) - if err != nil { - return nil, err - } - - data, err := marshal(v) - if err != nil { - return nil, err - } - return &types.Any{ - TypeUrl: url, - Value: data, - }, nil -} - -// UnmarshalAny unmarshals the any type into a concrete type. -func UnmarshalAny(any *types.Any) (interface{}, error) { - return UnmarshalByTypeURL(any.TypeUrl, any.Value) -} - -// UnmarshalByTypeURL unmarshals the given type and value to into a concrete type. -func UnmarshalByTypeURL(typeURL string, value []byte) (interface{}, error) { - return unmarshal(typeURL, value, nil) -} - -// UnmarshalTo unmarshals the any type into a concrete type passed in the out -// argument. It is identical to UnmarshalAny, but lets clients provide a -// destination type through the out argument. -func UnmarshalTo(any *types.Any, out interface{}) error { - return UnmarshalToByTypeURL(any.TypeUrl, any.Value, out) -} - -// UnmarshalTo unmarshals the given type and value into a concrete type passed -// in the out argument. It is identical to UnmarshalByTypeURL, but lets clients -// provide a destination type through the out argument. 
-func UnmarshalToByTypeURL(typeURL string, value []byte, out interface{}) error { - _, err := unmarshal(typeURL, value, out) - return err -} - -func unmarshal(typeURL string, value []byte, v interface{}) (interface{}, error) { - t, err := getTypeByUrl(typeURL) - if err != nil { - return nil, err - } - - if v == nil { - v = reflect.New(t.t).Interface() - } else { - // Validate interface type provided by client - vURL, err := TypeURL(v) - if err != nil { - return nil, err - } - if typeURL != vURL { - return nil, errors.Errorf("can't unmarshal type %q to output %q", typeURL, vURL) - } - } - - if t.isProto { - err = proto.Unmarshal(value, v.(proto.Message)) - } else { - err = json.Unmarshal(value, v) - } - - return v, err -} - -type urlType struct { - t reflect.Type - isProto bool -} - -func getTypeByUrl(url string) (urlType, error) { - mu.RLock() - for t, u := range registry { - if u == url { - mu.RUnlock() - return urlType{ - t: t, - }, nil - } - } - mu.RUnlock() - // fallback to proto registry - t := proto.MessageType(url) - if t != nil { - return urlType{ - // get the underlying Elem because proto returns a pointer to the type - t: t.Elem(), - isProto: true, - }, nil - } - return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url) -} - -func tryDereference(v interface{}) reflect.Type { - t := reflect.TypeOf(v) - if t.Kind() == reflect.Ptr { - // require check of pointer but dereference to register - return t.Elem() - } - panic("v is not a pointer to a type") -} diff --git a/vendor/github.com/gofrs/flock/LICENSE b/vendor/github.com/gofrs/flock/LICENSE deleted file mode 100644 index 8b8ff36fe426..000000000000 --- a/vendor/github.com/gofrs/flock/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015-2020, Tim Heckman -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of gofrs nor the names of its contributors may be used - to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/gofrs/flock/README.md b/vendor/github.com/gofrs/flock/README.md deleted file mode 100644 index 71ce63692e8e..000000000000 --- a/vendor/github.com/gofrs/flock/README.md +++ /dev/null @@ -1,41 +0,0 @@ -# flock -[![TravisCI Build Status](https://img.shields.io/travis/gofrs/flock/master.svg?style=flat)](https://travis-ci.org/gofrs/flock) -[![GoDoc](https://img.shields.io/badge/godoc-flock-blue.svg?style=flat)](https://godoc.org/github.com/gofrs/flock) -[![License](https://img.shields.io/badge/license-BSD_3--Clause-brightgreen.svg?style=flat)](https://github.com/gofrs/flock/blob/master/LICENSE) -[![Go Report Card](https://goreportcard.com/badge/github.com/gofrs/flock)](https://goreportcard.com/report/github.com/gofrs/flock) - -`flock` implements a thread-safe sync.Locker interface for file locking. It also -includes a non-blocking TryLock() function to allow locking without blocking execution. - -## License -`flock` is released under the BSD 3-Clause License. See the `LICENSE` file for more details. - -## Go Compatibility -This package makes use of the `context` package that was introduced in Go 1.7. As such, this -package has an implicit dependency on Go 1.7+. - -## Installation -``` -go get -u github.com/gofrs/flock -``` - -## Usage -```Go -import "github.com/gofrs/flock" - -fileLock := flock.New("/var/lock/go-lock.lock") - -locked, err := fileLock.TryLock() - -if err != nil { - // handle locking error -} - -if locked { - // do work - fileLock.Unlock() -} -``` - -For more detailed usage information take a look at the package API docs on -[GoDoc](https://godoc.org/github.com/gofrs/flock). diff --git a/vendor/github.com/gofrs/flock/flock.go b/vendor/github.com/gofrs/flock/flock.go deleted file mode 100644 index 2fd16033763d..000000000000 --- a/vendor/github.com/gofrs/flock/flock.go +++ /dev/null @@ -1,135 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. -// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -// Package flock implements a thread-safe interface for file locking. -// It also includes a non-blocking TryLock() function to allow locking -// without blocking execution. -// -// Package flock is released under the BSD 3-Clause License. See the LICENSE file -// for more details. -// -// While using this library, remember that the locking behaviors are not -// guaranteed to be the same on each platform. For example, some UNIX-like -// operating systems will transparently convert a shared lock to an exclusive -// lock. If you Unlock() the flock from a location where you believe that you -// have the shared lock, you may accidentally drop the exclusive lock. -package flock - -import ( - "context" - "os" - "sync" - "time" -) - -// Flock is the struct type to handle file locking. All fields are unexported, -// with access to some of the fields provided by getter methods (Path() and Locked()). -type Flock struct { - path string - m sync.RWMutex - fh *os.File - l bool - r bool -} - -// New returns a new instance of *Flock. The only parameter -// it takes is the path to the desired lockfile. -func New(path string) *Flock { - return &Flock{path: path} -} - -// NewFlock returns a new instance of *Flock. The only parameter -// it takes is the path to the desired lockfile. -// -// Deprecated: Use New instead. -func NewFlock(path string) *Flock { - return New(path) -} - -// Close is equivalent to calling Unlock. -// -// This will release the lock and close the underlying file descriptor. 
-// It will not remove the file from disk, that's up to your application. -func (f *Flock) Close() error { - return f.Unlock() -} - -// Path returns the path as provided in NewFlock(). -func (f *Flock) Path() string { - return f.path -} - -// Locked returns the lock state (locked: true, unlocked: false). -// -// Warning: by the time you use the returned value, the state may have changed. -func (f *Flock) Locked() bool { - f.m.RLock() - defer f.m.RUnlock() - return f.l -} - -// RLocked returns the read lock state (locked: true, unlocked: false). -// -// Warning: by the time you use the returned value, the state may have changed. -func (f *Flock) RLocked() bool { - f.m.RLock() - defer f.m.RUnlock() - return f.r -} - -func (f *Flock) String() string { - return f.path -} - -// TryLockContext repeatedly tries to take an exclusive lock until one of the -// conditions is met: TryLock succeeds, TryLock fails with error, or Context -// Done channel is closed. -func (f *Flock) TryLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { - return tryCtx(ctx, f.TryLock, retryDelay) -} - -// TryRLockContext repeatedly tries to take a shared lock until one of the -// conditions is met: TryRLock succeeds, TryRLock fails with error, or Context -// Done channel is closed. -func (f *Flock) TryRLockContext(ctx context.Context, retryDelay time.Duration) (bool, error) { - return tryCtx(ctx, f.TryRLock, retryDelay) -} - -func tryCtx(ctx context.Context, fn func() (bool, error), retryDelay time.Duration) (bool, error) { - if ctx.Err() != nil { - return false, ctx.Err() - } - for { - if ok, err := fn(); ok || err != nil { - return ok, err - } - select { - case <-ctx.Done(): - return false, ctx.Err() - case <-time.After(retryDelay): - // try again - } - } -} - -func (f *Flock) setFh() error { - // open a new os.File instance - // create it if it doesn't exist, and open the file read-only. - fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDONLY, os.FileMode(0600)) - if err != nil { - return err - } - - // set the filehandle on the struct - f.fh = fh - return nil -} - -// ensure the file handle is closed if no lock is held -func (f *Flock) ensureFhState() { - if !f.l && !f.r && f.fh != nil { - f.fh.Close() - f.fh = nil - } -} diff --git a/vendor/github.com/gofrs/flock/flock_unix.go b/vendor/github.com/gofrs/flock/flock_unix.go deleted file mode 100644 index 366a60ca6d3b..000000000000 --- a/vendor/github.com/gofrs/flock/flock_unix.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. -// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -// +build !windows - -package flock - -import ( - "os" - "syscall" -) - -// Lock is a blocking call to try and take an exclusive file lock. It will wait -// until it is able to obtain the exclusive file lock. It's recommended that -// TryLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already exclusive-locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. -// -// If the *Flock has a shared lock (RLock), this may transparently replace the -// shared lock with an exclusive lock on some UNIX-like operating systems. Be -// careful when using exclusive locks in conjunction with shared locks -// (RLock()), because calling Unlock() may accidentally release the exclusive -// lock that was once a shared lock. 
-func (f *Flock) Lock() error { - return f.lock(&f.l, syscall.LOCK_EX) -} - -// RLock is a blocking call to try and take a shared file lock. It will wait -// until it is able to obtain the shared file lock. It's recommended that -// TryRLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already shared-locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. -func (f *Flock) RLock() error { - return f.lock(&f.r, syscall.LOCK_SH) -} - -func (f *Flock) lock(locked *bool, flag int) error { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return err - } - defer f.ensureFhState() - } - - if err := syscall.Flock(int(f.fh.Fd()), flag); err != nil { - shouldRetry, reopenErr := f.reopenFDOnError(err) - if reopenErr != nil { - return reopenErr - } - - if !shouldRetry { - return err - } - - if err = syscall.Flock(int(f.fh.Fd()), flag); err != nil { - return err - } - } - - *locked = true - return nil -} - -// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so -// while it is running the Locked() and RLocked() functions will be blocked. -// -// This function short-circuits if we are unlocked already. If not, it calls -// syscall.LOCK_UN on the file and closes the file descriptor. It does not -// remove the file from disk. It's up to your application to do. -// -// Please note, if your shared lock became an exclusive lock this may -// unintentionally drop the exclusive lock if called by the consumer that -// believes they have a shared lock. Please see Lock() for more details. -func (f *Flock) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - // if we aren't locked or if the lockfile instance is nil - // just return a nil error because we are unlocked - if (!f.l && !f.r) || f.fh == nil { - return nil - } - - // mark the file as unlocked - if err := syscall.Flock(int(f.fh.Fd()), syscall.LOCK_UN); err != nil { - return err - } - - f.fh.Close() - - f.l = false - f.r = false - f.fh = nil - - return nil -} - -// TryLock is the preferred function for taking an exclusive file lock. This -// function takes an RW-mutex lock before it tries to lock the file, so there is -// the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. If we are unable to get the exclusive -// file lock, the function will return false instead of waiting for the lock. If -// we get the lock, we also set the *Flock instance as being exclusive-locked. -func (f *Flock) TryLock() (bool, error) { - return f.try(&f.l, syscall.LOCK_EX) -} - -// TryRLock is the preferred function for taking a shared file lock. This -// function takes an RW-mutex lock before it tries to lock the file, so there is -// the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. If we are unable to get the shared file -// lock, the function will return false instead of waiting for the lock. If we -// get the lock, we also set the *Flock instance as being share-locked. 
-func (f *Flock) TryRLock() (bool, error) { - return f.try(&f.r, syscall.LOCK_SH) -} - -func (f *Flock) try(locked *bool, flag int) (bool, error) { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return true, nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return false, err - } - defer f.ensureFhState() - } - - var retried bool -retry: - err := syscall.Flock(int(f.fh.Fd()), flag|syscall.LOCK_NB) - - switch err { - case syscall.EWOULDBLOCK: - return false, nil - case nil: - *locked = true - return true, nil - } - if !retried { - if shouldRetry, reopenErr := f.reopenFDOnError(err); reopenErr != nil { - return false, reopenErr - } else if shouldRetry { - retried = true - goto retry - } - } - - return false, err -} - -// reopenFDOnError determines whether we should reopen the file handle -// in readwrite mode and try again. This comes from util-linux/sys-utils/flock.c: -// Since Linux 3.4 (commit 55725513) -// Probably NFSv4 where flock() is emulated by fcntl(). -func (f *Flock) reopenFDOnError(err error) (bool, error) { - if err != syscall.EIO && err != syscall.EBADF { - return false, nil - } - if st, err := f.fh.Stat(); err == nil { - // if the file is able to be read and written - if st.Mode()&0600 == 0600 { - f.fh.Close() - f.fh = nil - - // reopen in read-write mode and set the filehandle - fh, err := os.OpenFile(f.path, os.O_CREATE|os.O_RDWR, os.FileMode(0600)) - if err != nil { - return false, err - } - f.fh = fh - return true, nil - } - } - - return false, nil -} diff --git a/vendor/github.com/gofrs/flock/flock_winapi.go b/vendor/github.com/gofrs/flock/flock_winapi.go deleted file mode 100644 index fe405a255ae5..000000000000 --- a/vendor/github.com/gofrs/flock/flock_winapi.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. -// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -// +build windows - -package flock - -import ( - "syscall" - "unsafe" -) - -var ( - kernel32, _ = syscall.LoadLibrary("kernel32.dll") - procLockFileEx, _ = syscall.GetProcAddress(kernel32, "LockFileEx") - procUnlockFileEx, _ = syscall.GetProcAddress(kernel32, "UnlockFileEx") -) - -const ( - winLockfileFailImmediately = 0x00000001 - winLockfileExclusiveLock = 0x00000002 - winLockfileSharedLock = 0x00000000 -) - -// Use of 0x00000000 for the shared lock is a guess based on some the MS Windows -// `LockFileEX` docs, which document the `LOCKFILE_EXCLUSIVE_LOCK` flag as: -// -// > The function requests an exclusive lock. Otherwise, it requests a shared -// > lock. 
-// -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - -func lockFileEx(handle syscall.Handle, flags uint32, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { - r1, _, errNo := syscall.Syscall6( - uintptr(procLockFileEx), - 6, - uintptr(handle), - uintptr(flags), - uintptr(reserved), - uintptr(numberOfBytesToLockLow), - uintptr(numberOfBytesToLockHigh), - uintptr(unsafe.Pointer(offset))) - - if r1 != 1 { - if errNo == 0 { - return false, syscall.EINVAL - } - - return false, errNo - } - - return true, 0 -} - -func unlockFileEx(handle syscall.Handle, reserved uint32, numberOfBytesToLockLow uint32, numberOfBytesToLockHigh uint32, offset *syscall.Overlapped) (bool, syscall.Errno) { - r1, _, errNo := syscall.Syscall6( - uintptr(procUnlockFileEx), - 5, - uintptr(handle), - uintptr(reserved), - uintptr(numberOfBytesToLockLow), - uintptr(numberOfBytesToLockHigh), - uintptr(unsafe.Pointer(offset)), - 0) - - if r1 != 1 { - if errNo == 0 { - return false, syscall.EINVAL - } - - return false, errNo - } - - return true, 0 -} diff --git a/vendor/github.com/gofrs/flock/flock_windows.go b/vendor/github.com/gofrs/flock/flock_windows.go deleted file mode 100644 index ddb534ccef09..000000000000 --- a/vendor/github.com/gofrs/flock/flock_windows.go +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright 2015 Tim Heckman. All rights reserved. -// Use of this source code is governed by the BSD 3-Clause -// license that can be found in the LICENSE file. - -package flock - -import ( - "syscall" -) - -// ErrorLockViolation is the error code returned from the Windows syscall when a -// lock would block and you ask to fail immediately. -const ErrorLockViolation syscall.Errno = 0x21 // 33 - -// Lock is a blocking call to try and take an exclusive file lock. It will wait -// until it is able to obtain the exclusive file lock. It's recommended that -// TryLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. -func (f *Flock) Lock() error { - return f.lock(&f.l, winLockfileExclusiveLock) -} - -// RLock is a blocking call to try and take a shared file lock. It will wait -// until it is able to obtain the shared file lock. It's recommended that -// TryRLock() be used over this function. This function may block the ability to -// query the current Locked() or RLocked() status due to a RW-mutex lock. -// -// If we are already locked, this function short-circuits and returns -// immediately assuming it can take the mutex lock. -func (f *Flock) RLock() error { - return f.lock(&f.r, winLockfileSharedLock) -} - -func (f *Flock) lock(locked *bool, flag uint32) error { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return err - } - defer f.ensureFhState() - } - - if _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { - return errNo - } - - *locked = true - return nil -} - -// Unlock is a function to unlock the file. This file takes a RW-mutex lock, so -// while it is running the Locked() and RLocked() functions will be blocked. -// -// This function short-circuits if we are unlocked already. If not, it calls -// UnlockFileEx() on the file and closes the file descriptor. 
It does not remove -// the file from disk. It's up to your application to do. -func (f *Flock) Unlock() error { - f.m.Lock() - defer f.m.Unlock() - - // if we aren't locked or if the lockfile instance is nil - // just return a nil error because we are unlocked - if (!f.l && !f.r) || f.fh == nil { - return nil - } - - // mark the file as unlocked - if _, errNo := unlockFileEx(syscall.Handle(f.fh.Fd()), 0, 1, 0, &syscall.Overlapped{}); errNo > 0 { - return errNo - } - - f.fh.Close() - - f.l = false - f.r = false - f.fh = nil - - return nil -} - -// TryLock is the preferred function for taking an exclusive file lock. This -// function does take a RW-mutex lock before it tries to lock the file, so there -// is the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. If we are unable to get the exclusive -// file lock, the function will return false instead of waiting for the lock. If -// we get the lock, we also set the *Flock instance as being exclusive-locked. -func (f *Flock) TryLock() (bool, error) { - return f.try(&f.l, winLockfileExclusiveLock) -} - -// TryRLock is the preferred function for taking a shared file lock. This -// function does take a RW-mutex lock before it tries to lock the file, so there -// is the possibility that this function may block for a short time if another -// goroutine is trying to take any action. -// -// The actual file lock is non-blocking. If we are unable to get the shared file -// lock, the function will return false instead of waiting for the lock. If we -// get the lock, we also set the *Flock instance as being shared-locked. -func (f *Flock) TryRLock() (bool, error) { - return f.try(&f.r, winLockfileSharedLock) -} - -func (f *Flock) try(locked *bool, flag uint32) (bool, error) { - f.m.Lock() - defer f.m.Unlock() - - if *locked { - return true, nil - } - - if f.fh == nil { - if err := f.setFh(); err != nil { - return false, err - } - defer f.ensureFhState() - } - - _, errNo := lockFileEx(syscall.Handle(f.fh.Fd()), flag|winLockfileFailImmediately, 0, 1, 0, &syscall.Overlapped{}) - - if errNo > 0 { - if errNo == ErrorLockViolation || errNo == syscall.ERROR_IO_PENDING { - return false, nil - } - - return false, errNo - } - - *locked = true - - return true, nil -} diff --git a/vendor/github.com/gogo/googleapis/LICENSE b/vendor/github.com/gogo/googleapis/LICENSE deleted file mode 100644 index d6f85b181780..000000000000 --- a/vendor/github.com/gogo/googleapis/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2015, Google Inc - Copyright 2018, GoGo Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/github.com/gogo/googleapis/Readme.md b/vendor/github.com/gogo/googleapis/Readme.md deleted file mode 100644 index 587eb66dfd73..000000000000 --- a/vendor/github.com/gogo/googleapis/Readme.md +++ /dev/null @@ -1,5 +0,0 @@ -# Google APIs generated by gogoprotobuf - -[![Build Status](https://travis-ci.org/gogo/googleapis.svg?branch=master)](https://travis-ci.org/gogo/googleapis) - -The [grpc-example](https://github.com/gogo/grpc-example) includes an example usage of this repository. diff --git a/vendor/github.com/gogo/googleapis/go.mod b/vendor/github.com/gogo/googleapis/go.mod deleted file mode 100644 index 89be4cf8789e..000000000000 --- a/vendor/github.com/gogo/googleapis/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/gogo/googleapis - -go 1.12 - -require github.com/gogo/protobuf v1.3.1 diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go deleted file mode 100644 index 1f4d4d432217..000000000000 --- a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go +++ /dev/null @@ -1,257 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: google/rpc/code.proto - -package rpc - -import ( - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - math "math" - strconv "strconv" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// The canonical error codes for Google APIs. -// -// -// Sometimes multiple error codes may apply. Services should return -// the most specific error code that applies. For example, prefer -// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. -// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. -type Code int32 - -const ( - // Not an error; returned on success - // - // HTTP Mapping: 200 OK - OK Code = 0 - // The operation was cancelled, typically by the caller. - // - // HTTP Mapping: 499 Client Closed Request - CANCELLED Code = 1 - // Unknown error. For example, this error may be returned when - // a `Status` value received from another address space belongs to - // an error space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - // - // HTTP Mapping: 500 Internal Server Error - UNKNOWN Code = 2 - // The client specified an invalid argument. Note that this differs - // from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - // - // HTTP Mapping: 400 Bad Request - INVALID_ARGUMENT Code = 3 - // The deadline expired before the operation could complete. For operations - // that change the state of the system, this error may be returned - // even if the operation has completed successfully. For example, a - // successful response from a server could have been delayed long - // enough for the deadline to expire. - // - // HTTP Mapping: 504 Gateway Timeout - DEADLINE_EXCEEDED Code = 4 - // Some requested entity (e.g., file or directory) was not found. 
- // - // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented whitelist, - // `NOT_FOUND` may be used. If a request is denied for some users within - // a class of users, such as user-based access control, `PERMISSION_DENIED` - // must be used. - // - // HTTP Mapping: 404 Not Found - NOT_FOUND Code = 5 - // The entity that a client attempted to create (e.g., file or directory) - // already exists. - // - // HTTP Mapping: 409 Conflict - ALREADY_EXISTS Code = 6 - // The caller does not have permission to execute the specified - // operation. `PERMISSION_DENIED` must not be used for rejections - // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` - // instead for those errors). `PERMISSION_DENIED` must not be - // used if the caller can not be identified (use `UNAUTHENTICATED` - // instead for those errors). This error code does not imply the - // request is valid or the requested entity exists or satisfies - // other pre-conditions. - // - // HTTP Mapping: 403 Forbidden - PERMISSION_DENIED Code = 7 - // The request does not have valid authentication credentials for the - // operation. - // - // HTTP Mapping: 401 Unauthorized - UNAUTHENTICATED Code = 16 - // Some resource has been exhausted, perhaps a per-user quota, or - // perhaps the entire file system is out of space. - // - // HTTP Mapping: 429 Too Many Requests - RESOURCE_EXHAUSTED Code = 8 - // The operation was rejected because the system is not in a state - // required for the operation's execution. For example, the directory - // to be deleted is non-empty, an rmdir operation is applied to - // a non-directory, etc. - // - // Service implementors can use the following guidelines to decide - // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // (a) Use `UNAVAILABLE` if the client can retry just the failing call. - // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence). - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. - // - // HTTP Mapping: 400 Bad Request - FAILED_PRECONDITION Code = 9 - // The operation was aborted, typically due to a concurrency issue such as - // a sequencer check failure or transaction abort. - // - // See the guidelines above for deciding between `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 409 Conflict - ABORTED Code = 10 - // The operation was attempted past the valid range. E.g., seeking or - // reading past end-of-file. - // - // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate `INVALID_ARGUMENT` if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // `OUT_OF_RANGE` if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between `FAILED_PRECONDITION` and - // `OUT_OF_RANGE`. 
We recommend using `OUT_OF_RANGE` (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an `OUT_OF_RANGE` error to detect when - // they are done. - // - // HTTP Mapping: 400 Bad Request - OUT_OF_RANGE Code = 11 - // The operation is not implemented or is not supported/enabled in this - // service. - // - // HTTP Mapping: 501 Not Implemented - UNIMPLEMENTED Code = 12 - // Internal errors. This means that some invariants expected by the - // underlying system have been broken. This error code is reserved - // for serious errors. - // - // HTTP Mapping: 500 Internal Server Error - INTERNAL Code = 13 - // The service is currently unavailable. This is most likely a - // transient condition, which can be corrected by retrying with - // a backoff. - // - // See the guidelines above for deciding between `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 503 Service Unavailable - UNAVAILABLE Code = 14 - // Unrecoverable data loss or corruption. - // - // HTTP Mapping: 500 Internal Server Error - DATA_LOSS Code = 15 -) - -var Code_name = map[int32]string{ - 0: "OK", - 1: "CANCELLED", - 2: "UNKNOWN", - 3: "INVALID_ARGUMENT", - 4: "DEADLINE_EXCEEDED", - 5: "NOT_FOUND", - 6: "ALREADY_EXISTS", - 7: "PERMISSION_DENIED", - 16: "UNAUTHENTICATED", - 8: "RESOURCE_EXHAUSTED", - 9: "FAILED_PRECONDITION", - 10: "ABORTED", - 11: "OUT_OF_RANGE", - 12: "UNIMPLEMENTED", - 13: "INTERNAL", - 14: "UNAVAILABLE", - 15: "DATA_LOSS", -} - -var Code_value = map[string]int32{ - "OK": 0, - "CANCELLED": 1, - "UNKNOWN": 2, - "INVALID_ARGUMENT": 3, - "DEADLINE_EXCEEDED": 4, - "NOT_FOUND": 5, - "ALREADY_EXISTS": 6, - "PERMISSION_DENIED": 7, - "UNAUTHENTICATED": 16, - "RESOURCE_EXHAUSTED": 8, - "FAILED_PRECONDITION": 9, - "ABORTED": 10, - "OUT_OF_RANGE": 11, - "UNIMPLEMENTED": 12, - "INTERNAL": 13, - "UNAVAILABLE": 14, - "DATA_LOSS": 15, -} - -func (Code) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_fe593a732623ccf0, []int{0} -} - -func init() { - proto.RegisterEnum("google.rpc.Code", Code_name, Code_value) -} - -func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor_fe593a732623ccf0) } - -var fileDescriptor_fe593a732623ccf0 = []byte{ - // 393 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x91, 0x3d, 0x6e, 0x13, 0x41, - 0x14, 0xc7, 0x3d, 0x76, 0x70, 0xe2, 0xf1, 0xd7, 0xcb, 0x84, 0x40, 0x37, 0x07, 0xa0, 0x70, 0x0a, - 0x4e, 0xf0, 0xbc, 0xf3, 0x9c, 0x8c, 0x32, 0x7e, 0xb3, 0x9a, 0x9d, 0x09, 0x01, 0x21, 0xad, 0xc4, - 0xc6, 0x4a, 0x03, 0x5a, 0xcb, 0xe2, 0x00, 0x9c, 0x85, 0x8a, 0x1b, 0x70, 0x85, 0x94, 0x29, 0x29, - 0xf1, 0xa6, 0xa1, 0x74, 0x49, 0x89, 0x06, 0x0a, 0xda, 0x9f, 0xde, 0xc7, 0xff, 0x43, 0x9e, 0xdf, - 0xb7, 0xed, 0xfd, 0xc7, 0xcd, 0xc5, 0x6e, 0xdb, 0x5c, 0x34, 0xed, 0xdd, 0x66, 0xb1, 0xdd, 0xb5, - 0x9f, 0x5b, 0x25, 0xff, 0xe1, 0xc5, 0x6e, 0xdb, 0xbc, 0xfa, 0xde, 0x97, 0x47, 0x45, 0x7b, 0xb7, - 0x51, 0x43, 0xd9, 0xf7, 0xd7, 0xd0, 0x53, 0x53, 0x39, 0x2a, 0x90, 0x0b, 0x72, 0x8e, 0x0c, 0x08, - 0x35, 0x96, 0xc7, 0x89, 0xaf, 0xd9, 0xbf, 0x61, 0xe8, 0xab, 0xe7, 0x12, 0x2c, 0xdf, 0xa0, 0xb3, - 0xa6, 0xc6, 0x70, 0x99, 0xd6, 0xc4, 0x11, 0x06, 0xea, 0x5c, 0x9e, 0x1a, 0x42, 0xe3, 0x2c, 0x53, - 0x4d, 0xb7, 0x05, 0x91, 0x21, 0x03, 0x47, 0xf9, 0x10, 0xfb, 0x58, 0xaf, 0x7c, 0x62, 0x03, 0xcf, - 0x94, 0x92, 0x33, 0x74, 0x81, 0xd0, 0xbc, 0xad, 0xe9, 0xd6, 0x56, 0xb1, 0x82, 0x61, 0xde, 0x2c, - 0x29, 0xac, 0x6d, 0x55, 0x59, 0xcf, 0xb5, 0x21, 0xb6, 0x64, 
0xe0, 0x58, 0x9d, 0xc9, 0x79, 0x62, - 0x4c, 0xf1, 0x8a, 0x38, 0xda, 0x02, 0x23, 0x19, 0x00, 0xf5, 0x42, 0xaa, 0x40, 0x95, 0x4f, 0xa1, - 0xc8, 0x5f, 0xae, 0x30, 0x55, 0x99, 0x9f, 0xa8, 0x97, 0xf2, 0x6c, 0x85, 0xd6, 0x91, 0xa9, 0xcb, - 0x40, 0x85, 0x67, 0x63, 0xa3, 0xf5, 0x0c, 0xa3, 0xac, 0x1c, 0x97, 0x3e, 0xe4, 0x29, 0xa9, 0x40, - 0x4e, 0x7c, 0x8a, 0xb5, 0x5f, 0xd5, 0x01, 0xf9, 0x92, 0x60, 0xac, 0x4e, 0xe5, 0x34, 0xb1, 0x5d, - 0x97, 0x8e, 0xb2, 0x0d, 0x32, 0x30, 0x51, 0x13, 0x79, 0x62, 0x39, 0x52, 0x60, 0x74, 0x30, 0x55, - 0x73, 0x39, 0x4e, 0x8c, 0x37, 0x68, 0x1d, 0x2e, 0x1d, 0xc1, 0x2c, 0x1b, 0x32, 0x18, 0xb1, 0x76, - 0xbe, 0xaa, 0x60, 0xbe, 0x7c, 0xff, 0xb8, 0xd7, 0xbd, 0x1f, 0x7b, 0xdd, 0x3b, 0xec, 0xb5, 0xf8, - 0xbd, 0xd7, 0xe2, 0x4b, 0xa7, 0xc5, 0xb7, 0x4e, 0x8b, 0x87, 0x4e, 0x8b, 0xc7, 0x4e, 0x8b, 0x9f, - 0x9d, 0x16, 0xbf, 0x3a, 0xdd, 0x3b, 0x64, 0xfe, 0xa4, 0xc5, 0xc3, 0x93, 0x16, 0x72, 0xd6, 0xb4, - 0x9f, 0x16, 0xff, 0xf3, 0x5f, 0x8e, 0x72, 0xf8, 0x65, 0xae, 0xa5, 0x14, 0xef, 0x06, 0xbb, 0x6d, - 0xf3, 0xb5, 0x3f, 0x08, 0x65, 0xf1, 0x61, 0xf8, 0xb7, 0xaa, 0xd7, 0x7f, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x03, 0xd4, 0x27, 0xff, 0xc3, 0x01, 0x00, 0x00, -} - -func (x Code) String() string { - s, ok := Code_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.proto b/vendor/github.com/gogo/googleapis/google/rpc/code.proto deleted file mode 100644 index 0540a4f6c583..000000000000 --- a/vendor/github.com/gogo/googleapis/google/rpc/code.proto +++ /dev/null @@ -1,185 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -option go_package = "rpc"; -option java_multiple_files = true; -option java_outer_classname = "CodeProto"; -option java_package = "com.google.rpc"; -option objc_class_prefix = "RPC"; - -// The canonical error codes for Google APIs. -// -// -// Sometimes multiple error codes may apply. Services should return -// the most specific error code that applies. For example, prefer -// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply. -// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`. -enum Code { - // Not an error; returned on success - // - // HTTP Mapping: 200 OK - OK = 0; - - // The operation was cancelled, typically by the caller. - // - // HTTP Mapping: 499 Client Closed Request - CANCELLED = 1; - - // Unknown error. For example, this error may be returned when - // a `Status` value received from another address space belongs to - // an error space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - // - // HTTP Mapping: 500 Internal Server Error - UNKNOWN = 2; - - // The client specified an invalid argument. Note that this differs - // from `FAILED_PRECONDITION`. 
`INVALID_ARGUMENT` indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - // - // HTTP Mapping: 400 Bad Request - INVALID_ARGUMENT = 3; - - // The deadline expired before the operation could complete. For operations - // that change the state of the system, this error may be returned - // even if the operation has completed successfully. For example, a - // successful response from a server could have been delayed long - // enough for the deadline to expire. - // - // HTTP Mapping: 504 Gateway Timeout - DEADLINE_EXCEEDED = 4; - - // Some requested entity (e.g., file or directory) was not found. - // - // Note to server developers: if a request is denied for an entire class - // of users, such as gradual feature rollout or undocumented whitelist, - // `NOT_FOUND` may be used. If a request is denied for some users within - // a class of users, such as user-based access control, `PERMISSION_DENIED` - // must be used. - // - // HTTP Mapping: 404 Not Found - NOT_FOUND = 5; - - // The entity that a client attempted to create (e.g., file or directory) - // already exists. - // - // HTTP Mapping: 409 Conflict - ALREADY_EXISTS = 6; - - // The caller does not have permission to execute the specified - // operation. `PERMISSION_DENIED` must not be used for rejections - // caused by exhausting some resource (use `RESOURCE_EXHAUSTED` - // instead for those errors). `PERMISSION_DENIED` must not be - // used if the caller can not be identified (use `UNAUTHENTICATED` - // instead for those errors). This error code does not imply the - // request is valid or the requested entity exists or satisfies - // other pre-conditions. - // - // HTTP Mapping: 403 Forbidden - PERMISSION_DENIED = 7; - - // The request does not have valid authentication credentials for the - // operation. - // - // HTTP Mapping: 401 Unauthorized - UNAUTHENTICATED = 16; - - // Some resource has been exhausted, perhaps a per-user quota, or - // perhaps the entire file system is out of space. - // - // HTTP Mapping: 429 Too Many Requests - RESOURCE_EXHAUSTED = 8; - - // The operation was rejected because the system is not in a state - // required for the operation's execution. For example, the directory - // to be deleted is non-empty, an rmdir operation is applied to - // a non-directory, etc. - // - // Service implementors can use the following guidelines to decide - // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`: - // (a) Use `UNAVAILABLE` if the client can retry just the failing call. - // (b) Use `ABORTED` if the client should retry at a higher level - // (e.g., when a client-specified test-and-set fails, indicating the - // client should restart a read-modify-write sequence). - // (c) Use `FAILED_PRECONDITION` if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, `FAILED_PRECONDITION` - // should be returned since the client should not retry unless - // the files are deleted from the directory. - // - // HTTP Mapping: 400 Bad Request - FAILED_PRECONDITION = 9; - - // The operation was aborted, typically due to a concurrency issue such as - // a sequencer check failure or transaction abort. - // - // See the guidelines above for deciding between `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 409 Conflict - ABORTED = 10; - - // The operation was attempted past the valid range. 
E.g., seeking or - // reading past end-of-file. - // - // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate `INVALID_ARGUMENT` if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // `OUT_OF_RANGE` if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between `FAILED_PRECONDITION` and - // `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an `OUT_OF_RANGE` error to detect when - // they are done. - // - // HTTP Mapping: 400 Bad Request - OUT_OF_RANGE = 11; - - // The operation is not implemented or is not supported/enabled in this - // service. - // - // HTTP Mapping: 501 Not Implemented - UNIMPLEMENTED = 12; - - // Internal errors. This means that some invariants expected by the - // underlying system have been broken. This error code is reserved - // for serious errors. - // - // HTTP Mapping: 500 Internal Server Error - INTERNAL = 13; - - // The service is currently unavailable. This is most likely a - // transient condition, which can be corrected by retrying with - // a backoff. - // - // See the guidelines above for deciding between `FAILED_PRECONDITION`, - // `ABORTED`, and `UNAVAILABLE`. - // - // HTTP Mapping: 503 Service Unavailable - UNAVAILABLE = 14; - - // Unrecoverable data loss or corruption. - // - // HTTP Mapping: 500 Internal Server Error - DATA_LOSS = 15; -} diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go deleted file mode 100644 index 07d251d4b153..000000000000 --- a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go +++ /dev/null @@ -1,4904 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: google/rpc/error_details.proto - -package rpc - -import ( - bytes "bytes" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Describes when the clients can retry a failed request. Clients could ignore -// the recommendation here or retry when this information is missing from error -// responses. -// -// It's always recommended that clients should use exponential backoff when -// retrying. -// -// Clients should wait until `retry_delay` amount of time has passed since -// receiving the error response before retrying. If retrying requests also -// fail, clients should use an exponential backoff scheme to gradually increase -// the delay between retries based on `retry_delay`, until either a maximum -// number of retires have been reached or a maximum retry delay cap has been -// reached. -type RetryInfo struct { - // Clients should wait at least this long between retrying the same request. 
- RetryDelay *types.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RetryInfo) Reset() { *m = RetryInfo{} } -func (*RetryInfo) ProtoMessage() {} -func (*RetryInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{0} -} -func (m *RetryInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RetryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RetryInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RetryInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_RetryInfo.Merge(m, src) -} -func (m *RetryInfo) XXX_Size() int { - return m.Size() -} -func (m *RetryInfo) XXX_DiscardUnknown() { - xxx_messageInfo_RetryInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_RetryInfo proto.InternalMessageInfo - -func (m *RetryInfo) GetRetryDelay() *types.Duration { - if m != nil { - return m.RetryDelay - } - return nil -} - -func (*RetryInfo) XXX_MessageName() string { - return "google.rpc.RetryInfo" -} - -// Describes additional debugging info. -type DebugInfo struct { - // The stack trace entries indicating where the error occurred. - StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` - // Additional debugging information provided by the server. - Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DebugInfo) Reset() { *m = DebugInfo{} } -func (*DebugInfo) ProtoMessage() {} -func (*DebugInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{1} -} -func (m *DebugInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DebugInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DebugInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DebugInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_DebugInfo.Merge(m, src) -} -func (m *DebugInfo) XXX_Size() int { - return m.Size() -} -func (m *DebugInfo) XXX_DiscardUnknown() { - xxx_messageInfo_DebugInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_DebugInfo proto.InternalMessageInfo - -func (m *DebugInfo) GetStackEntries() []string { - if m != nil { - return m.StackEntries - } - return nil -} - -func (m *DebugInfo) GetDetail() string { - if m != nil { - return m.Detail - } - return "" -} - -func (*DebugInfo) XXX_MessageName() string { - return "google.rpc.DebugInfo" -} - -// Describes how a quota check failed. -// -// For example if a daily limit was exceeded for the calling project, -// a service could respond with a QuotaFailure detail containing the project -// id and the description of the quota limit that was exceeded. If the -// calling project hasn't enabled the service in the developer console, then -// a service could respond with the project id and set `service_disabled` -// to true. -// -// Also see RetryDetail and Help types for other details about handling a -// quota failure. 
-type QuotaFailure struct { - // Describes all quota violations. - Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QuotaFailure) Reset() { *m = QuotaFailure{} } -func (*QuotaFailure) ProtoMessage() {} -func (*QuotaFailure) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{2} -} -func (m *QuotaFailure) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QuotaFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QuotaFailure.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QuotaFailure) XXX_Merge(src proto.Message) { - xxx_messageInfo_QuotaFailure.Merge(m, src) -} -func (m *QuotaFailure) XXX_Size() int { - return m.Size() -} -func (m *QuotaFailure) XXX_DiscardUnknown() { - xxx_messageInfo_QuotaFailure.DiscardUnknown(m) -} - -var xxx_messageInfo_QuotaFailure proto.InternalMessageInfo - -func (m *QuotaFailure) GetViolations() []*QuotaFailure_Violation { - if m != nil { - return m.Violations - } - return nil -} - -func (*QuotaFailure) XXX_MessageName() string { - return "google.rpc.QuotaFailure" -} - -// A message type used to describe a single quota violation. For example, a -// daily quota or a custom quota that was exceeded. -type QuotaFailure_Violation struct { - // The subject on which the quota check failed. - // For example, "clientip:" or "project:". - Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` - // A description of how the quota check failed. Clients can use this - // description to find more about the quota configuration in the service's - // public documentation, or find the relevant quota limit to adjust through - // developer console. - // - // For example: "Service disabled" or "Daily Limit for read operations - // exceeded". 
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *QuotaFailure_Violation) Reset() { *m = QuotaFailure_Violation{} } -func (*QuotaFailure_Violation) ProtoMessage() {} -func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{2, 0} -} -func (m *QuotaFailure_Violation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QuotaFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QuotaFailure_Violation.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QuotaFailure_Violation) XXX_Merge(src proto.Message) { - xxx_messageInfo_QuotaFailure_Violation.Merge(m, src) -} -func (m *QuotaFailure_Violation) XXX_Size() int { - return m.Size() -} -func (m *QuotaFailure_Violation) XXX_DiscardUnknown() { - xxx_messageInfo_QuotaFailure_Violation.DiscardUnknown(m) -} - -var xxx_messageInfo_QuotaFailure_Violation proto.InternalMessageInfo - -func (m *QuotaFailure_Violation) GetSubject() string { - if m != nil { - return m.Subject - } - return "" -} - -func (m *QuotaFailure_Violation) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (*QuotaFailure_Violation) XXX_MessageName() string { - return "google.rpc.QuotaFailure.Violation" -} - -// Describes what preconditions have failed. -// -// For example, if an RPC failed because it required the Terms of Service to be -// acknowledged, it could list the terms of service violation in the -// PreconditionFailure message. -type PreconditionFailure struct { - // Describes all precondition violations. - Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PreconditionFailure) Reset() { *m = PreconditionFailure{} } -func (*PreconditionFailure) ProtoMessage() {} -func (*PreconditionFailure) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{3} -} -func (m *PreconditionFailure) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PreconditionFailure) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PreconditionFailure.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PreconditionFailure) XXX_Merge(src proto.Message) { - xxx_messageInfo_PreconditionFailure.Merge(m, src) -} -func (m *PreconditionFailure) XXX_Size() int { - return m.Size() -} -func (m *PreconditionFailure) XXX_DiscardUnknown() { - xxx_messageInfo_PreconditionFailure.DiscardUnknown(m) -} - -var xxx_messageInfo_PreconditionFailure proto.InternalMessageInfo - -func (m *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { - if m != nil { - return m.Violations - } - return nil -} - -func (*PreconditionFailure) XXX_MessageName() string { - return "google.rpc.PreconditionFailure" -} - -// A message type used to describe a single precondition failure. 
-type PreconditionFailure_Violation struct { - // The type of PreconditionFailure. We recommend using a service-specific - // enum type to define the supported precondition violation types. For - // example, "TOS" for "Terms of Service violation". - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - // The subject, relative to the type, that failed. - // For example, "google.com/cloud" relative to the "TOS" type would - // indicate which terms of service is being referenced. - Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` - // A description of how the precondition failed. Developers can use this - // description to understand how to fix the failure. - // - // For example: "Terms of service not accepted". - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PreconditionFailure_Violation) Reset() { *m = PreconditionFailure_Violation{} } -func (*PreconditionFailure_Violation) ProtoMessage() {} -func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{3, 0} -} -func (m *PreconditionFailure_Violation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PreconditionFailure_Violation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PreconditionFailure_Violation.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PreconditionFailure_Violation) XXX_Merge(src proto.Message) { - xxx_messageInfo_PreconditionFailure_Violation.Merge(m, src) -} -func (m *PreconditionFailure_Violation) XXX_Size() int { - return m.Size() -} -func (m *PreconditionFailure_Violation) XXX_DiscardUnknown() { - xxx_messageInfo_PreconditionFailure_Violation.DiscardUnknown(m) -} - -var xxx_messageInfo_PreconditionFailure_Violation proto.InternalMessageInfo - -func (m *PreconditionFailure_Violation) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *PreconditionFailure_Violation) GetSubject() string { - if m != nil { - return m.Subject - } - return "" -} - -func (m *PreconditionFailure_Violation) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (*PreconditionFailure_Violation) XXX_MessageName() string { - return "google.rpc.PreconditionFailure.Violation" -} - -// Describes violations in a client request. This error type focuses on the -// syntactic aspects of the request. -type BadRequest struct { - // Describes all violations in a client request. 
- FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BadRequest) Reset() { *m = BadRequest{} } -func (*BadRequest) ProtoMessage() {} -func (*BadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{4} -} -func (m *BadRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BadRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BadRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_BadRequest.Merge(m, src) -} -func (m *BadRequest) XXX_Size() int { - return m.Size() -} -func (m *BadRequest) XXX_DiscardUnknown() { - xxx_messageInfo_BadRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_BadRequest proto.InternalMessageInfo - -func (m *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { - if m != nil { - return m.FieldViolations - } - return nil -} - -func (*BadRequest) XXX_MessageName() string { - return "google.rpc.BadRequest" -} - -// A message type used to describe a single bad request field. -type BadRequest_FieldViolation struct { - // A path leading to a field in the request body. The value will be a - // sequence of dot-separated identifiers that identify a protocol buffer - // field. E.g., "field_violations.field" would identify this field. - Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` - // A description of why the request element is bad. 
- Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BadRequest_FieldViolation) Reset() { *m = BadRequest_FieldViolation{} } -func (*BadRequest_FieldViolation) ProtoMessage() {} -func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{4, 0} -} -func (m *BadRequest_FieldViolation) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BadRequest_FieldViolation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BadRequest_FieldViolation.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BadRequest_FieldViolation) XXX_Merge(src proto.Message) { - xxx_messageInfo_BadRequest_FieldViolation.Merge(m, src) -} -func (m *BadRequest_FieldViolation) XXX_Size() int { - return m.Size() -} -func (m *BadRequest_FieldViolation) XXX_DiscardUnknown() { - xxx_messageInfo_BadRequest_FieldViolation.DiscardUnknown(m) -} - -var xxx_messageInfo_BadRequest_FieldViolation proto.InternalMessageInfo - -func (m *BadRequest_FieldViolation) GetField() string { - if m != nil { - return m.Field - } - return "" -} - -func (m *BadRequest_FieldViolation) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (*BadRequest_FieldViolation) XXX_MessageName() string { - return "google.rpc.BadRequest.FieldViolation" -} - -// Contains metadata about the request that clients can attach when filing a bug -// or providing other forms of feedback. -type RequestInfo struct { - // An opaque string that should only be interpreted by the service generating - // it. For example, it can be used to identify requests in the service's logs. - RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` - // Any data that was used to serve this request. For example, an encrypted - // stack trace that can be sent back to the service provider for debugging. 
- ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RequestInfo) Reset() { *m = RequestInfo{} } -func (*RequestInfo) ProtoMessage() {} -func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{5} -} -func (m *RequestInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestInfo.Merge(m, src) -} -func (m *RequestInfo) XXX_Size() int { - return m.Size() -} -func (m *RequestInfo) XXX_DiscardUnknown() { - xxx_messageInfo_RequestInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestInfo proto.InternalMessageInfo - -func (m *RequestInfo) GetRequestId() string { - if m != nil { - return m.RequestId - } - return "" -} - -func (m *RequestInfo) GetServingData() string { - if m != nil { - return m.ServingData - } - return "" -} - -func (*RequestInfo) XXX_MessageName() string { - return "google.rpc.RequestInfo" -} - -// Describes the resource that is being accessed. -type ResourceInfo struct { - // A name for the type of resource being accessed, e.g. "sql table", - // "cloud storage bucket", "file", "Google calendar"; or the type URL - // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". - ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` - // The name of the resource being accessed. For example, a shared calendar - // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is - // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. - ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` - // The owner of the resource (optional). - // For example, "user:" or "project:". - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` - // Describes what error is encountered when accessing this resource. - // For example, updating a cloud project may require the `writer` permission - // on the developer console project. 
- Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResourceInfo) Reset() { *m = ResourceInfo{} } -func (*ResourceInfo) ProtoMessage() {} -func (*ResourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{6} -} -func (m *ResourceInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceInfo.Merge(m, src) -} -func (m *ResourceInfo) XXX_Size() int { - return m.Size() -} -func (m *ResourceInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceInfo proto.InternalMessageInfo - -func (m *ResourceInfo) GetResourceType() string { - if m != nil { - return m.ResourceType - } - return "" -} - -func (m *ResourceInfo) GetResourceName() string { - if m != nil { - return m.ResourceName - } - return "" -} - -func (m *ResourceInfo) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *ResourceInfo) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (*ResourceInfo) XXX_MessageName() string { - return "google.rpc.ResourceInfo" -} - -// Provides links to documentation or for performing an out of band action. -// -// For example, if a quota check failed with an error indicating the calling -// project hasn't enabled the accessed service, this can contain a URL pointing -// directly to the right place in the developer console to flip the bit. -type Help struct { - // URL(s) pointing to additional information on handling the current error. - Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Help) Reset() { *m = Help{} } -func (*Help) ProtoMessage() {} -func (*Help) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{7} -} -func (m *Help) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Help) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Help.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Help) XXX_Merge(src proto.Message) { - xxx_messageInfo_Help.Merge(m, src) -} -func (m *Help) XXX_Size() int { - return m.Size() -} -func (m *Help) XXX_DiscardUnknown() { - xxx_messageInfo_Help.DiscardUnknown(m) -} - -var xxx_messageInfo_Help proto.InternalMessageInfo - -func (m *Help) GetLinks() []*Help_Link { - if m != nil { - return m.Links - } - return nil -} - -func (*Help) XXX_MessageName() string { - return "google.rpc.Help" -} - -// Describes a URL link. -type Help_Link struct { - // Describes what the link offers. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // The URL of the link. 
- Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Help_Link) Reset() { *m = Help_Link{} } -func (*Help_Link) ProtoMessage() {} -func (*Help_Link) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{7, 0} -} -func (m *Help_Link) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Help_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Help_Link.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Help_Link) XXX_Merge(src proto.Message) { - xxx_messageInfo_Help_Link.Merge(m, src) -} -func (m *Help_Link) XXX_Size() int { - return m.Size() -} -func (m *Help_Link) XXX_DiscardUnknown() { - xxx_messageInfo_Help_Link.DiscardUnknown(m) -} - -var xxx_messageInfo_Help_Link proto.InternalMessageInfo - -func (m *Help_Link) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *Help_Link) GetUrl() string { - if m != nil { - return m.Url - } - return "" -} - -func (*Help_Link) XXX_MessageName() string { - return "google.rpc.Help.Link" -} - -// Provides a localized error message that is safe to return to the user -// which can be attached to an RPC error. -type LocalizedMessage struct { - // The locale used following the specification defined at - // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. - // Examples are: "en-US", "fr-CH", "es-MX" - Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` - // The localized error message in the above locale. 
- Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *LocalizedMessage) Reset() { *m = LocalizedMessage{} } -func (*LocalizedMessage) ProtoMessage() {} -func (*LocalizedMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{8} -} -func (m *LocalizedMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LocalizedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LocalizedMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LocalizedMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_LocalizedMessage.Merge(m, src) -} -func (m *LocalizedMessage) XXX_Size() int { - return m.Size() -} -func (m *LocalizedMessage) XXX_DiscardUnknown() { - xxx_messageInfo_LocalizedMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_LocalizedMessage proto.InternalMessageInfo - -func (m *LocalizedMessage) GetLocale() string { - if m != nil { - return m.Locale - } - return "" -} - -func (m *LocalizedMessage) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (*LocalizedMessage) XXX_MessageName() string { - return "google.rpc.LocalizedMessage" -} -func init() { - proto.RegisterType((*RetryInfo)(nil), "google.rpc.RetryInfo") - proto.RegisterType((*DebugInfo)(nil), "google.rpc.DebugInfo") - proto.RegisterType((*QuotaFailure)(nil), "google.rpc.QuotaFailure") - proto.RegisterType((*QuotaFailure_Violation)(nil), "google.rpc.QuotaFailure.Violation") - proto.RegisterType((*PreconditionFailure)(nil), "google.rpc.PreconditionFailure") - proto.RegisterType((*PreconditionFailure_Violation)(nil), "google.rpc.PreconditionFailure.Violation") - proto.RegisterType((*BadRequest)(nil), "google.rpc.BadRequest") - proto.RegisterType((*BadRequest_FieldViolation)(nil), "google.rpc.BadRequest.FieldViolation") - proto.RegisterType((*RequestInfo)(nil), "google.rpc.RequestInfo") - proto.RegisterType((*ResourceInfo)(nil), "google.rpc.ResourceInfo") - proto.RegisterType((*Help)(nil), "google.rpc.Help") - proto.RegisterType((*Help_Link)(nil), "google.rpc.Help.Link") - proto.RegisterType((*LocalizedMessage)(nil), "google.rpc.LocalizedMessage") -} - -func init() { proto.RegisterFile("google/rpc/error_details.proto", fileDescriptor_851816e4d6b6361a) } - -var fileDescriptor_851816e4d6b6361a = []byte{ - // 624 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xbf, 0x6f, 0xd3, 0x40, - 0x18, 0xed, 0x35, 0x69, 0x91, 0xbf, 0x84, 0x52, 0xcc, 0x0f, 0x85, 0x48, 0x9c, 0x82, 0x11, 0x52, - 0x11, 0x92, 0x2b, 0x95, 0xad, 0x63, 0x48, 0x7f, 0x49, 0x05, 0x82, 0x85, 0x18, 0x60, 0xb0, 0x2e, - 0xf6, 0x97, 0xe8, 0xa8, 0xe3, 0x33, 0x67, 0xbb, 0xa8, 0x4c, 0xfc, 0x09, 0xec, 0x6c, 0x4c, 0xfd, - 0x27, 0xd8, 0x3b, 0x76, 0x64, 0x24, 0xe9, 0xc2, 0xd8, 0x91, 0x11, 0x9d, 0x7d, 0xd7, 0xba, 0x4d, - 0x41, 0x6c, 0x7e, 0xef, 0xde, 0x3d, 0xbf, 0xf7, 0xe9, 0xee, 0x80, 0x8e, 0x84, 0x18, 0x45, 0xb8, - 0x2a, 0x93, 0x60, 0x15, 0xa5, 0x14, 0xd2, 0x0f, 0x31, 0x63, 0x3c, 0x4a, 0xdd, 0x44, 0x8a, 0x4c, - 0xd8, 0x50, 0xae, 0xbb, 0x32, 0x09, 0xda, 0x46, 0x5b, 0xac, 0x0c, 0xf2, 0xe1, 0x6a, 0x98, 0x4b, - 0x96, 0x71, 0x11, 0x97, 0x5a, 0x67, 0x0b, 0x2c, 0x0f, 0x33, 0x79, 0xb0, 0x13, 0x0f, 0x85, 0xbd, 
- 0x0e, 0x0d, 0xa9, 0x80, 0x1f, 0x62, 0xc4, 0x0e, 0x5a, 0xa4, 0x43, 0x56, 0x1a, 0x6b, 0xf7, 0x5c, - 0x6d, 0x67, 0x2c, 0xdc, 0x9e, 0xb6, 0xf0, 0xa0, 0x50, 0xf7, 0x94, 0xd8, 0xd9, 0x06, 0xab, 0x87, - 0x83, 0x7c, 0x54, 0x18, 0x3d, 0x84, 0xeb, 0x69, 0xc6, 0x82, 0x3d, 0x1f, 0xe3, 0x4c, 0x72, 0x4c, - 0x5b, 0xa4, 0x53, 0x5b, 0xb1, 0xbc, 0x66, 0x41, 0x6e, 0x94, 0x9c, 0x7d, 0x17, 0x16, 0xcb, 0xdc, - 0xad, 0xf9, 0x0e, 0x59, 0xb1, 0x3c, 0x8d, 0x9c, 0xaf, 0x04, 0x9a, 0xaf, 0x72, 0x91, 0xb1, 0x4d, - 0xc6, 0xa3, 0x5c, 0xa2, 0xdd, 0x05, 0xd8, 0xe7, 0x22, 0x2a, 0xfe, 0x59, 0x5a, 0x35, 0xd6, 0x1c, - 0xf7, 0xbc, 0xa4, 0x5b, 0x55, 0xbb, 0x6f, 0x8c, 0xd4, 0xab, 0xec, 0x6a, 0x6f, 0x81, 0x75, 0xb6, - 0x60, 0xb7, 0xe0, 0x5a, 0x9a, 0x0f, 0xde, 0x63, 0x90, 0x15, 0x1d, 0x2d, 0xcf, 0x40, 0xbb, 0x03, - 0x8d, 0x10, 0xd3, 0x40, 0xf2, 0x44, 0x09, 0x75, 0xb0, 0x2a, 0xe5, 0x7c, 0x27, 0x70, 0xab, 0x2f, - 0x31, 0x10, 0x71, 0xc8, 0x15, 0x61, 0x42, 0xee, 0x5c, 0x11, 0xf2, 0x71, 0x35, 0xe4, 0x15, 0x9b, - 0xfe, 0x92, 0xf5, 0x5d, 0x35, 0xab, 0x0d, 0xf5, 0xec, 0x20, 0x41, 0x1d, 0xb4, 0xf8, 0xae, 0xe6, - 0x9f, 0xff, 0x67, 0xfe, 0xda, 0x6c, 0xfe, 0x43, 0x02, 0xd0, 0x65, 0xa1, 0x87, 0x1f, 0x72, 0x4c, - 0x33, 0xbb, 0x0f, 0xcb, 0x43, 0x8e, 0x51, 0xe8, 0xcf, 0x84, 0x7f, 0x54, 0x0d, 0x7f, 0xbe, 0xc3, - 0xdd, 0x54, 0xf2, 0xf3, 0xe0, 0x37, 0x86, 0x17, 0x70, 0xda, 0xde, 0x86, 0xa5, 0x8b, 0x12, 0xfb, - 0x36, 0x2c, 0x14, 0x22, 0xdd, 0xa1, 0x04, 0xff, 0x31, 0xea, 0x97, 0xd0, 0xd0, 0x3f, 0x2d, 0x0e, - 0xd5, 0x7d, 0x00, 0x59, 0x42, 0x9f, 0x1b, 0x2f, 0x4b, 0x33, 0x3b, 0xa1, 0xfd, 0x00, 0x9a, 0x29, - 0xca, 0x7d, 0x1e, 0x8f, 0xfc, 0x90, 0x65, 0xcc, 0x18, 0x6a, 0xae, 0xc7, 0x32, 0xe6, 0x7c, 0x21, - 0xd0, 0xf4, 0x30, 0x15, 0xb9, 0x0c, 0xd0, 0x9c, 0x53, 0xa9, 0xb1, 0x5f, 0x99, 0x72, 0xd3, 0x90, - 0xaf, 0xd5, 0xb4, 0xab, 0xa2, 0x98, 0x8d, 0x51, 0x3b, 0x9f, 0x89, 0x5e, 0xb0, 0x31, 0xaa, 0x8e, - 0xe2, 0x63, 0x8c, 0x52, 0x8f, 0xbc, 0x04, 0x97, 0x3b, 0xd6, 0x67, 0x3b, 0x0a, 0xa8, 0x6f, 0x63, - 0x94, 0xd8, 0x4f, 0x60, 0x21, 0xe2, 0xf1, 0x9e, 0x19, 0xfe, 0x9d, 0xea, 0xf0, 0x95, 0xc0, 0xdd, - 0xe5, 0xf1, 0x9e, 0x57, 0x6a, 0xda, 0xeb, 0x50, 0x57, 0xf0, 0xb2, 0x3d, 0x99, 0xb1, 0xb7, 0x97, - 0xa1, 0x96, 0x4b, 0x73, 0xc1, 0xd4, 0xa7, 0xd3, 0x83, 0xe5, 0x5d, 0x11, 0xb0, 0x88, 0x7f, 0xc2, - 0xf0, 0x39, 0xa6, 0x29, 0x1b, 0xa1, 0xba, 0x89, 0x91, 0xe2, 0x4c, 0x7f, 0x8d, 0xd4, 0x39, 0x1b, - 0x97, 0x12, 0x73, 0xce, 0x34, 0xec, 0x86, 0xc7, 0x13, 0x3a, 0xf7, 0x63, 0x42, 0xe7, 0x4e, 0x27, - 0x94, 0xfc, 0x9e, 0x50, 0xf2, 0x79, 0x4a, 0xc9, 0xe1, 0x94, 0x92, 0xa3, 0x29, 0x25, 0xc7, 0x53, - 0x4a, 0x7e, 0x4e, 0x29, 0xf9, 0x35, 0xa5, 0x73, 0xa7, 0x8a, 0x3f, 0xa1, 0xe4, 0xe8, 0x84, 0x12, - 0x58, 0x0a, 0xc4, 0xb8, 0x52, 0xac, 0x7b, 0x73, 0x43, 0xbd, 0x5e, 0xbd, 0xf2, 0xf1, 0xea, 0xab, - 0xe7, 0xa5, 0x4f, 0xde, 0xd6, 0x64, 0x12, 0x7c, 0x9b, 0xaf, 0x79, 0xfd, 0x67, 0x83, 0xc5, 0xe2, - 0xc9, 0x79, 0xfa, 0x27, 0x00, 0x00, 0xff, 0xff, 0x63, 0xe4, 0x76, 0x26, 0xf1, 0x04, 0x00, 0x00, -} - -func (this *RetryInfo) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*RetryInfo) - if !ok { - that2, ok := that.(RetryInfo) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if c := this.RetryDelay.Compare(that1.RetryDelay); c != 0 { - return c - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *DebugInfo) Compare(that interface{}) int { 
- if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*DebugInfo) - if !ok { - that2, ok := that.(DebugInfo) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if len(this.StackEntries) != len(that1.StackEntries) { - if len(this.StackEntries) < len(that1.StackEntries) { - return -1 - } - return 1 - } - for i := range this.StackEntries { - if this.StackEntries[i] != that1.StackEntries[i] { - if this.StackEntries[i] < that1.StackEntries[i] { - return -1 - } - return 1 - } - } - if this.Detail != that1.Detail { - if this.Detail < that1.Detail { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *QuotaFailure) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*QuotaFailure) - if !ok { - that2, ok := that.(QuotaFailure) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if len(this.Violations) != len(that1.Violations) { - if len(this.Violations) < len(that1.Violations) { - return -1 - } - return 1 - } - for i := range this.Violations { - if c := this.Violations[i].Compare(that1.Violations[i]); c != 0 { - return c - } - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *QuotaFailure_Violation) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*QuotaFailure_Violation) - if !ok { - that2, ok := that.(QuotaFailure_Violation) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.Subject != that1.Subject { - if this.Subject < that1.Subject { - return -1 - } - return 1 - } - if this.Description != that1.Description { - if this.Description < that1.Description { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *PreconditionFailure) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*PreconditionFailure) - if !ok { - that2, ok := that.(PreconditionFailure) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if len(this.Violations) != len(that1.Violations) { - if len(this.Violations) < len(that1.Violations) { - return -1 - } - return 1 - } - for i := range this.Violations { - if c := this.Violations[i].Compare(that1.Violations[i]); c != 0 { - return c - } - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *PreconditionFailure_Violation) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*PreconditionFailure_Violation) - if !ok { - that2, ok := that.(PreconditionFailure_Violation) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.Type != that1.Type { - if 
this.Type < that1.Type { - return -1 - } - return 1 - } - if this.Subject != that1.Subject { - if this.Subject < that1.Subject { - return -1 - } - return 1 - } - if this.Description != that1.Description { - if this.Description < that1.Description { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *BadRequest) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*BadRequest) - if !ok { - that2, ok := that.(BadRequest) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if len(this.FieldViolations) != len(that1.FieldViolations) { - if len(this.FieldViolations) < len(that1.FieldViolations) { - return -1 - } - return 1 - } - for i := range this.FieldViolations { - if c := this.FieldViolations[i].Compare(that1.FieldViolations[i]); c != 0 { - return c - } - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *BadRequest_FieldViolation) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*BadRequest_FieldViolation) - if !ok { - that2, ok := that.(BadRequest_FieldViolation) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.Field != that1.Field { - if this.Field < that1.Field { - return -1 - } - return 1 - } - if this.Description != that1.Description { - if this.Description < that1.Description { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *RequestInfo) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*RequestInfo) - if !ok { - that2, ok := that.(RequestInfo) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.RequestId != that1.RequestId { - if this.RequestId < that1.RequestId { - return -1 - } - return 1 - } - if this.ServingData != that1.ServingData { - if this.ServingData < that1.ServingData { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *ResourceInfo) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*ResourceInfo) - if !ok { - that2, ok := that.(ResourceInfo) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.ResourceType != that1.ResourceType { - if this.ResourceType < that1.ResourceType { - return -1 - } - return 1 - } - if this.ResourceName != that1.ResourceName { - if this.ResourceName < that1.ResourceName { - return -1 - } - return 1 - } - if this.Owner != that1.Owner { - if this.Owner < that1.Owner { - return -1 - } - return 1 - } - if this.Description != that1.Description { - if this.Description < that1.Description { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - 
return 0 -} -func (this *Help) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*Help) - if !ok { - that2, ok := that.(Help) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if len(this.Links) != len(that1.Links) { - if len(this.Links) < len(that1.Links) { - return -1 - } - return 1 - } - for i := range this.Links { - if c := this.Links[i].Compare(that1.Links[i]); c != 0 { - return c - } - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *Help_Link) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*Help_Link) - if !ok { - that2, ok := that.(Help_Link) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.Description != that1.Description { - if this.Description < that1.Description { - return -1 - } - return 1 - } - if this.Url != that1.Url { - if this.Url < that1.Url { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *LocalizedMessage) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*LocalizedMessage) - if !ok { - that2, ok := that.(LocalizedMessage) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.Locale != that1.Locale { - if this.Locale < that1.Locale { - return -1 - } - return 1 - } - if this.Message != that1.Message { - if this.Message < that1.Message { - return -1 - } - return 1 - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *RetryInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*RetryInfo) - if !ok { - that2, ok := that.(RetryInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.RetryDelay.Equal(that1.RetryDelay) { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *DebugInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*DebugInfo) - if !ok { - that2, ok := that.(DebugInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.StackEntries) != len(that1.StackEntries) { - return false - } - for i := range this.StackEntries { - if this.StackEntries[i] != that1.StackEntries[i] { - return false - } - } - if this.Detail != that1.Detail { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *QuotaFailure) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QuotaFailure) - if !ok { - that2, ok := that.(QuotaFailure) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == 
nil { - return false - } - if len(this.Violations) != len(that1.Violations) { - return false - } - for i := range this.Violations { - if !this.Violations[i].Equal(that1.Violations[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *QuotaFailure_Violation) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*QuotaFailure_Violation) - if !ok { - that2, ok := that.(QuotaFailure_Violation) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Subject != that1.Subject { - return false - } - if this.Description != that1.Description { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *PreconditionFailure) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PreconditionFailure) - if !ok { - that2, ok := that.(PreconditionFailure) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Violations) != len(that1.Violations) { - return false - } - for i := range this.Violations { - if !this.Violations[i].Equal(that1.Violations[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *PreconditionFailure_Violation) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*PreconditionFailure_Violation) - if !ok { - that2, ok := that.(PreconditionFailure_Violation) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if this.Subject != that1.Subject { - return false - } - if this.Description != that1.Description { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *BadRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BadRequest) - if !ok { - that2, ok := that.(BadRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.FieldViolations) != len(that1.FieldViolations) { - return false - } - for i := range this.FieldViolations { - if !this.FieldViolations[i].Equal(that1.FieldViolations[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *BadRequest_FieldViolation) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BadRequest_FieldViolation) - if !ok { - that2, ok := that.(BadRequest_FieldViolation) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Field != that1.Field { - return false - } - if this.Description != that1.Description { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RequestInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := 
that.(*RequestInfo) - if !ok { - that2, ok := that.(RequestInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.RequestId != that1.RequestId { - return false - } - if this.ServingData != that1.ServingData { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *ResourceInfo) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResourceInfo) - if !ok { - that2, ok := that.(ResourceInfo) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ResourceType != that1.ResourceType { - return false - } - if this.ResourceName != that1.ResourceName { - return false - } - if this.Owner != that1.Owner { - return false - } - if this.Description != that1.Description { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Help) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Help) - if !ok { - that2, ok := that.(Help) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if len(this.Links) != len(that1.Links) { - return false - } - for i := range this.Links { - if !this.Links[i].Equal(that1.Links[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Help_Link) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Help_Link) - if !ok { - that2, ok := that.(Help_Link) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Description != that1.Description { - return false - } - if this.Url != that1.Url { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *LocalizedMessage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*LocalizedMessage) - if !ok { - that2, ok := that.(LocalizedMessage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Locale != that1.Locale { - return false - } - if this.Message != that1.Message { - return false - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *RetryInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.RetryInfo{") - if this.RetryDelay != nil { - s = append(s, "RetryDelay: "+fmt.Sprintf("%#v", this.RetryDelay)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *DebugInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.DebugInfo{") - s = append(s, "StackEntries: "+fmt.Sprintf("%#v", this.StackEntries)+",\n") - s = append(s, "Detail: "+fmt.Sprintf("%#v", this.Detail)+",\n") - if this.XXX_unrecognized != nil { - s = 
append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QuotaFailure) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.QuotaFailure{") - if this.Violations != nil { - s = append(s, "Violations: "+fmt.Sprintf("%#v", this.Violations)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *QuotaFailure_Violation) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.QuotaFailure_Violation{") - s = append(s, "Subject: "+fmt.Sprintf("%#v", this.Subject)+",\n") - s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PreconditionFailure) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.PreconditionFailure{") - if this.Violations != nil { - s = append(s, "Violations: "+fmt.Sprintf("%#v", this.Violations)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *PreconditionFailure_Violation) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&rpc.PreconditionFailure_Violation{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - s = append(s, "Subject: "+fmt.Sprintf("%#v", this.Subject)+",\n") - s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *BadRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.BadRequest{") - if this.FieldViolations != nil { - s = append(s, "FieldViolations: "+fmt.Sprintf("%#v", this.FieldViolations)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *BadRequest_FieldViolation) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.BadRequest_FieldViolation{") - s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") - s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *RequestInfo) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.RequestInfo{") - s = append(s, "RequestId: "+fmt.Sprintf("%#v", this.RequestId)+",\n") - s = append(s, "ServingData: "+fmt.Sprintf("%#v", this.ServingData)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *ResourceInfo) GoString() string { - 
if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&rpc.ResourceInfo{") - s = append(s, "ResourceType: "+fmt.Sprintf("%#v", this.ResourceType)+",\n") - s = append(s, "ResourceName: "+fmt.Sprintf("%#v", this.ResourceName)+",\n") - s = append(s, "Owner: "+fmt.Sprintf("%#v", this.Owner)+",\n") - s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Help) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&rpc.Help{") - if this.Links != nil { - s = append(s, "Links: "+fmt.Sprintf("%#v", this.Links)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *Help_Link) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.Help_Link{") - s = append(s, "Description: "+fmt.Sprintf("%#v", this.Description)+",\n") - s = append(s, "Url: "+fmt.Sprintf("%#v", this.Url)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *LocalizedMessage) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&rpc.LocalizedMessage{") - s = append(s, "Locale: "+fmt.Sprintf("%#v", this.Locale)+",\n") - s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringErrorDetails(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *RetryInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RetryInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RetryInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.RetryDelay != nil { - { - size, err := m.RetryDelay.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintErrorDetails(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DebugInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DebugInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DebugInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], 
m.XXX_unrecognized) - } - if len(m.Detail) > 0 { - i -= len(m.Detail) - copy(dAtA[i:], m.Detail) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Detail))) - i-- - dAtA[i] = 0x12 - } - if len(m.StackEntries) > 0 { - for iNdEx := len(m.StackEntries) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.StackEntries[iNdEx]) - copy(dAtA[i:], m.StackEntries[iNdEx]) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.StackEntries[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QuotaFailure) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QuotaFailure) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QuotaFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Violations) > 0 { - for iNdEx := len(m.Violations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Violations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintErrorDetails(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QuotaFailure_Violation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QuotaFailure_Violation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QuotaFailure_Violation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Subject) > 0 { - i -= len(m.Subject) - copy(dAtA[i:], m.Subject) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PreconditionFailure) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PreconditionFailure) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PreconditionFailure) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Violations) > 0 { - for iNdEx := len(m.Violations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Violations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintErrorDetails(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PreconditionFailure_Violation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], 
nil -} - -func (m *PreconditionFailure_Violation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PreconditionFailure_Violation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x1a - } - if len(m.Subject) > 0 { - i -= len(m.Subject) - copy(dAtA[i:], m.Subject) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject))) - i-- - dAtA[i] = 0x12 - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *BadRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BadRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BadRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.FieldViolations) > 0 { - for iNdEx := len(m.FieldViolations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FieldViolations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintErrorDetails(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *BadRequest_FieldViolation) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BadRequest_FieldViolation) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BadRequest_FieldViolation) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x12 - } - if len(m.Field) > 0 { - i -= len(m.Field) - copy(dAtA[i:], m.Field) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Field))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RequestInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ServingData) > 0 { - i -= len(m.ServingData) - copy(dAtA[i:], m.ServingData) - i = encodeVarintErrorDetails(dAtA, i, 
uint64(len(m.ServingData))) - i-- - dAtA[i] = 0x12 - } - if len(m.RequestId) > 0 { - i -= len(m.RequestId) - copy(dAtA[i:], m.RequestId) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.RequestId))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResourceInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x1a - } - if len(m.ResourceName) > 0 { - i -= len(m.ResourceName) - copy(dAtA[i:], m.ResourceName) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceName))) - i-- - dAtA[i] = 0x12 - } - if len(m.ResourceType) > 0 { - i -= len(m.ResourceType) - copy(dAtA[i:], m.ResourceType) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceType))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Help) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Help) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Help) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Links) > 0 { - for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintErrorDetails(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Help_Link) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Help_Link) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Help_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Url) > 0 { - i -= len(m.Url) - copy(dAtA[i:], m.Url) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Url))) - i-- - dAtA[i] = 0x12 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *LocalizedMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - 
if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LocalizedMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LocalizedMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - } - if len(m.Locale) > 0 { - i -= len(m.Locale) - copy(dAtA[i:], m.Locale) - i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Locale))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintErrorDetails(dAtA []byte, offset int, v uint64) int { - offset -= sovErrorDetails(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func NewPopulatedRetryInfo(r randyErrorDetails, easy bool) *RetryInfo { - this := &RetryInfo{} - if r.Intn(5) != 0 { - this.RetryDelay = types.NewPopulatedDuration(r, easy) - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) - } - return this -} - -func NewPopulatedDebugInfo(r randyErrorDetails, easy bool) *DebugInfo { - this := &DebugInfo{} - v1 := r.Intn(10) - this.StackEntries = make([]string, v1) - for i := 0; i < v1; i++ { - this.StackEntries[i] = string(randStringErrorDetails(r)) - } - this.Detail = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) - } - return this -} - -func NewPopulatedQuotaFailure(r randyErrorDetails, easy bool) *QuotaFailure { - this := &QuotaFailure{} - if r.Intn(5) != 0 { - v2 := r.Intn(5) - this.Violations = make([]*QuotaFailure_Violation, v2) - for i := 0; i < v2; i++ { - this.Violations[i] = NewPopulatedQuotaFailure_Violation(r, easy) - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) - } - return this -} - -func NewPopulatedQuotaFailure_Violation(r randyErrorDetails, easy bool) *QuotaFailure_Violation { - this := &QuotaFailure_Violation{} - this.Subject = string(randStringErrorDetails(r)) - this.Description = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) - } - return this -} - -func NewPopulatedPreconditionFailure(r randyErrorDetails, easy bool) *PreconditionFailure { - this := &PreconditionFailure{} - if r.Intn(5) != 0 { - v3 := r.Intn(5) - this.Violations = make([]*PreconditionFailure_Violation, v3) - for i := 0; i < v3; i++ { - this.Violations[i] = NewPopulatedPreconditionFailure_Violation(r, easy) - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) - } - return this -} - -func NewPopulatedPreconditionFailure_Violation(r randyErrorDetails, easy bool) *PreconditionFailure_Violation { - this := &PreconditionFailure_Violation{} - this.Type = string(randStringErrorDetails(r)) - this.Subject = string(randStringErrorDetails(r)) - this.Description = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 4) - } - return this -} - -func NewPopulatedBadRequest(r randyErrorDetails, easy bool) *BadRequest { - this := &BadRequest{} - if r.Intn(5) != 0 { - v4 := r.Intn(5) - 
this.FieldViolations = make([]*BadRequest_FieldViolation, v4) - for i := 0; i < v4; i++ { - this.FieldViolations[i] = NewPopulatedBadRequest_FieldViolation(r, easy) - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) - } - return this -} - -func NewPopulatedBadRequest_FieldViolation(r randyErrorDetails, easy bool) *BadRequest_FieldViolation { - this := &BadRequest_FieldViolation{} - this.Field = string(randStringErrorDetails(r)) - this.Description = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) - } - return this -} - -func NewPopulatedRequestInfo(r randyErrorDetails, easy bool) *RequestInfo { - this := &RequestInfo{} - this.RequestId = string(randStringErrorDetails(r)) - this.ServingData = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) - } - return this -} - -func NewPopulatedResourceInfo(r randyErrorDetails, easy bool) *ResourceInfo { - this := &ResourceInfo{} - this.ResourceType = string(randStringErrorDetails(r)) - this.ResourceName = string(randStringErrorDetails(r)) - this.Owner = string(randStringErrorDetails(r)) - this.Description = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 5) - } - return this -} - -func NewPopulatedHelp(r randyErrorDetails, easy bool) *Help { - this := &Help{} - if r.Intn(5) != 0 { - v5 := r.Intn(5) - this.Links = make([]*Help_Link, v5) - for i := 0; i < v5; i++ { - this.Links[i] = NewPopulatedHelp_Link(r, easy) - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 2) - } - return this -} - -func NewPopulatedHelp_Link(r randyErrorDetails, easy bool) *Help_Link { - this := &Help_Link{} - this.Description = string(randStringErrorDetails(r)) - this.Url = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) - } - return this -} - -func NewPopulatedLocalizedMessage(r randyErrorDetails, easy bool) *LocalizedMessage { - this := &LocalizedMessage{} - this.Locale = string(randStringErrorDetails(r)) - this.Message = string(randStringErrorDetails(r)) - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 3) - } - return this -} - -type randyErrorDetails interface { - Float32() float32 - Float64() float64 - Int63() int64 - Int31() int32 - Uint32() uint32 - Intn(n int) int -} - -func randUTF8RuneErrorDetails(r randyErrorDetails) rune { - ru := r.Intn(62) - if ru < 10 { - return rune(ru + 48) - } else if ru < 36 { - return rune(ru + 55) - } - return rune(ru + 61) -} -func randStringErrorDetails(r randyErrorDetails) string { - v6 := r.Intn(100) - tmps := make([]rune, v6) - for i := 0; i < v6; i++ { - tmps[i] = randUTF8RuneErrorDetails(r) - } - return string(tmps) -} -func randUnrecognizedErrorDetails(r randyErrorDetails, maxFieldNumber int) (dAtA []byte) { - l := r.Intn(5) - for i := 0; i < l; i++ { - wire := r.Intn(4) - if wire == 3 { - wire = 5 - } - fieldNumber := maxFieldNumber + r.Intn(100) - dAtA = randFieldErrorDetails(dAtA, r, fieldNumber, wire) - } - return dAtA -} -func randFieldErrorDetails(dAtA []byte, r randyErrorDetails, fieldNumber int, wire int) []byte { - key := uint32(fieldNumber)<<3 | uint32(wire) - switch wire { - case 0: - dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) - v7 := r.Int63() - if r.Intn(2) == 0 { - 
v7 *= -1 - } - dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(v7)) - case 1: - dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - case 2: - dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) - ll := r.Intn(100) - dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(ll)) - for j := 0; j < ll; j++ { - dAtA = append(dAtA, byte(r.Intn(256))) - } - default: - dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - } - return dAtA -} -func encodeVarintPopulateErrorDetails(dAtA []byte, v uint64) []byte { - for v >= 1<<7 { - dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) - v >>= 7 - } - dAtA = append(dAtA, uint8(v)) - return dAtA -} -func (m *RetryInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RetryDelay != nil { - l = m.RetryDelay.Size() - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DebugInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.StackEntries) > 0 { - for _, s := range m.StackEntries { - l = len(s) - n += 1 + l + sovErrorDetails(uint64(l)) - } - } - l = len(m.Detail) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *QuotaFailure) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Violations) > 0 { - for _, e := range m.Violations { - l = e.Size() - n += 1 + l + sovErrorDetails(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *QuotaFailure_Violation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Subject) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PreconditionFailure) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Violations) > 0 { - for _, e := range m.Violations { - l = e.Size() - n += 1 + l + sovErrorDetails(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PreconditionFailure_Violation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Subject) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BadRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.FieldViolations) > 0 { - for _, e := range m.FieldViolations { - l = e.Size() - n += 1 + l + sovErrorDetails(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BadRequest_FieldViolation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Field) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + 
sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RequestInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.RequestId) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.ServingData) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResourceInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ResourceType) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.ResourceName) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Help) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Links) > 0 { - for _, e := range m.Links { - l = e.Size() - n += 1 + l + sovErrorDetails(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Help_Link) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Description) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Url) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *LocalizedMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Locale) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - l = len(m.Message) - if l > 0 { - n += 1 + l + sovErrorDetails(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovErrorDetails(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozErrorDetails(x uint64) (n int) { - return sovErrorDetails(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *RetryInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RetryInfo{`, - `RetryDelay:` + strings.Replace(fmt.Sprintf("%v", this.RetryDelay), "Duration", "types.Duration", 1) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *DebugInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DebugInfo{`, - `StackEntries:` + fmt.Sprintf("%v", this.StackEntries) + `,`, - `Detail:` + fmt.Sprintf("%v", this.Detail) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *QuotaFailure) String() string { - if this == nil { - return "nil" - } - repeatedStringForViolations := "[]*QuotaFailure_Violation{" - for _, f := range this.Violations { - repeatedStringForViolations += strings.Replace(fmt.Sprintf("%v", f), "QuotaFailure_Violation", "QuotaFailure_Violation", 1) + "," - } - repeatedStringForViolations += "}" - s := strings.Join([]string{`&QuotaFailure{`, - `Violations:` + repeatedStringForViolations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *QuotaFailure_Violation) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QuotaFailure_Violation{`, - `Subject:` + fmt.Sprintf("%v", 
this.Subject) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PreconditionFailure) String() string { - if this == nil { - return "nil" - } - repeatedStringForViolations := "[]*PreconditionFailure_Violation{" - for _, f := range this.Violations { - repeatedStringForViolations += strings.Replace(fmt.Sprintf("%v", f), "PreconditionFailure_Violation", "PreconditionFailure_Violation", 1) + "," - } - repeatedStringForViolations += "}" - s := strings.Join([]string{`&PreconditionFailure{`, - `Violations:` + repeatedStringForViolations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *PreconditionFailure_Violation) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PreconditionFailure_Violation{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Subject:` + fmt.Sprintf("%v", this.Subject) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BadRequest) String() string { - if this == nil { - return "nil" - } - repeatedStringForFieldViolations := "[]*BadRequest_FieldViolation{" - for _, f := range this.FieldViolations { - repeatedStringForFieldViolations += strings.Replace(fmt.Sprintf("%v", f), "BadRequest_FieldViolation", "BadRequest_FieldViolation", 1) + "," - } - repeatedStringForFieldViolations += "}" - s := strings.Join([]string{`&BadRequest{`, - `FieldViolations:` + repeatedStringForFieldViolations + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *BadRequest_FieldViolation) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BadRequest_FieldViolation{`, - `Field:` + fmt.Sprintf("%v", this.Field) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *RequestInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RequestInfo{`, - `RequestId:` + fmt.Sprintf("%v", this.RequestId) + `,`, - `ServingData:` + fmt.Sprintf("%v", this.ServingData) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceInfo{`, - `ResourceType:` + fmt.Sprintf("%v", this.ResourceType) + `,`, - `ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`, - `Owner:` + fmt.Sprintf("%v", this.Owner) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Help) String() string { - if this == nil { - return "nil" - } - repeatedStringForLinks := "[]*Help_Link{" - for _, f := range this.Links { - repeatedStringForLinks += strings.Replace(fmt.Sprintf("%v", f), "Help_Link", "Help_Link", 1) + "," - } - repeatedStringForLinks += "}" - s := strings.Join([]string{`&Help{`, - `Links:` + repeatedStringForLinks + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *Help_Link) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&Help_Link{`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Url:` + fmt.Sprintf("%v", this.Url) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func (this *LocalizedMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LocalizedMessage{`, - `Locale:` + fmt.Sprintf("%v", this.Locale) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringErrorDetails(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *RetryInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RetryInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RetryInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryDelay", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RetryDelay == nil { - m.RetryDelay = &types.Duration{} - } - if err := m.RetryDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DebugInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DebugInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DebugInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StackEntries", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StackEntries = append(m.StackEntries, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Detail = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QuotaFailure) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QuotaFailure: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QuotaFailure: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Violations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Violations = append(m.Violations, &QuotaFailure_Violation{}) - if err := m.Violations[len(m.Violations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QuotaFailure_Violation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Violation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Violation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subject = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PreconditionFailure) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PreconditionFailure: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PreconditionFailure: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Violations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Violations = append(m.Violations, &PreconditionFailure_Violation{}) - if err := m.Violations[len(m.Violations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PreconditionFailure_Violation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Violation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Violation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subject = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BadRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BadRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BadRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldViolations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldViolations = append(m.FieldViolations, &BadRequest_FieldViolation{}) - if err := m.FieldViolations[len(m.FieldViolations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BadRequest_FieldViolation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FieldViolation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FieldViolation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Field = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequestId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServingData", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServingData = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Help) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Help: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Help: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Links = append(m.Links, &Help_Link{}) - if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Help_Link) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Link: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Url = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LocalizedMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LocalizedMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LocalizedMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Locale", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Locale = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthErrorDetails - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthErrorDetails - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipErrorDetails(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipErrorDetails(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowErrorDetails - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthErrorDetails - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupErrorDetails - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthErrorDetails - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthErrorDetails = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowErrorDetails = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupErrorDetails = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto b/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto deleted file mode 100644 index 0682cc97bb89..000000000000 --- a/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -import "google/protobuf/duration.proto"; - -option go_package = "rpc"; -option java_multiple_files = true; -option java_outer_classname = "ErrorDetailsProto"; -option java_package = "com.google.rpc"; -option objc_class_prefix = "RPC"; - -// Describes when the clients can retry a failed request. Clients could ignore -// the recommendation here or retry when this information is missing from error -// responses. -// -// It's always recommended that clients should use exponential backoff when -// retrying. -// -// Clients should wait until `retry_delay` amount of time has passed since -// receiving the error response before retrying. 
If retrying requests also -// fail, clients should use an exponential backoff scheme to gradually increase -// the delay between retries based on `retry_delay`, until either a maximum -// number of retires have been reached or a maximum retry delay cap has been -// reached. -message RetryInfo { - // Clients should wait at least this long between retrying the same request. - google.protobuf.Duration retry_delay = 1; -} - -// Describes additional debugging info. -message DebugInfo { - // The stack trace entries indicating where the error occurred. - repeated string stack_entries = 1; - - // Additional debugging information provided by the server. - string detail = 2; -} - -// Describes how a quota check failed. -// -// For example if a daily limit was exceeded for the calling project, -// a service could respond with a QuotaFailure detail containing the project -// id and the description of the quota limit that was exceeded. If the -// calling project hasn't enabled the service in the developer console, then -// a service could respond with the project id and set `service_disabled` -// to true. -// -// Also see RetryDetail and Help types for other details about handling a -// quota failure. -message QuotaFailure { - // A message type used to describe a single quota violation. For example, a - // daily quota or a custom quota that was exceeded. - message Violation { - // The subject on which the quota check failed. - // For example, "clientip:" or "project:". - string subject = 1; - - // A description of how the quota check failed. Clients can use this - // description to find more about the quota configuration in the service's - // public documentation, or find the relevant quota limit to adjust through - // developer console. - // - // For example: "Service disabled" or "Daily Limit for read operations - // exceeded". - string description = 2; - } - - // Describes all quota violations. - repeated Violation violations = 1; -} - -// Describes what preconditions have failed. -// -// For example, if an RPC failed because it required the Terms of Service to be -// acknowledged, it could list the terms of service violation in the -// PreconditionFailure message. -message PreconditionFailure { - // A message type used to describe a single precondition failure. - message Violation { - // The type of PreconditionFailure. We recommend using a service-specific - // enum type to define the supported precondition violation types. For - // example, "TOS" for "Terms of Service violation". - string type = 1; - - // The subject, relative to the type, that failed. - // For example, "google.com/cloud" relative to the "TOS" type would - // indicate which terms of service is being referenced. - string subject = 2; - - // A description of how the precondition failed. Developers can use this - // description to understand how to fix the failure. - // - // For example: "Terms of service not accepted". - string description = 3; - } - - // Describes all precondition violations. - repeated Violation violations = 1; -} - -// Describes violations in a client request. This error type focuses on the -// syntactic aspects of the request. -message BadRequest { - // A message type used to describe a single bad request field. - message FieldViolation { - // A path leading to a field in the request body. The value will be a - // sequence of dot-separated identifiers that identify a protocol buffer - // field. E.g., "field_violations.field" would identify this field. 
- string field = 1; - - // A description of why the request element is bad. - string description = 2; - } - - // Describes all violations in a client request. - repeated FieldViolation field_violations = 1; -} - -// Contains metadata about the request that clients can attach when filing a bug -// or providing other forms of feedback. -message RequestInfo { - // An opaque string that should only be interpreted by the service generating - // it. For example, it can be used to identify requests in the service's logs. - string request_id = 1; - - // Any data that was used to serve this request. For example, an encrypted - // stack trace that can be sent back to the service provider for debugging. - string serving_data = 2; -} - -// Describes the resource that is being accessed. -message ResourceInfo { - // A name for the type of resource being accessed, e.g. "sql table", - // "cloud storage bucket", "file", "Google calendar"; or the type URL - // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". - string resource_type = 1; - - // The name of the resource being accessed. For example, a shared calendar - // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is - // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. - string resource_name = 2; - - // The owner of the resource (optional). - // For example, "user:" or "project:". - string owner = 3; - - // Describes what error is encountered when accessing this resource. - // For example, updating a cloud project may require the `writer` permission - // on the developer console project. - string description = 4; -} - -// Provides links to documentation or for performing an out of band action. -// -// For example, if a quota check failed with an error indicating the calling -// project hasn't enabled the accessed service, this can contain a URL pointing -// directly to the right place in the developer console to flip the bit. -message Help { - // Describes a URL link. - message Link { - // Describes what the link offers. - string description = 1; - - // The URL of the link. - string url = 2; - } - - // URL(s) pointing to additional information on handling the current error. - repeated Link links = 1; -} - -// Provides a localized error message that is safe to return to the user -// which can be attached to an RPC error. -message LocalizedMessage { - // The locale used following the specification defined at - // http://www.rfc-editor.org/rfc/bcp/bcp47.txt. - // Examples are: "en-US", "fr-CH", "es-MX" - string locale = 1; - - // The localized error message in the above locale. - string message = 2; -} diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go deleted file mode 100644 index 59793ba3809c..000000000000 --- a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go +++ /dev/null @@ -1,731 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: google/rpc/status.proto - -package rpc - -import ( - bytes "bytes" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/gogo/protobuf/types" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes -// if needed. The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. -type Status struct { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. - Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. 
- Details []*types.Any `protobuf:"bytes,3,rep,name=details,proto3" json:"details,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_24d244abaf643bfe, []int{0} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Status.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return m.Size() -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} - -var xxx_messageInfo_Status proto.InternalMessageInfo - -func (m *Status) GetCode() int32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *Status) GetMessage() string { - if m != nil { - return m.Message - } - return "" -} - -func (m *Status) GetDetails() []*types.Any { - if m != nil { - return m.Details - } - return nil -} - -func (*Status) XXX_MessageName() string { - return "google.rpc.Status" -} -func init() { - proto.RegisterType((*Status)(nil), "google.rpc.Status") -} - -func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } - -var fileDescriptor_24d244abaf643bfe = []byte{ - // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, - 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, - 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, - 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, - 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, - 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, - 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, - 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, - 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xb8, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, - 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, - 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, - 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x8b, 0x2f, 0x39, 0x3f, - 0x57, 0x0f, 0xe1, 0x11, 0x27, 0x6e, 0x88, 0x5b, 0x03, 0x40, 0x56, 0x04, 0x30, 0x46, 0x31, 0x17, - 0x15, 0x24, 0x2f, 0x62, 0x62, 0x0e, 0x0a, 0x70, 0x4e, 0x62, 0x03, 0x5b, 0x6b, 0x0c, 0x08, 0x00, - 0x00, 0xff, 0xff, 0xaa, 0x06, 0xa1, 0xaa, 0x10, 0x01, 0x00, 0x00, -} - -func (this *Status) Compare(that interface{}) int { - if that == nil { - if this == nil { - return 0 - } - return 1 - } - - that1, ok := that.(*Status) - if !ok { - that2, ok := that.(Status) - if ok { - that1 = &that2 - } else { - return 1 - } - } - if that1 == nil { - if this == nil { - return 0 - } - return 1 - } else if this == nil { - return -1 - } - if this.Code != that1.Code { - if this.Code < that1.Code { - 
return -1 - } - return 1 - } - if this.Message != that1.Message { - if this.Message < that1.Message { - return -1 - } - return 1 - } - if len(this.Details) != len(that1.Details) { - if len(this.Details) < len(that1.Details) { - return -1 - } - return 1 - } - for i := range this.Details { - if c := this.Details[i].Compare(that1.Details[i]); c != 0 { - return c - } - } - if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { - return c - } - return 0 -} -func (this *Status) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Status) - if !ok { - that2, ok := that.(Status) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Code != that1.Code { - return false - } - if this.Message != that1.Message { - return false - } - if len(this.Details) != len(that1.Details) { - return false - } - for i := range this.Details { - if !this.Details[i].Equal(that1.Details[i]) { - return false - } - } - if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { - return false - } - return true -} -func (this *Status) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&rpc.Status{") - s = append(s, "Code: "+fmt.Sprintf("%#v", this.Code)+",\n") - s = append(s, "Message: "+fmt.Sprintf("%#v", this.Message)+",\n") - if this.Details != nil { - s = append(s, "Details: "+fmt.Sprintf("%#v", this.Details)+",\n") - } - if this.XXX_unrecognized != nil { - s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringStatus(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Status) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Status) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Details) > 0 { - for iNdEx := len(m.Details) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Details[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStatus(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Message) > 0 { - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintStatus(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - } - if m.Code != 0 { - i = encodeVarintStatus(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintStatus(dAtA []byte, offset int, v uint64) int { - offset -= sovStatus(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func NewPopulatedStatus(r randyStatus, easy bool) *Status { - this := &Status{} - this.Code = int32(r.Int31()) - if r.Intn(2) == 0 { - this.Code *= -1 - } - this.Message = string(randStringStatus(r)) - if r.Intn(5) != 0 { - 
v1 := r.Intn(5) - this.Details = make([]*types.Any, v1) - for i := 0; i < v1; i++ { - this.Details[i] = types.NewPopulatedAny(r, easy) - } - } - if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedStatus(r, 4) - } - return this -} - -type randyStatus interface { - Float32() float32 - Float64() float64 - Int63() int64 - Int31() int32 - Uint32() uint32 - Intn(n int) int -} - -func randUTF8RuneStatus(r randyStatus) rune { - ru := r.Intn(62) - if ru < 10 { - return rune(ru + 48) - } else if ru < 36 { - return rune(ru + 55) - } - return rune(ru + 61) -} -func randStringStatus(r randyStatus) string { - v2 := r.Intn(100) - tmps := make([]rune, v2) - for i := 0; i < v2; i++ { - tmps[i] = randUTF8RuneStatus(r) - } - return string(tmps) -} -func randUnrecognizedStatus(r randyStatus, maxFieldNumber int) (dAtA []byte) { - l := r.Intn(5) - for i := 0; i < l; i++ { - wire := r.Intn(4) - if wire == 3 { - wire = 5 - } - fieldNumber := maxFieldNumber + r.Intn(100) - dAtA = randFieldStatus(dAtA, r, fieldNumber, wire) - } - return dAtA -} -func randFieldStatus(dAtA []byte, r randyStatus, fieldNumber int, wire int) []byte { - key := uint32(fieldNumber)<<3 | uint32(wire) - switch wire { - case 0: - dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) - v3 := r.Int63() - if r.Intn(2) == 0 { - v3 *= -1 - } - dAtA = encodeVarintPopulateStatus(dAtA, uint64(v3)) - case 1: - dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - case 2: - dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) - ll := r.Intn(100) - dAtA = encodeVarintPopulateStatus(dAtA, uint64(ll)) - for j := 0; j < ll; j++ { - dAtA = append(dAtA, byte(r.Intn(256))) - } - default: - dAtA = encodeVarintPopulateStatus(dAtA, uint64(key)) - dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) - } - return dAtA -} -func encodeVarintPopulateStatus(dAtA []byte, v uint64) []byte { - for v >= 1<<7 { - dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) - v >>= 7 - } - dAtA = append(dAtA, uint8(v)) - return dAtA -} -func (m *Status) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Code != 0 { - n += 1 + sovStatus(uint64(m.Code)) - } - l = len(m.Message) - if l > 0 { - n += 1 + l + sovStatus(uint64(l)) - } - if len(m.Details) > 0 { - for _, e := range m.Details { - l = e.Size() - n += 1 + l + sovStatus(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovStatus(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStatus(x uint64) (n int) { - return sovStatus(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Status) String() string { - if this == nil { - return "nil" - } - repeatedStringForDetails := "[]*Any{" - for _, f := range this.Details { - repeatedStringForDetails += strings.Replace(fmt.Sprintf("%v", f), "Any", "types.Any", 1) + "," - } - repeatedStringForDetails += "}" - s := strings.Join([]string{`&Status{`, - `Code:` + fmt.Sprintf("%v", this.Code) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Details:` + repeatedStringForDetails + `,`, - `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, - `}`, - }, "") - return s -} -func valueToStringStatus(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - 
return fmt.Sprintf("*%v", pv) -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStatus - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStatus - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Code |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStatus - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStatus - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStatus - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStatus - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStatus - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStatus - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Details = append(m.Details, &types.Any{}) - if err := m.Details[len(m.Details)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStatus(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStatus - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStatus - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStatus(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStatus - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStatus - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStatus - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStatus - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupStatus - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthStatus - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStatus = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupStatus = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.proto b/vendor/github.com/gogo/googleapis/google/rpc/status.proto deleted file mode 100644 index abcd4534317f..000000000000 --- a/vendor/github.com/gogo/googleapis/google/rpc/status.proto +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2017 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.rpc; - -import "google/protobuf/any.proto"; - -option go_package = "rpc"; -option java_multiple_files = true; -option java_outer_classname = "StatusProto"; -option java_package = "com.google.rpc"; -option objc_class_prefix = "RPC"; - -// The `Status` type defines a logical error model that is suitable for -// different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: -// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes -// if needed. 
The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. -message Status { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. - int32 code = 1; - - // A developer-facing error message, which should be in English. Any - // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. - string message = 2; - - // A list of messages that carry the error details. There is a common set of - // message types for APIs to use. - repeated google.protobuf.Any details = 3; -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE deleted file mode 100644 index b2b065037fc4..000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
\ No newline at end of file diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md deleted file mode 100644 index 3a4cc2175e52..000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/README.md +++ /dev/null @@ -1,85 +0,0 @@ -# Go gRPC Middleware - -[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware.svg?branch=master)](https://travis-ci.org/grpc-ecosystem/go-grpc-middleware) -[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-middleware)](https://goreportcard.com/report/github.com/grpc-ecosystem/go-grpc-middleware) -[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-middleware) -[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-middleware/?badge) -[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-middleware) -[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE) -[![quality: production](https://img.shields.io/badge/quality-production-orange.svg)](#status) -[![Slack](https://img.shields.io/badge/slack-%23grpc--middleware-brightgreen)](https://slack.com/share/IRUQCFC23/9Tm7hxRFVKKNoajQfMOcUiIk/enQtODc4ODI4NTIyMDcxLWM5NDA0ZTE4Njg5YjRjYWZkMTI5MzQwNDY3YzBjMzE1YzdjOGM5ZjI1NDNiM2JmNzI2YjM5ODE5OTRiNTEyOWE) - -[gRPC Go](https://github.com/grpc/grpc-go) Middleware: interceptors, helpers, utilities. - -## Middleware - -[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for -Interceptors, i.e. [middleware](https://medium.com/@matryer/writing-middleware-in-golang-and-how-go-makes-it-so-much-fun-4375c1246e81#.gv7tdlghs) -that is executed either on the gRPC Server before the request is passed onto the user's application logic, or on the gRPC client either around the user call. It is a perfect way to implement -common patterns: auth, logging, message, validation, retries or monitoring. - -These are generic building blocks that make it easy to build multiple microservices easily. -The purpose of this repository is to act as a go-to point for such reusable functionality. It contains -some of them itself, but also will link to useful external repos. 
- -`grpc_middleware` itself provides support for chaining interceptors, here's an example: - -```go -import "github.com/grpc-ecosystem/go-grpc-middleware" - -myServer := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer( - grpc_ctxtags.StreamServerInterceptor(), - grpc_opentracing.StreamServerInterceptor(), - grpc_prometheus.StreamServerInterceptor, - grpc_zap.StreamServerInterceptor(zapLogger), - grpc_auth.StreamServerInterceptor(myAuthFunction), - grpc_recovery.StreamServerInterceptor(), - )), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer( - grpc_ctxtags.UnaryServerInterceptor(), - grpc_opentracing.UnaryServerInterceptor(), - grpc_prometheus.UnaryServerInterceptor, - grpc_zap.UnaryServerInterceptor(zapLogger), - grpc_auth.UnaryServerInterceptor(myAuthFunction), - grpc_recovery.UnaryServerInterceptor(), - )), -) -``` - -## Interceptors - -*Please send a PR to add new interceptors or middleware to this list* - -#### Auth - * [`grpc_auth`](auth) - a customizable (via `AuthFunc`) piece of auth middleware - -#### Logging - * [`grpc_ctxtags`](tags/) - a library that adds a `Tag` map to context, with data populated from request body - * [`grpc_zap`](logging/zap/) - integration of [zap](https://github.com/uber-go/zap) logging library into gRPC handlers. - * [`grpc_logrus`](logging/logrus/) - integration of [logrus](https://github.com/sirupsen/logrus) logging library into gRPC handlers. - * [`grpc_kit`](logging/kit/) - integration of [go-kit](https://github.com/go-kit/kit/tree/master/log) logging library into gRPC handlers. - -#### Monitoring - * [`grpc_prometheus`⚡](https://github.com/grpc-ecosystem/go-grpc-prometheus) - Prometheus client-side and server-side monitoring middleware - * [`otgrpc`⚡](https://github.com/grpc-ecosystem/grpc-opentracing/tree/master/go/otgrpc) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors - * [`grpc_opentracing`](tracing/opentracing) - [OpenTracing](http://opentracing.io/) client-side and server-side interceptors with support for streaming and handler-returned tags - -#### Client - * [`grpc_retry`](retry/) - a generic gRPC response code retry mechanism, client-side middleware - -#### Server - * [`grpc_validator`](validator/) - codegen inbound message validation from `.proto` options - * [`grpc_recovery`](recovery/) - turn panics into gRPC errors - * [`ratelimit`](ratelimit/) - grpc rate limiting by your own limiter - - -## Status - -This code has been running in *production* since May 2016 as the basis of the gRPC micro services stack at [Improbable](https://improbable.io). - -Additional tooling will be added, and contributions are welcome. - -## License - -`go-grpc-middleware` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go deleted file mode 100644 index ea3738b896cf..000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/chain.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -// gRPC Server Interceptor chaining middleware. - -package grpc_middleware - -import ( - "context" - - "google.golang.org/grpc" -) - -// ChainUnaryServer creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. 
-// For example ChainUnaryServer(one, two, three) will execute one before two before three, and three -// will see context changes of one and two. -func ChainUnaryServer(interceptors ...grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { - n := len(interceptors) - - return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - chainer := func(currentInter grpc.UnaryServerInterceptor, currentHandler grpc.UnaryHandler) grpc.UnaryHandler { - return func(currentCtx context.Context, currentReq interface{}) (interface{}, error) { - return currentInter(currentCtx, currentReq, info, currentHandler) - } - } - - chainedHandler := handler - for i := n - 1; i >= 0; i-- { - chainedHandler = chainer(interceptors[i], chainedHandler) - } - - return chainedHandler(ctx, req) - } -} - -// ChainStreamServer creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryServer(one, two, three) will execute one before two before three. -// If you want to pass context between interceptors, use WrapServerStream. -func ChainStreamServer(interceptors ...grpc.StreamServerInterceptor) grpc.StreamServerInterceptor { - n := len(interceptors) - - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - chainer := func(currentInter grpc.StreamServerInterceptor, currentHandler grpc.StreamHandler) grpc.StreamHandler { - return func(currentSrv interface{}, currentStream grpc.ServerStream) error { - return currentInter(currentSrv, currentStream, info, currentHandler) - } - } - - chainedHandler := handler - for i := n - 1; i >= 0; i-- { - chainedHandler = chainer(interceptors[i], chainedHandler) - } - - return chainedHandler(srv, ss) - } -} - -// ChainUnaryClient creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. -// For example ChainUnaryClient(one, two, three) will execute one before two before three. -func ChainUnaryClient(interceptors ...grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { - n := len(interceptors) - - return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - chainer := func(currentInter grpc.UnaryClientInterceptor, currentInvoker grpc.UnaryInvoker) grpc.UnaryInvoker { - return func(currentCtx context.Context, currentMethod string, currentReq, currentRepl interface{}, currentConn *grpc.ClientConn, currentOpts ...grpc.CallOption) error { - return currentInter(currentCtx, currentMethod, currentReq, currentRepl, currentConn, currentInvoker, currentOpts...) - } - } - - chainedInvoker := invoker - for i := n - 1; i >= 0; i-- { - chainedInvoker = chainer(interceptors[i], chainedInvoker) - } - - return chainedInvoker(ctx, method, req, reply, cc, opts...) - } -} - -// ChainStreamClient creates a single interceptor out of a chain of many interceptors. -// -// Execution is done in left-to-right order, including passing of context. -// For example ChainStreamClient(one, two, three) will execute one before two before three. 
-func ChainStreamClient(interceptors ...grpc.StreamClientInterceptor) grpc.StreamClientInterceptor { - n := len(interceptors) - - return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - chainer := func(currentInter grpc.StreamClientInterceptor, currentStreamer grpc.Streamer) grpc.Streamer { - return func(currentCtx context.Context, currentDesc *grpc.StreamDesc, currentConn *grpc.ClientConn, currentMethod string, currentOpts ...grpc.CallOption) (grpc.ClientStream, error) { - return currentInter(currentCtx, currentDesc, currentConn, currentMethod, currentStreamer, currentOpts...) - } - } - - chainedStreamer := streamer - for i := n - 1; i >= 0; i-- { - chainedStreamer = chainer(interceptors[i], chainedStreamer) - } - - return chainedStreamer(ctx, desc, cc, method, opts...) - } -} - -// Chain creates a single interceptor out of a chain of many interceptors. -// -// WithUnaryServerChain is a grpc.Server config option that accepts multiple unary interceptors. -// Basically syntactic sugar. -func WithUnaryServerChain(interceptors ...grpc.UnaryServerInterceptor) grpc.ServerOption { - return grpc.UnaryInterceptor(ChainUnaryServer(interceptors...)) -} - -// WithStreamServerChain is a grpc.Server config option that accepts multiple stream interceptors. -// Basically syntactic sugar. -func WithStreamServerChain(interceptors ...grpc.StreamServerInterceptor) grpc.ServerOption { - return grpc.StreamInterceptor(ChainStreamServer(interceptors...)) -} diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go deleted file mode 100644 index 716895036423..000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/doc.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -/* -`grpc_middleware` is a collection of gRPC middleware packages: interceptors, helpers and tools. - -Middleware - -gRPC is a fantastic RPC middleware, which sees a lot of adoption in the Golang world. However, the -upstream gRPC codebase is relatively bare bones. - -This package, and most of its child packages provides commonly needed middleware for gRPC: -client-side interceptors for retires, server-side interceptors for input validation and auth, -functions for chaining said interceptors, metadata convenience methods and more. - -Chaining - -By default, gRPC doesn't allow one to have more than one interceptor either on the client nor on -the server side. `grpc_middleware` provides convenient chaining methods - -Simple way of turning a multiple interceptors into a single interceptor. Here's an example for -server chaining: - - myServer := grpc.NewServer( - grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(loggingStream, monitoringStream, authStream)), - grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(loggingUnary, monitoringUnary, authUnary), - ) - -These interceptors will be executed from left to right: logging, monitoring and auth. 
- -Here's an example for client side chaining: - - clientConn, err = grpc.Dial( - address, - grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(monitoringClientUnary, retryUnary)), - grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(monitoringClientStream, retryStream)), - ) - client = pb_testproto.NewTestServiceClient(clientConn) - resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"}) - -These interceptors will be executed from left to right: monitoring and then retry logic. - -The retry interceptor will call every interceptor that follows it whenever when a retry happens. - -Writing Your Own - -Implementing your own interceptor is pretty trivial: there are interfaces for that. But the interesting -bit exposing common data to handlers (and other middleware), similarly to HTTP Middleware design. -For example, you may want to pass the identity of the caller from the auth interceptor all the way -to the handling function. - -For example, a client side interceptor example for auth looks like: - - func FakeAuthUnaryInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { - newCtx := context.WithValue(ctx, "user_id", "john@example.com") - return handler(newCtx, req) - } - -Unfortunately, it's not as easy for streaming RPCs. These have the `context.Context` embedded within -the `grpc.ServerStream` object. To pass values through context, a wrapper (`WrappedServerStream`) is -needed. For example: - - func FakeAuthStreamingInterceptor(srv interface{}, stream grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - newStream := grpc_middleware.WrapServerStream(stream) - newStream.WrappedContext = context.WithValue(ctx, "user_id", "john@example.com") - return handler(srv, stream) - } -*/ -package grpc_middleware diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod deleted file mode 100644 index 6f8eeac43d2b..000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/go.mod +++ /dev/null @@ -1,22 +0,0 @@ -module github.com/grpc-ecosystem/go-grpc-middleware - -require ( - github.com/go-kit/kit v0.9.0 - github.com/go-logfmt/logfmt v0.4.0 // indirect - github.com/go-stack/stack v1.8.0 // indirect - github.com/gogo/protobuf v1.2.1 - github.com/golang/protobuf v1.3.2 - github.com/opentracing/opentracing-go v1.1.0 - github.com/pkg/errors v0.8.1 // indirect - github.com/sirupsen/logrus v1.4.2 - github.com/stretchr/testify v1.4.0 - go.uber.org/atomic v1.4.0 // indirect - go.uber.org/multierr v1.1.0 // indirect - go.uber.org/zap v1.10.0 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be - google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 // indirect - google.golang.org/grpc v1.19.0 -) - -go 1.13 diff --git a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go b/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go deleted file mode 100644 index 05ccfb3f24a7..000000000000 --- a/vendor/github.com/grpc-ecosystem/go-grpc-middleware/wrappers.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2016 Michal Witkowski. All Rights Reserved. -// See LICENSE for licensing terms. - -package grpc_middleware - -import ( - "context" - - "google.golang.org/grpc" -) - -// WrappedServerStream is a thin wrapper around grpc.ServerStream that allows modifying context. 
-type WrappedServerStream struct { - grpc.ServerStream - // WrappedContext is the wrapper's own Context. You can assign it. - WrappedContext context.Context -} - -// Context returns the wrapper's WrappedContext, overwriting the nested grpc.ServerStream.Context() -func (w *WrappedServerStream) Context() context.Context { - return w.WrappedContext -} - -// WrapServerStream returns a ServerStream that has the ability to overwrite context. -func WrapServerStream(stream grpc.ServerStream) *WrappedServerStream { - if existing, ok := stream.(*WrappedServerStream); ok { - return existing - } - return &WrappedServerStream{ServerStream: stream, WrappedContext: stream.Context()} -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md deleted file mode 100644 index 78c49dbbeaad..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# OpenTracing support for gRPC in Go - -The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based -systems in Go. - -## Installation - -``` -go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc -``` - -## Documentation - -See the basic usage examples below and the [package documentation on -godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc). - -## Client-side usage example - -Wherever you call `grpc.Dial`: - -```go -// You must have some sort of OpenTracing Tracer instance on hand. -var tracer opentracing.Tracer = ... -... - -// Set up a connection to the server peer. -conn, err := grpc.Dial( - address, - ... // other options - grpc.WithUnaryInterceptor( - otgrpc.OpenTracingClientInterceptor(tracer)), - grpc.WithStreamInterceptor( - otgrpc.OpenTracingStreamClientInterceptor(tracer))) - -// All future RPC activity involving `conn` will be automatically traced. -``` - -## Server-side usage example - -Wherever you call `grpc.NewServer`: - -```go -// You must have some sort of OpenTracing Tracer instance on hand. -var tracer opentracing.Tracer = ... -... - -// Initialize the gRPC server. -s := grpc.NewServer( - ... // other options - grpc.UnaryInterceptor( - otgrpc.OpenTracingServerInterceptor(tracer)), - grpc.StreamInterceptor( - otgrpc.OpenTracingStreamServerInterceptor(tracer))) - -// All future RPC activity involving `s` will be automatically traced. -``` - diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go deleted file mode 100644 index 3414e55cb1f7..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go +++ /dev/null @@ -1,239 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - "io" - "runtime" - "sync/atomic" -) - -// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. 
-// -// For example: -// -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) -// -// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC -// metadata; they will also look in the context.Context for an active -// in-process parent Span and establish a ChildOf reference if such a parent -// Span could be found. -func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - method string, - req, resp interface{}, - cc *grpc.ClientConn, - invoker grpc.UnaryInvoker, - opts ...grpc.CallOption, - ) error { - var err error - var parentCtx opentracing.SpanContext - if parent := opentracing.SpanFromContext(ctx); parent != nil { - parentCtx = parent.Context() - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) { - return invoker(ctx, method, req, resp, cc, opts...) - } - clientSpan := tracer.StartSpan( - method, - opentracing.ChildOf(parentCtx), - ext.SpanKindRPCClient, - gRPCComponentTag, - ) - defer clientSpan.Finish() - ctx = injectSpanContext(ctx, tracer, clientSpan) - if otgrpcOpts.logPayloads { - clientSpan.LogFields(log.Object("gRPC request", req)) - } - err = invoker(ctx, method, req, resp, cc, opts...) - if err == nil { - if otgrpcOpts.logPayloads { - clientSpan.LogFields(log.Object("gRPC response", resp)) - } - } else { - SetSpanTags(clientSpan, err, true) - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(clientSpan, method, req, resp, err) - } - return err - } -} - -// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating -// a single span to correspond to the lifetime of the RPC's stream. -// -// For example: -// -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) -// -// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC -// metadata; they will also look in the context.Context for an active -// in-process parent Span and establish a ChildOf reference if such a parent -// Span could be found. -func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - desc *grpc.StreamDesc, - cc *grpc.ClientConn, - method string, - streamer grpc.Streamer, - opts ...grpc.CallOption, - ) (grpc.ClientStream, error) { - var err error - var parentCtx opentracing.SpanContext - if parent := opentracing.SpanFromContext(ctx); parent != nil { - parentCtx = parent.Context() - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { - return streamer(ctx, desc, cc, method, opts...) - } - - clientSpan := tracer.StartSpan( - method, - opentracing.ChildOf(parentCtx), - ext.SpanKindRPCClient, - gRPCComponentTag, - ) - ctx = injectSpanContext(ctx, tracer, clientSpan) - cs, err := streamer(ctx, desc, cc, method, opts...) 
- if err != nil { - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - SetSpanTags(clientSpan, err, true) - clientSpan.Finish() - return cs, err - } - return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil - } -} - -func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { - finishChan := make(chan struct{}) - - isFinished := new(int32) - *isFinished = 0 - finishFunc := func(err error) { - // The current OpenTracing specification forbids finishing a span more than - // once. Since we have multiple code paths that could concurrently call - // `finishFunc`, we need to add some sort of synchronization to guard against - // multiple finishing. - if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { - return - } - close(finishChan) - defer clientSpan.Finish() - if err != nil { - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - SetSpanTags(clientSpan, err, true) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(clientSpan, method, nil, nil, err) - } - } - go func() { - select { - case <-finishChan: - // The client span is being finished by another code path; hence, no - // action is necessary. - case <-cs.Context().Done(): - finishFunc(cs.Context().Err()) - } - }() - otcs := &openTracingClientStream{ - ClientStream: cs, - desc: desc, - finishFunc: finishFunc, - } - - // The `ClientStream` interface allows one to omit calling `Recv` if it's - // known that the result will be `io.EOF`. See - // http://stackoverflow.com/q/42915337 - // In such cases, there's nothing that triggers the span to finish. We, - // therefore, set a finalizer so that the span and the context goroutine will - // at least be cleaned up when the garbage collector is run. 
- runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) { - otcs.finishFunc(nil) - }) - return otcs -} - -type openTracingClientStream struct { - grpc.ClientStream - desc *grpc.StreamDesc - finishFunc func(error) -} - -func (cs *openTracingClientStream) Header() (metadata.MD, error) { - md, err := cs.ClientStream.Header() - if err != nil { - cs.finishFunc(err) - } - return md, err -} - -func (cs *openTracingClientStream) SendMsg(m interface{}) error { - err := cs.ClientStream.SendMsg(m) - if err != nil { - cs.finishFunc(err) - } - return err -} - -func (cs *openTracingClientStream) RecvMsg(m interface{}) error { - err := cs.ClientStream.RecvMsg(m) - if err == io.EOF { - cs.finishFunc(nil) - return err - } else if err != nil { - cs.finishFunc(err) - return err - } - if !cs.desc.ServerStreams { - cs.finishFunc(nil) - } - return err -} - -func (cs *openTracingClientStream) CloseSend() error { - err := cs.ClientStream.CloseSend() - if err != nil { - cs.finishFunc(err) - } - return err -} - -func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - md = metadata.New(nil) - } else { - md = md.Copy() - } - mdWriter := metadataReaderWriter{md} - err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) - // We have no better place to record an error than the Span itself :-/ - if err != nil { - clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) - } - return metadata.NewOutgoingContext(ctx, md) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go deleted file mode 100644 index 41a6346f2504..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go +++ /dev/null @@ -1,69 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// A Class is a set of types of outcomes (including errors) that will often -// be handled in the same way. -type Class string - -const ( - Unknown Class = "0xx" - // Success represents outcomes that achieved the desired results. - Success Class = "2xx" - // ClientError represents errors that were the client's fault. - ClientError Class = "4xx" - // ServerError represents errors that were the server's fault. - ServerError Class = "5xx" -) - -// ErrorClass returns the class of the given error -func ErrorClass(err error) Class { - if s, ok := status.FromError(err); ok { - switch s.Code() { - // Success or "success" - case codes.OK, codes.Canceled: - return Success - - // Client errors - case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, - codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition, - codes.OutOfRange: - return ClientError - - // Server errors - case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted, - codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss: - return ServerError - - // Not sure - case codes.Unknown: - fallthrough - default: - return Unknown - } - } - return Unknown -} - -// SetSpanTags sets one or more tags on the given span according to the -// error. 
-func SetSpanTags(span opentracing.Span, err error, client bool) { - c := ErrorClass(err) - code := codes.Unknown - if s, ok := status.FromError(err); ok { - code = s.Code() - } - span.SetTag("response_code", code) - span.SetTag("response_class", c) - if err == nil { - return - } - if client || c == ServerError { - ext.Error.Set(span, true) - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go deleted file mode 100644 index 903e8382e3f0..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go +++ /dev/null @@ -1,76 +0,0 @@ -package otgrpc - -import "github.com/opentracing/opentracing-go" - -// Option instances may be used in OpenTracing(Server|Client)Interceptor -// initialization. -// -// See this post about the "functional options" pattern: -// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis -type Option func(o *options) - -// LogPayloads returns an Option that tells the OpenTracing instrumentation to -// try to log application payloads in both directions. -func LogPayloads() Option { - return func(o *options) { - o.logPayloads = true - } -} - -// SpanInclusionFunc provides an optional mechanism to decide whether or not -// to trace a given gRPC call. Return true to create a Span and initiate -// tracing, false to not create a Span and not trace. -// -// parentSpanCtx may be nil if no parent could be extraction from either the Go -// context.Context (on the client) or the RPC (on the server). -type SpanInclusionFunc func( - parentSpanCtx opentracing.SpanContext, - method string, - req, resp interface{}) bool - -// IncludingSpans binds a IncludeSpanFunc to the options -func IncludingSpans(inclusionFunc SpanInclusionFunc) Option { - return func(o *options) { - o.inclusionFunc = inclusionFunc - } -} - -// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add -// arbitrary tags/logs/etc to the opentracing.Span associated with client -// and/or server RPCs. -type SpanDecoratorFunc func( - span opentracing.Span, - method string, - req, resp interface{}, - grpcError error) - -// SpanDecorator binds a function that decorates gRPC Spans. -func SpanDecorator(decorator SpanDecoratorFunc) Option { - return func(o *options) { - o.decorator = decorator - } -} - -// The internal-only options struct. Obviously overkill at the moment; but will -// scale well as production use dictates other configuration and tuning -// parameters. -type options struct { - logPayloads bool - decorator SpanDecoratorFunc - // May be nil. - inclusionFunc SpanInclusionFunc -} - -// newOptions returns the default options. -func newOptions() *options { - return &options{ - logPayloads: false, - inclusionFunc: nil, - } -} - -func (o *options) apply(opts ...Option) { - for _, opt := range opts { - opt(o) - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go deleted file mode 100644 index 4ff3d199783f..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package otgrpc provides OpenTracing support for any gRPC client or server. 
-// -// See the README for simple usage examples: -// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md -package otgrpc diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go deleted file mode 100644 index 62cf54d22176..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go +++ /dev/null @@ -1,141 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable -// for use in a grpc.NewServer call. -// -// For example: -// -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) -// -// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC -// metadata; if found, the server span will act as the ChildOf that RPC -// SpanContext. -// -// Root or not, the server Span will be embedded in the context.Context for the -// application-specific gRPC handler(s) to access. -func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - req interface{}, - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler, - ) (resp interface{}, err error) { - spanContext, err := extractSpanContext(ctx, tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) { - return handler(ctx, req) - } - serverSpan := tracer.StartSpan( - info.FullMethod, - ext.RPCServerOption(spanContext), - gRPCComponentTag, - ) - defer serverSpan.Finish() - - ctx = opentracing.ContextWithSpan(ctx, serverSpan) - if otgrpcOpts.logPayloads { - serverSpan.LogFields(log.Object("gRPC request", req)) - } - resp, err = handler(ctx, req) - if err == nil { - if otgrpcOpts.logPayloads { - serverSpan.LogFields(log.Object("gRPC response", resp)) - } - } else { - SetSpanTags(serverSpan, err, false) - serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err) - } - return resp, err - } -} - -// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable -// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by -// creating a single span to correspond to the lifetime of the RPC's stream. -// -// For example: -// -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) -// -// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC -// metadata; if found, the server span will act as the ChildOf that RPC -// SpanContext. -// -// Root or not, the server Span will be embedded in the context.Context for the -// application-specific gRPC handler(s) to access. 
-func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - spanContext, err := extractSpanContext(ss.Context(), tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { - return handler(srv, ss) - } - - serverSpan := tracer.StartSpan( - info.FullMethod, - ext.RPCServerOption(spanContext), - gRPCComponentTag, - ) - defer serverSpan.Finish() - ss = &openTracingServerStream{ - ServerStream: ss, - ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), - } - err = handler(srv, ss) - if err != nil { - SetSpanTags(serverSpan, err, false) - serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) - } - return err - } -} - -type openTracingServerStream struct { - grpc.ServerStream - ctx context.Context -} - -func (ss *openTracingServerStream) Context() context.Context { - return ss.ctx -} - -func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - md = metadata.New(nil) - } - return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go deleted file mode 100644 index 9abd5eaa6290..000000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go +++ /dev/null @@ -1,42 +0,0 @@ -package otgrpc - -import ( - "strings" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "google.golang.org/grpc/metadata" -) - -var ( - // Morally a const: - gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} -) - -// metadataReaderWriter satisfies both the opentracing.TextMapReader and -// opentracing.TextMapWriter interfaces. -type metadataReaderWriter struct { - metadata.MD -} - -func (w metadataReaderWriter) Set(key, val string) { - // The GRPC HPACK implementation rejects any uppercase keys here. - // - // As such, since the HTTP_HEADERS format is case-insensitive anyway, we - // blindly lowercase the key (which is guaranteed to work in the - // Inject/Extract sense per the OpenTracing spec). 
- key = strings.ToLower(key) - w.MD[key] = append(w.MD[key], val) -} - -func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { - for k, vals := range w.MD { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/jaguilar/vt100/LICENSE b/vendor/github.com/jaguilar/vt100/LICENSE deleted file mode 100644 index 50d8a71f0679..000000000000 --- a/vendor/github.com/jaguilar/vt100/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 James Aguilar - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/github.com/jaguilar/vt100/README.md b/vendor/github.com/jaguilar/vt100/README.md deleted file mode 100644 index 4df698521066..000000000000 --- a/vendor/github.com/jaguilar/vt100/README.md +++ /dev/null @@ -1,54 +0,0 @@ -#VT100 - -[![Build Status](https://travis-ci.org/jaguilar/vt100.svg?branch=master)](https://travis-ci.org/jaguilar/vt100) - -[![GoDoc](https://godoc.org/github.com/jaguilar/vt100?status.svg)](https://godoc.org/github.com/jaguilar/vt100) - -This is a vt100 screen reader. It seems to do a pretty -decent job of parsing the nethack input stream, which -is all I want it for anyway. - -Here is a screenshot of the HTML-formatted screen data: - -![](_readme/screencap.png) - -The features we currently support: - -* Cursor movement -* Erasing -* Many of the text properties -- underline, inverse, blink, etc. -* Sixteen colors -* Cursor saving and unsaving -* UTF-8 - -Not currently supported (and no plans to support): - -* Scrolling -* Prompts -* Other cooked mode features - -The API is not stable! This is a v0 package. - -## Demo - -Try running the demo! Install nethack: - - sudo apt-get install nethack - -Get this code: - - go get github.com/jaguilar/vt100 - cd $GOPATH/src/githib.com/jaguilar/vt100 - -Run this code: - - go run demo/demo.go -port=8080 2>/tmp/error.txt - -Play some nethack and check out the resulting VT100 terminal status: - - # From another terminal . . . - xdg-open http://localhost:8080/debug/vt100 - -The demo probably assumes Linux (it uses pty-related syscalls). I'll happily -accept pull requests that replicate the pty-spawning functions on OSX and -Windows. 
\ No newline at end of file diff --git a/vendor/github.com/jaguilar/vt100/command.go b/vendor/github.com/jaguilar/vt100/command.go deleted file mode 100644 index c2386544ad41..000000000000 --- a/vendor/github.com/jaguilar/vt100/command.go +++ /dev/null @@ -1,288 +0,0 @@ -package vt100 - -import ( - "errors" - "expvar" - "fmt" - "image/color" - "regexp" - "strconv" - "strings" -) - -// UnsupportedError indicates that we parsed an operation that this -// terminal does not implement. Such errors indicate that the client -// program asked us to perform an action that we don't know how to. -// It MAY be safe to continue trying to do additional operations. -// This is a distinct category of errors from things we do know how -// to do, but are badly encoded, or errors from the underlying io.RuneScanner -// that we're reading commands from. -type UnsupportedError struct { - error -} - -var ( - supportErrors = expvar.NewMap("vt100-unsupported-operations") -) - -func supportError(e error) error { - supportErrors.Add(e.Error(), 1) - return UnsupportedError{e} -} - -// Command is a type of object that the terminal can process to perform -// an update. -type Command interface { - display(v *VT100) error -} - -// runeCommand is a simple command that just writes a rune -// to the current cell and advances the cursor. -type runeCommand rune - -func (r runeCommand) display(v *VT100) error { - v.put(rune(r)) - return nil -} - -// escapeCommand is a control sequence command. It includes a variety -// of control and escape sequences that move and modify the cursor -// or the terminal. -type escapeCommand struct { - cmd rune - args string -} - -func (c escapeCommand) String() string { - return fmt.Sprintf("[%q %U](%v)", c.cmd, c.cmd, c.args) -} - -type intHandler func(*VT100, []int) error - -var ( - // intHandlers are handlers for which all arguments are numbers. - // This is most of them -- all the ones that we process. Eventually, - // we may add handlers that support non-int args. Those handlers - // will instead receive []string, and they'll have to choose on their - // own how they might be parsed. - intHandlers = map[rune]intHandler{ - 's': save, - '7': save, - 'u': unsave, - '8': unsave, - 'A': relativeMove(-1, 0), - 'B': relativeMove(1, 0), - 'C': relativeMove(0, 1), - 'D': relativeMove(0, -1), - 'K': eraseColumns, - 'J': eraseLines, - 'H': home, - 'f': home, - 'm': updateAttributes, - } -) - -func save(v *VT100, _ []int) error { - v.save() - return nil -} - -func unsave(v *VT100, _ []int) error { - v.unsave() - return nil -} - -var ( - codeColors = []color.RGBA{ - Black, - Red, - Green, - Yellow, - Blue, - Magenta, - Cyan, - White, - {}, // Not used. - DefaultColor, - } -) - -// A command to update the attributes of the cursor based on the arg list. -func updateAttributes(v *VT100, args []int) error { - f := &v.Cursor.F - - var unsupported []int - for _, x := range args { - switch x { - case 0: - *f = Format{} - case 1: - f.Intensity = Bright - case 2: - f.Intensity = Dim - case 22: - f.Intensity = Normal - case 4: - f.Underscore = true - case 24: - f.Underscore = false - case 5, 6: - f.Blink = true // We don't distinguish between blink speeds. - case 25: - f.Blink = false - case 7: - f.Inverse = true - case 27: - f.Inverse = false - case 8: - f.Conceal = true - case 28: - f.Conceal = false - case 30, 31, 32, 33, 34, 35, 36, 37, 39: - f.Fg = codeColors[x-30] - case 40, 41, 42, 43, 44, 45, 46, 47, 49: - f.Bg = codeColors[x-40] - // 38 and 48 not supported. Maybe someday. 
- default: - unsupported = append(unsupported, x) - } - } - - if unsupported != nil { - return supportError(fmt.Errorf("unknown attributes: %v", unsupported)) - } - return nil -} - -func relativeMove(y, x int) func(*VT100, []int) error { - return func(v *VT100, args []int) error { - c := 1 - if len(args) >= 1 { - c = args[0] - } - // home is 1-indexed, because that's what the terminal sends us. We want to - // reuse its sanitization scheme, so we'll just modify our args by that amount. - return home(v, []int{v.Cursor.Y + y*c + 1, v.Cursor.X + x*c + 1}) - } -} - -func eraseColumns(v *VT100, args []int) error { - d := eraseForward - if len(args) > 0 { - d = eraseDirection(args[0]) - } - if d > eraseAll { - return fmt.Errorf("unknown erase direction: %d", d) - } - v.eraseColumns(d) - return nil -} - -func eraseLines(v *VT100, args []int) error { - d := eraseForward - if len(args) > 0 { - d = eraseDirection(args[0]) - } - if d > eraseAll { - return fmt.Errorf("unknown erase direction: %d", d) - } - v.eraseLines(d) - return nil -} - -func sanitize(v *VT100, y, x int) (int, int, error) { - var err error - if y < 0 || y >= v.Height || x < 0 || x >= v.Width { - err = fmt.Errorf("out of bounds (%d, %d)", y, x) - } else { - return y, x, nil - } - - if y < 0 { - y = 0 - } - if y >= v.Height { - y = v.Height - 1 - } - if x < 0 { - x = 0 - } - if x >= v.Width { - x = v.Width - 1 - } - return y, x, err -} - -func home(v *VT100, args []int) error { - var y, x int - if len(args) >= 2 { - y, x = args[0]-1, args[1]-1 // home args are 1-indexed. - } - y, x, err := sanitize(v, y, x) // Clamp y and x to the bounds of the terminal. - v.home(y, x) // Try to do something like what the client asked. - return err -} - -func (c escapeCommand) display(v *VT100) error { - f, ok := intHandlers[c.cmd] - if !ok { - return supportError(c.err(errors.New("unsupported command"))) - } - - args, err := c.argInts() - if err != nil { - return c.err(fmt.Errorf("while parsing int args: %v", err)) - } - - return f(v, args) -} - -// err enhances e with information about the current escape command -func (c escapeCommand) err(e error) error { - return fmt.Errorf("%s: %s", c, e) -} - -var csArgsRe = regexp.MustCompile("^([^0-9]*)(.*)$") - -// argInts parses c.args as a slice of at least arity ints. If the number -// of ; separated arguments is less than arity, the remaining elements of -// the result will be zero. errors only on integer parsing failure. 
-func (c escapeCommand) argInts() ([]int, error) { - if len(c.args) == 0 { - return make([]int, 0), nil - } - args := strings.Split(c.args, ";") - out := make([]int, len(args)) - for i, s := range args { - x, err := strconv.ParseInt(s, 10, 0) - if err != nil { - return nil, err - } - out[i] = int(x) - } - return out, nil -} - -type controlCommand rune - -const ( - backspace controlCommand = '\b' - _horizontalTab = '\t' - linefeed = '\n' - _verticalTab = '\v' - _formfeed = '\f' - carriageReturn = '\r' -) - -func (c controlCommand) display(v *VT100) error { - switch c { - case backspace: - v.backspace() - case linefeed: - v.Cursor.Y++ - v.Cursor.X = 0 - case carriageReturn: - v.Cursor.X = 0 - } - return nil -} diff --git a/vendor/github.com/jaguilar/vt100/go.mod b/vendor/github.com/jaguilar/vt100/go.mod deleted file mode 100644 index 938cab63c9bd..000000000000 --- a/vendor/github.com/jaguilar/vt100/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/jaguilar/vt100 - -go 1.12 - -require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/jaguilar/vt100/scanner.go b/vendor/github.com/jaguilar/vt100/scanner.go deleted file mode 100644 index 19a4b97f362a..000000000000 --- a/vendor/github.com/jaguilar/vt100/scanner.go +++ /dev/null @@ -1,97 +0,0 @@ -package vt100 - -import ( - "bytes" - "fmt" - "io" - "unicode" -) - -// Decode decodes one ANSI terminal command from s. -// -// s should be connected to a client program that expects an -// ANSI terminal on the other end. It will push bytes to us that we are meant -// to intepret as terminal control codes, or text to place onto the terminal. -// -// This Command alone does not actually update the terminal. You need to pass -// it to VT100.Process(). -// -// You should not share s with any other reader, because it could leave -// the stream in an invalid state. -func Decode(s io.RuneScanner) (Command, error) { - r, size, err := s.ReadRune() - if err != nil { - return nil, err - } - - if r == unicode.ReplacementChar && size == 1 { - return nil, fmt.Errorf("non-utf8 data from reader") - } - - if r == escape || r == monogramCsi { // At beginning of escape sequence. - s.UnreadRune() - return scanEscapeCommand(s) - } - - if unicode.IsControl(r) { - return controlCommand(r), nil - } - - return runeCommand(r), nil -} - -const ( - // There are two ways to begin an escape sequence. One is to put the escape byte. - // The other is to put the single-rune control sequence indicator, which is equivalent - // to putting "\u001b[". - escape = '\u001b' - monogramCsi = '\u009b' -) - -var ( - csEnd = &unicode.RangeTable{R16: []unicode.Range16{{Lo: 64, Hi: 126, Stride: 1}}} -) - -// scanEscapeCommand scans to the end of the current escape sequence. The scanner -// must be positioned at an escape rune (esc or the unicode CSI). -func scanEscapeCommand(s io.RuneScanner) (Command, error) { - csi := false - esc, _, err := s.ReadRune() - if err != nil { - return nil, err - } - if esc != escape && esc != monogramCsi { - return nil, fmt.Errorf("invalid content") - } - if esc == monogramCsi { - csi = true - } - - var args bytes.Buffer - quote := false - for i := 0; ; i++ { - r, _, err := s.ReadRune() - if err != nil { - return nil, err - } - if i == 0 && r == '[' { - csi = true - continue - } - - if !csi { - return escapeCommand{r, ""}, nil - } else if quote == false && unicode.Is(csEnd, r) { - return escapeCommand{r, args.String()}, nil - } - - if r == '"' { - quote = !quote - } - - // Otherwise, we're still in the args, and this rune is one of those args. 
- if _, err := args.WriteRune(r); err != nil { - panic(err) // WriteRune cannot return an error from bytes.Buffer. - } - } -} diff --git a/vendor/github.com/jaguilar/vt100/vt100.go b/vendor/github.com/jaguilar/vt100/vt100.go deleted file mode 100644 index 5bf7df968bd5..000000000000 --- a/vendor/github.com/jaguilar/vt100/vt100.go +++ /dev/null @@ -1,435 +0,0 @@ -// package vt100 implements a quick-and-dirty programmable ANSI terminal emulator. -// -// You could, for example, use it to run a program like nethack that expects -// a terminal as a subprocess. It tracks the position of the cursor, -// colors, and various other aspects of the terminal's state, and -// allows you to inspect them. -// -// We do very much mean the dirty part. It's not that we think it might have -// bugs. It's that we're SURE it does. Currently, we only handle raw mode, with no -// cooked mode features like scrolling. We also misinterpret some of the control -// codes, which may or may not matter for your purpose. -package vt100 - -import ( - "bytes" - "fmt" - "image/color" - "sort" - "strings" -) - -type Intensity int - -const ( - Normal Intensity = 0 - Bright = 1 - Dim = 2 - // TODO(jaguilar): Should this be in a subpackage, since the names are pretty collide-y? -) - -var ( - // Technically RGBAs are supposed to be premultiplied. But CSS doesn't expect them - // that way, so we won't do it in this file. - DefaultColor = color.RGBA{0, 0, 0, 0} - // Our black has 255 alpha, so it will compare negatively with DefaultColor. - Black = color.RGBA{0, 0, 0, 255} - Red = color.RGBA{255, 0, 0, 255} - Green = color.RGBA{0, 255, 0, 255} - Yellow = color.RGBA{255, 255, 0, 255} - Blue = color.RGBA{0, 0, 255, 255} - Magenta = color.RGBA{255, 0, 255, 255} - Cyan = color.RGBA{0, 255, 255, 255} - White = color.RGBA{255, 255, 255, 255} -) - -func (i Intensity) alpha() uint8 { - switch i { - case Bright: - return 255 - case Normal: - return 170 - case Dim: - return 85 - default: - return 170 - } -} - -// Format represents the display format of text on a terminal. -type Format struct { - // Fg is the foreground color. - Fg color.RGBA - // Bg is the background color. - Bg color.RGBA - // Intensity is the text intensity (bright, normal, dim). - Intensity Intensity - // Various text properties. - Underscore, Conceal, Negative, Blink, Inverse bool -} - -func toCss(c color.RGBA) string { - return fmt.Sprintf("rgba(%d, %d, %d, %f)", c.R, c.G, c.B, float32(c.A)/255) -} - -func (f Format) css() string { - parts := make([]string, 0) - fg, bg := f.Fg, f.Bg - if f.Inverse { - bg, fg = fg, bg - } - - if f.Intensity != Normal { - // Intensity only applies to the text -- i.e., the foreground. - fg.A = f.Intensity.alpha() - } - - if fg != DefaultColor { - parts = append(parts, "color:"+toCss(fg)) - } - if bg != DefaultColor { - parts = append(parts, "background-color:"+toCss(bg)) - } - if f.Underscore { - parts = append(parts, "text-decoration:underline") - } - if f.Conceal { - parts = append(parts, "display:none") - } - if f.Blink { - parts = append(parts, "text-decoration:blink") - } - - // We're not in performance sensitive code. Although this sort - // isn't strictly necessary, it gives us the nice property that - // the style of a particular set of attributes will always be - // generated the same way. As a result, we can use the html - // output in tests. - sort.StringSlice(parts).Sort() - - return strings.Join(parts, ";") -} - -// Cursor represents both the position and text type of the cursor. 
-type Cursor struct { - // Y and X are the coordinates. - Y, X int - - // F is the format that will be displayed. - F Format -} - -// VT100 represents a simplified, raw VT100 terminal. -type VT100 struct { - // Height and Width are the dimensions of the terminal. - Height, Width int - - // Content is the text in the terminal. - Content [][]rune - - // Format is the display properties of each cell. - Format [][]Format - - // Cursor is the current state of the cursor. - Cursor Cursor - - // savedCursor is the state of the cursor last time save() was called. - savedCursor Cursor - - unparsed []byte -} - -// NewVT100 creates a new VT100 object with the specified dimensions. y and x -// must both be greater than zero. -// -// Each cell is set to contain a ' ' rune, and all formats are left as the -// default. -func NewVT100(y, x int) *VT100 { - if y == 0 || x == 0 { - panic(fmt.Errorf("invalid dim (%d, %d)", y, x)) - } - - v := &VT100{ - Height: y, - Width: x, - Content: make([][]rune, y), - Format: make([][]Format, y), - } - - for row := 0; row < y; row++ { - v.Content[row] = make([]rune, x) - v.Format[row] = make([]Format, x) - - for col := 0; col < x; col++ { - v.clear(row, col) - } - } - return v -} - -func (v *VT100) UsedHeight() int { - count := 0 - for _, l := range v.Content { - for _, r := range l { - if r != ' ' { - count++ - break - } - } - } - return count -} - -func (v *VT100) Resize(y, x int) { - if y > v.Height { - n := y - v.Height - for row := 0; row < n; row++ { - v.Content = append(v.Content, make([]rune, v.Width)) - v.Format = append(v.Format, make([]Format, v.Width)) - for col := 0; col < v.Width; col++ { - v.clear(v.Height+row, col) - } - } - v.Height = y - } else if y < v.Height { - v.Content = v.Content[:y] - v.Height = y - } - - if x > v.Width { - for i := range v.Content { - row := make([]rune, x) - copy(row, v.Content[i]) - v.Content[i] = row - format := make([]Format, x) - copy(format, v.Format[i]) - v.Format[i] = format - for j := v.Width; j < x; j++ { - v.clear(i, j) - } - } - v.Width = x - } else if x < v.Width { - for i := range v.Content { - v.Content[i] = v.Content[i][:x] - v.Format[i] = v.Format[i][:x] - } - v.Width = x - } -} - -func (v *VT100) Write(dt []byte) (int, error) { - n := len(dt) - if len(v.unparsed) > 0 { - dt = append(v.unparsed, dt...) // this almost never happens - v.unparsed = nil - } - buf := bytes.NewBuffer(dt) - for { - if buf.Len() == 0 { - return n, nil - } - cmd, err := Decode(buf) - if err != nil { - if l := buf.Len(); l > 0 && l < 12 { // on small leftover handle unparsed, otherwise skip - v.unparsed = buf.Bytes() - } - return n, nil - } - v.Process(cmd) // ignore error - } -} - -// Process handles a single ANSI terminal command, updating the terminal -// appropriately. -// -// One special kind of error that this can return is an UnsupportedError. It's -// probably best to check for these and skip, because they are likely recoverable. -// Support errors are exported as expvars, so it is possibly not necessary to log -// them. If you want to check what's failed, start a debug http server and examine -// the vt100-unsupported-commands field in /debug/vars. -func (v *VT100) Process(c Command) error { - return c.display(v) -} - -// HTML renders v as an HTML fragment. One idea for how to use this is to debug -// the current state of the screen reader. -func (v *VT100) HTML() string { - var buf bytes.Buffer - buf.WriteString(`
<pre style="color:white;background-color:black;">`)
-
-	// Iterate each row. When the css changes, close the previous span, and open
-	// a new one. No need to close a span when the css is empty, we won't have
-	// opened one in the past.
-	var lastFormat Format
-	for y, row := range v.Content {
-		for x, r := range row {
-			f := v.Format[y][x]
-			if f != lastFormat {
-				if lastFormat != (Format{}) {
-					buf.WriteString("</span>")
-				}
-				if f != (Format{}) {
-					buf.WriteString(`<span style="` + f.css() + `">`)
-				}
-				lastFormat = f
-			}
-			if s := maybeEscapeRune(r); s != "" {
-				buf.WriteString(s)
-			} else {
-				buf.WriteRune(r)
-			}
-		}
-		buf.WriteRune('\n')
-	}
-	buf.WriteString("</pre>
") - - return buf.String() -} - -// maybeEscapeRune potentially escapes a rune for display in an html document. -// It only escapes the things that html.EscapeString does, but it works without allocating -// a string to hold r. Returns an empty string if there is no need to escape. -func maybeEscapeRune(r rune) string { - switch r { - case '&': - return "&" - case '\'': - return "'" - case '<': - return "<" - case '>': - return ">" - case '"': - return """ - } - return "" -} - -// put puts r onto the current cursor's position, then advances the cursor. -func (v *VT100) put(r rune) { - v.scrollIfNeeded() - v.Content[v.Cursor.Y][v.Cursor.X] = r - v.Format[v.Cursor.Y][v.Cursor.X] = v.Cursor.F - v.advance() -} - -// advance advances the cursor, wrapping to the next line if need be. -func (v *VT100) advance() { - v.Cursor.X++ - if v.Cursor.X >= v.Width { - v.Cursor.X = 0 - v.Cursor.Y++ - } - // if v.Cursor.Y >= v.Height { - // // TODO(jaguilar): if we implement scroll, this should probably scroll. - // // v.Cursor.Y = 0 - // v.scroll() - // } -} - -func (v *VT100) scrollIfNeeded() { - if v.Cursor.Y >= v.Height { - first := v.Content[0] - copy(v.Content, v.Content[1:]) - for i := range first { - first[i] = ' ' - } - v.Content[v.Height-1] = first - v.Cursor.Y = v.Height - 1 - } -} - -// home moves the cursor to the coordinates y x. If y x are out of bounds, v.Err -// is set. -func (v *VT100) home(y, x int) { - v.Cursor.Y, v.Cursor.X = y, x -} - -// eraseDirection is the logical direction in which an erase command happens, -// from the cursor. For both erase commands, forward is 0, backward is 1, -// and everything is 2. -type eraseDirection int - -const ( - // From the cursor to the end, inclusive. - eraseForward eraseDirection = iota - - // From the beginning to the cursor, inclusive. - eraseBack - - // Everything. - eraseAll -) - -// eraseColumns erases columns from the current line. -func (v *VT100) eraseColumns(d eraseDirection) { - y, x := v.Cursor.Y, v.Cursor.X // Aliases for simplicity. - switch d { - case eraseBack: - v.eraseRegion(y, 0, y, x) - case eraseForward: - v.eraseRegion(y, x, y, v.Width-1) - case eraseAll: - v.eraseRegion(y, 0, y, v.Width-1) - } -} - -// eraseLines erases lines from the current terminal. Note that -// no matter what is selected, the entire current line is erased. -func (v *VT100) eraseLines(d eraseDirection) { - y := v.Cursor.Y // Alias for simplicity. - switch d { - case eraseBack: - v.eraseRegion(0, 0, y, v.Width-1) - case eraseForward: - v.eraseRegion(y, 0, v.Height-1, v.Width-1) - case eraseAll: - v.eraseRegion(0, 0, v.Height-1, v.Width-1) - } -} - -func (v *VT100) eraseRegion(y1, x1, y2, x2 int) { - // Do not sanitize or bounds-check these coordinates, since they come from the - // programmer (me). We should panic if any of them are out of bounds. 
- if y1 > y2 { - y1, y2 = y2, y1 - } - if x1 > x2 { - x1, x2 = x2, x1 - } - - for y := y1; y <= y2; y++ { - for x := x1; x <= x2; x++ { - v.clear(y, x) - } - } -} - -func (v *VT100) clear(y, x int) { - if y >= len(v.Content) || x >= len(v.Content[0]) { - return - } - v.Content[y][x] = ' ' - v.Format[y][x] = Format{} -} - -func (v *VT100) backspace() { - v.Cursor.X-- - if v.Cursor.X < 0 { - if v.Cursor.Y == 0 { - v.Cursor.X = 0 - } else { - v.Cursor.Y-- - v.Cursor.X = v.Width - 1 - } - } -} - -func (v *VT100) save() { - v.savedCursor = v.Cursor -} - -func (v *VT100) unsave() { - v.Cursor = v.savedCursor -} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go deleted file mode 100644 index d020e8df77ab..000000000000 --- a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go +++ /dev/null @@ -1,6429 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: control.proto - -package moby_buildkit_v1 - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - _ "github.com/golang/protobuf/ptypes/timestamp" - types "github.com/moby/buildkit/api/types" - pb "github.com/moby/buildkit/solver/pb" - github_com_moby_buildkit_util_entitlements "github.com/moby/buildkit/util/entitlements" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - time "time" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf -var _ = time.Kitchen - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type PruneRequest struct { - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` - All bool `protobuf:"varint,2,opt,name=all,proto3" json:"all,omitempty"` - KeepDuration int64 `protobuf:"varint,3,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` - KeepBytes int64 `protobuf:"varint,4,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PruneRequest) Reset() { *m = PruneRequest{} } -func (m *PruneRequest) String() string { return proto.CompactTextString(m) } -func (*PruneRequest) ProtoMessage() {} -func (*PruneRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{0} -} -func (m *PruneRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PruneRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PruneRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PruneRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PruneRequest.Merge(m, src) -} -func (m *PruneRequest) XXX_Size() int { - return m.Size() -} -func (m *PruneRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PruneRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PruneRequest proto.InternalMessageInfo - -func (m *PruneRequest) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -func (m *PruneRequest) GetAll() bool { - if m != nil { - return m.All - } - return false -} - -func (m *PruneRequest) GetKeepDuration() int64 { - if m != nil { - return m.KeepDuration - } - return 0 -} - -func (m *PruneRequest) GetKeepBytes() int64 { - if m != nil { - return m.KeepBytes - } - return 0 -} - -type DiskUsageRequest struct { - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DiskUsageRequest) Reset() { *m = DiskUsageRequest{} } -func (m *DiskUsageRequest) String() string { return proto.CompactTextString(m) } -func (*DiskUsageRequest) ProtoMessage() {} -func (*DiskUsageRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{1} -} -func (m *DiskUsageRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DiskUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DiskUsageRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DiskUsageRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_DiskUsageRequest.Merge(m, src) -} -func (m *DiskUsageRequest) XXX_Size() int { - return m.Size() -} -func (m *DiskUsageRequest) XXX_DiscardUnknown() { - xxx_messageInfo_DiskUsageRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_DiskUsageRequest proto.InternalMessageInfo - -func (m *DiskUsageRequest) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -type DiskUsageResponse struct { - Record []*UsageRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` - XXX_NoUnkeyedLiteral struct{} 
`json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DiskUsageResponse) Reset() { *m = DiskUsageResponse{} } -func (m *DiskUsageResponse) String() string { return proto.CompactTextString(m) } -func (*DiskUsageResponse) ProtoMessage() {} -func (*DiskUsageResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{2} -} -func (m *DiskUsageResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DiskUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DiskUsageResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DiskUsageResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_DiskUsageResponse.Merge(m, src) -} -func (m *DiskUsageResponse) XXX_Size() int { - return m.Size() -} -func (m *DiskUsageResponse) XXX_DiscardUnknown() { - xxx_messageInfo_DiskUsageResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_DiskUsageResponse proto.InternalMessageInfo - -func (m *DiskUsageResponse) GetRecord() []*UsageRecord { - if m != nil { - return m.Record - } - return nil -} - -type UsageRecord struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` - InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` - Size_ int64 `protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` - Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` - CreatedAt time.Time `protobuf:"bytes,6,opt,name=CreatedAt,proto3,stdtime" json:"CreatedAt"` - LastUsedAt *time.Time `protobuf:"bytes,7,opt,name=LastUsedAt,proto3,stdtime" json:"LastUsedAt,omitempty"` - UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` - Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` - RecordType string `protobuf:"bytes,10,opt,name=RecordType,proto3" json:"RecordType,omitempty"` - Shared bool `protobuf:"varint,11,opt,name=Shared,proto3" json:"Shared,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *UsageRecord) Reset() { *m = UsageRecord{} } -func (m *UsageRecord) String() string { return proto.CompactTextString(m) } -func (*UsageRecord) ProtoMessage() {} -func (*UsageRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{3} -} -func (m *UsageRecord) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_UsageRecord.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *UsageRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_UsageRecord.Merge(m, src) -} -func (m *UsageRecord) XXX_Size() int { - return m.Size() -} -func (m *UsageRecord) XXX_DiscardUnknown() { - xxx_messageInfo_UsageRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_UsageRecord proto.InternalMessageInfo - -func (m *UsageRecord) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *UsageRecord) GetMutable() bool { - if m != nil { - return m.Mutable - } - 
return false -} - -func (m *UsageRecord) GetInUse() bool { - if m != nil { - return m.InUse - } - return false -} - -func (m *UsageRecord) GetSize_() int64 { - if m != nil { - return m.Size_ - } - return 0 -} - -func (m *UsageRecord) GetParent() string { - if m != nil { - return m.Parent - } - return "" -} - -func (m *UsageRecord) GetCreatedAt() time.Time { - if m != nil { - return m.CreatedAt - } - return time.Time{} -} - -func (m *UsageRecord) GetLastUsedAt() *time.Time { - if m != nil { - return m.LastUsedAt - } - return nil -} - -func (m *UsageRecord) GetUsageCount() int64 { - if m != nil { - return m.UsageCount - } - return 0 -} - -func (m *UsageRecord) GetDescription() string { - if m != nil { - return m.Description - } - return "" -} - -func (m *UsageRecord) GetRecordType() string { - if m != nil { - return m.RecordType - } - return "" -} - -func (m *UsageRecord) GetShared() bool { - if m != nil { - return m.Shared - } - return false -} - -type SolveRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"` - Exporter string `protobuf:"bytes,3,opt,name=Exporter,proto3" json:"Exporter,omitempty"` - ExporterAttrs map[string]string `protobuf:"bytes,4,rep,name=ExporterAttrs,proto3" json:"ExporterAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Session string `protobuf:"bytes,5,opt,name=Session,proto3" json:"Session,omitempty"` - Frontend string `protobuf:"bytes,6,opt,name=Frontend,proto3" json:"Frontend,omitempty"` - FrontendAttrs map[string]string `protobuf:"bytes,7,rep,name=FrontendAttrs,proto3" json:"FrontendAttrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Cache CacheOptions `protobuf:"bytes,8,opt,name=Cache,proto3" json:"Cache"` - Entitlements []github_com_moby_buildkit_util_entitlements.Entitlement `protobuf:"bytes,9,rep,name=Entitlements,proto3,customtype=github.com/moby/buildkit/util/entitlements.Entitlement" json:"Entitlements,omitempty"` - FrontendInputs map[string]*pb.Definition `protobuf:"bytes,10,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SolveRequest) Reset() { *m = SolveRequest{} } -func (m *SolveRequest) String() string { return proto.CompactTextString(m) } -func (*SolveRequest) ProtoMessage() {} -func (*SolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{4} -} -func (m *SolveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SolveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SolveRequest.Merge(m, src) -} -func (m *SolveRequest) XXX_Size() int { - return m.Size() -} -func (m *SolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SolveRequest proto.InternalMessageInfo - -func (m *SolveRequest) GetRef() string { - if m != nil { - return m.Ref - } - return 
"" -} - -func (m *SolveRequest) GetDefinition() *pb.Definition { - if m != nil { - return m.Definition - } - return nil -} - -func (m *SolveRequest) GetExporter() string { - if m != nil { - return m.Exporter - } - return "" -} - -func (m *SolveRequest) GetExporterAttrs() map[string]string { - if m != nil { - return m.ExporterAttrs - } - return nil -} - -func (m *SolveRequest) GetSession() string { - if m != nil { - return m.Session - } - return "" -} - -func (m *SolveRequest) GetFrontend() string { - if m != nil { - return m.Frontend - } - return "" -} - -func (m *SolveRequest) GetFrontendAttrs() map[string]string { - if m != nil { - return m.FrontendAttrs - } - return nil -} - -func (m *SolveRequest) GetCache() CacheOptions { - if m != nil { - return m.Cache - } - return CacheOptions{} -} - -func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition { - if m != nil { - return m.FrontendInputs - } - return nil -} - -type CacheOptions struct { - // ExportRefDeprecated is deprecated in favor or the new Exports since BuildKit v0.4.0. - // When ExportRefDeprecated is set, the solver appends - // {.Type = "registry", .Attrs = ExportAttrs.add("ref", ExportRef)} - // to Exports for compatibility. (planned to be removed) - ExportRefDeprecated string `protobuf:"bytes,1,opt,name=ExportRefDeprecated,proto3" json:"ExportRefDeprecated,omitempty"` - // ImportRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0. - // When ImportRefsDeprecated is set, the solver appends - // {.Type = "registry", .Attrs = {"ref": importRef}} - // for each of the ImportRefs entry to Imports for compatibility. (planned to be removed) - ImportRefsDeprecated []string `protobuf:"bytes,2,rep,name=ImportRefsDeprecated,proto3" json:"ImportRefsDeprecated,omitempty"` - // ExportAttrsDeprecated is deprecated since BuildKit v0.4.0. - // See the description of ExportRefDeprecated. - ExportAttrsDeprecated map[string]string `protobuf:"bytes,3,rep,name=ExportAttrsDeprecated,proto3" json:"ExportAttrsDeprecated,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Exports was introduced in BuildKit v0.4.0. - Exports []*CacheOptionsEntry `protobuf:"bytes,4,rep,name=Exports,proto3" json:"Exports,omitempty"` - // Imports was introduced in BuildKit v0.4.0. 
- Imports []*CacheOptionsEntry `protobuf:"bytes,5,rep,name=Imports,proto3" json:"Imports,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CacheOptions) Reset() { *m = CacheOptions{} } -func (m *CacheOptions) String() string { return proto.CompactTextString(m) } -func (*CacheOptions) ProtoMessage() {} -func (*CacheOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{5} -} -func (m *CacheOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CacheOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CacheOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CacheOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CacheOptions.Merge(m, src) -} -func (m *CacheOptions) XXX_Size() int { - return m.Size() -} -func (m *CacheOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CacheOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_CacheOptions proto.InternalMessageInfo - -func (m *CacheOptions) GetExportRefDeprecated() string { - if m != nil { - return m.ExportRefDeprecated - } - return "" -} - -func (m *CacheOptions) GetImportRefsDeprecated() []string { - if m != nil { - return m.ImportRefsDeprecated - } - return nil -} - -func (m *CacheOptions) GetExportAttrsDeprecated() map[string]string { - if m != nil { - return m.ExportAttrsDeprecated - } - return nil -} - -func (m *CacheOptions) GetExports() []*CacheOptionsEntry { - if m != nil { - return m.Exports - } - return nil -} - -func (m *CacheOptions) GetImports() []*CacheOptionsEntry { - if m != nil { - return m.Imports - } - return nil -} - -type CacheOptionsEntry struct { - // Type is like "registry" or "local" - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` - // Attrs are like mode=(min,max), ref=example.com:5000/foo/bar . - // See cache importer/exporter implementations' documentation. 
- Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CacheOptionsEntry) Reset() { *m = CacheOptionsEntry{} } -func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) } -func (*CacheOptionsEntry) ProtoMessage() {} -func (*CacheOptionsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{6} -} -func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CacheOptionsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CacheOptionsEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CacheOptionsEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_CacheOptionsEntry.Merge(m, src) -} -func (m *CacheOptionsEntry) XXX_Size() int { - return m.Size() -} -func (m *CacheOptionsEntry) XXX_DiscardUnknown() { - xxx_messageInfo_CacheOptionsEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_CacheOptionsEntry proto.InternalMessageInfo - -func (m *CacheOptionsEntry) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *CacheOptionsEntry) GetAttrs() map[string]string { - if m != nil { - return m.Attrs - } - return nil -} - -type SolveResponse struct { - ExporterResponse map[string]string `protobuf:"bytes,1,rep,name=ExporterResponse,proto3" json:"ExporterResponse,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SolveResponse) Reset() { *m = SolveResponse{} } -func (m *SolveResponse) String() string { return proto.CompactTextString(m) } -func (*SolveResponse) ProtoMessage() {} -func (*SolveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{7} -} -func (m *SolveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SolveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SolveResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SolveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SolveResponse.Merge(m, src) -} -func (m *SolveResponse) XXX_Size() int { - return m.Size() -} -func (m *SolveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SolveResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SolveResponse proto.InternalMessageInfo - -func (m *SolveResponse) GetExporterResponse() map[string]string { - if m != nil { - return m.ExporterResponse - } - return nil -} - -type StatusRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusRequest) Reset() { *m = StatusRequest{} } -func (m *StatusRequest) String() string { return proto.CompactTextString(m) } -func (*StatusRequest) ProtoMessage() {} -func (*StatusRequest) Descriptor() ([]byte, []int) { - return 
fileDescriptor_0c5120591600887d, []int{8} -} -func (m *StatusRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusRequest.Merge(m, src) -} -func (m *StatusRequest) XXX_Size() int { - return m.Size() -} -func (m *StatusRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatusRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusRequest proto.InternalMessageInfo - -func (m *StatusRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -type StatusResponse struct { - Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes,proto3" json:"vertexes,omitempty"` - Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses,proto3" json:"statuses,omitempty"` - Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs,proto3" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatusResponse) Reset() { *m = StatusResponse{} } -func (m *StatusResponse) String() string { return proto.CompactTextString(m) } -func (*StatusResponse) ProtoMessage() {} -func (*StatusResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{9} -} -func (m *StatusResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatusResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusResponse.Merge(m, src) -} -func (m *StatusResponse) XXX_Size() int { - return m.Size() -} -func (m *StatusResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatusResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatusResponse proto.InternalMessageInfo - -func (m *StatusResponse) GetVertexes() []*Vertex { - if m != nil { - return m.Vertexes - } - return nil -} - -func (m *StatusResponse) GetStatuses() []*VertexStatus { - if m != nil { - return m.Statuses - } - return nil -} - -func (m *StatusResponse) GetLogs() []*VertexLog { - if m != nil { - return m.Logs - } - return nil -} - -type Vertex struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - Inputs []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=inputs,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"inputs"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Cached bool `protobuf:"varint,4,opt,name=cached,proto3" json:"cached,omitempty"` - Started *time.Time `protobuf:"bytes,5,opt,name=started,proto3,stdtime" json:"started,omitempty"` - Completed *time.Time `protobuf:"bytes,6,opt,name=completed,proto3,stdtime" json:"completed,omitempty"` - Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 
`json:"-"` -} - -func (m *Vertex) Reset() { *m = Vertex{} } -func (m *Vertex) String() string { return proto.CompactTextString(m) } -func (*Vertex) ProtoMessage() {} -func (*Vertex) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{10} -} -func (m *Vertex) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Vertex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Vertex.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Vertex) XXX_Merge(src proto.Message) { - xxx_messageInfo_Vertex.Merge(m, src) -} -func (m *Vertex) XXX_Size() int { - return m.Size() -} -func (m *Vertex) XXX_DiscardUnknown() { - xxx_messageInfo_Vertex.DiscardUnknown(m) -} - -var xxx_messageInfo_Vertex proto.InternalMessageInfo - -func (m *Vertex) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Vertex) GetCached() bool { - if m != nil { - return m.Cached - } - return false -} - -func (m *Vertex) GetStarted() *time.Time { - if m != nil { - return m.Started - } - return nil -} - -func (m *Vertex) GetCompleted() *time.Time { - if m != nil { - return m.Completed - } - return nil -} - -func (m *Vertex) GetError() string { - if m != nil { - return m.Error - } - return "" -} - -type VertexStatus struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - // TODO: add started, completed - Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Started *time.Time `protobuf:"bytes,7,opt,name=started,proto3,stdtime" json:"started,omitempty"` - Completed *time.Time `protobuf:"bytes,8,opt,name=completed,proto3,stdtime" json:"completed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VertexStatus) Reset() { *m = VertexStatus{} } -func (m *VertexStatus) String() string { return proto.CompactTextString(m) } -func (*VertexStatus) ProtoMessage() {} -func (*VertexStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{11} -} -func (m *VertexStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VertexStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VertexStatus.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VertexStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_VertexStatus.Merge(m, src) -} -func (m *VertexStatus) XXX_Size() int { - return m.Size() -} -func (m *VertexStatus) XXX_DiscardUnknown() { - xxx_messageInfo_VertexStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_VertexStatus proto.InternalMessageInfo - -func (m *VertexStatus) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *VertexStatus) GetName() string { - if m != nil { - return m.Name - } - return "" 
-} - -func (m *VertexStatus) GetCurrent() int64 { - if m != nil { - return m.Current - } - return 0 -} - -func (m *VertexStatus) GetTotal() int64 { - if m != nil { - return m.Total - } - return 0 -} - -func (m *VertexStatus) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -func (m *VertexStatus) GetStarted() *time.Time { - if m != nil { - return m.Started - } - return nil -} - -func (m *VertexStatus) GetCompleted() *time.Time { - if m != nil { - return m.Completed - } - return nil -} - -type VertexLog struct { - Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` - Timestamp time.Time `protobuf:"bytes,2,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Stream int64 `protobuf:"varint,3,opt,name=stream,proto3" json:"stream,omitempty"` - Msg []byte `protobuf:"bytes,4,opt,name=msg,proto3" json:"msg,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *VertexLog) Reset() { *m = VertexLog{} } -func (m *VertexLog) String() string { return proto.CompactTextString(m) } -func (*VertexLog) ProtoMessage() {} -func (*VertexLog) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{12} -} -func (m *VertexLog) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VertexLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VertexLog.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VertexLog) XXX_Merge(src proto.Message) { - xxx_messageInfo_VertexLog.Merge(m, src) -} -func (m *VertexLog) XXX_Size() int { - return m.Size() -} -func (m *VertexLog) XXX_DiscardUnknown() { - xxx_messageInfo_VertexLog.DiscardUnknown(m) -} - -var xxx_messageInfo_VertexLog proto.InternalMessageInfo - -func (m *VertexLog) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - -func (m *VertexLog) GetStream() int64 { - if m != nil { - return m.Stream - } - return 0 -} - -func (m *VertexLog) GetMsg() []byte { - if m != nil { - return m.Msg - } - return nil -} - -type BytesMessage struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *BytesMessage) Reset() { *m = BytesMessage{} } -func (m *BytesMessage) String() string { return proto.CompactTextString(m) } -func (*BytesMessage) ProtoMessage() {} -func (*BytesMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{13} -} -func (m *BytesMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BytesMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesMessage.Merge(m, src) -} -func (m *BytesMessage) XXX_Size() int { - return m.Size() -} -func (m *BytesMessage) XXX_DiscardUnknown() { - xxx_messageInfo_BytesMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesMessage 
proto.InternalMessageInfo - -func (m *BytesMessage) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type ListWorkersRequest struct { - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListWorkersRequest) Reset() { *m = ListWorkersRequest{} } -func (m *ListWorkersRequest) String() string { return proto.CompactTextString(m) } -func (*ListWorkersRequest) ProtoMessage() {} -func (*ListWorkersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{14} -} -func (m *ListWorkersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListWorkersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListWorkersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListWorkersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListWorkersRequest.Merge(m, src) -} -func (m *ListWorkersRequest) XXX_Size() int { - return m.Size() -} -func (m *ListWorkersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ListWorkersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ListWorkersRequest proto.InternalMessageInfo - -func (m *ListWorkersRequest) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -type ListWorkersResponse struct { - Record []*types.WorkerRecord `protobuf:"bytes,1,rep,name=record,proto3" json:"record,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ListWorkersResponse) Reset() { *m = ListWorkersResponse{} } -func (m *ListWorkersResponse) String() string { return proto.CompactTextString(m) } -func (*ListWorkersResponse) ProtoMessage() {} -func (*ListWorkersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{15} -} -func (m *ListWorkersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListWorkersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ListWorkersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ListWorkersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListWorkersResponse.Merge(m, src) -} -func (m *ListWorkersResponse) XXX_Size() int { - return m.Size() -} -func (m *ListWorkersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ListWorkersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ListWorkersResponse proto.InternalMessageInfo - -func (m *ListWorkersResponse) GetRecord() []*types.WorkerRecord { - if m != nil { - return m.Record - } - return nil -} - -func init() { - proto.RegisterType((*PruneRequest)(nil), "moby.buildkit.v1.PruneRequest") - proto.RegisterType((*DiskUsageRequest)(nil), "moby.buildkit.v1.DiskUsageRequest") - proto.RegisterType((*DiskUsageResponse)(nil), "moby.buildkit.v1.DiskUsageResponse") - proto.RegisterType((*UsageRecord)(nil), "moby.buildkit.v1.UsageRecord") - proto.RegisterType((*SolveRequest)(nil), "moby.buildkit.v1.SolveRequest") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.ExporterAttrsEntry") - 
proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveRequest.FrontendAttrsEntry") - proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.SolveRequest.FrontendInputsEntry") - proto.RegisterType((*CacheOptions)(nil), "moby.buildkit.v1.CacheOptions") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptions.ExportAttrsDeprecatedEntry") - proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.CacheOptionsEntry") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.CacheOptionsEntry.AttrsEntry") - proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.SolveResponse") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.SolveResponse.ExporterResponseEntry") - proto.RegisterType((*StatusRequest)(nil), "moby.buildkit.v1.StatusRequest") - proto.RegisterType((*StatusResponse)(nil), "moby.buildkit.v1.StatusResponse") - proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex") - proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus") - proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog") - proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") - proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") - proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") -} - -func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } - -var fileDescriptor_0c5120591600887d = []byte{ - // 1397 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xef, 0xda, 0xf1, 0xdb, 0x63, 0x27, 0x4a, 0xa7, 0xfd, 0x57, 0xab, 0xfd, 0x8b, 0xc4, 0x6c, - 0x8b, 0x64, 0x55, 0xed, 0x3a, 0x35, 0x14, 0x95, 0x08, 0x50, 0xeb, 0xb8, 0xa8, 0xa9, 0x1a, 0x51, - 0x36, 0x2d, 0x95, 0x7a, 0x40, 0x5a, 0xdb, 0x13, 0x77, 0x95, 0xf5, 0xce, 0x32, 0x33, 0x1b, 0x6a, - 0x3e, 0x00, 0x67, 0xbe, 0x03, 0x07, 0x4e, 0x9c, 0x38, 0xf0, 0x09, 0x90, 0x7a, 0xe4, 0xdc, 0x43, - 0x40, 0xb9, 0xc3, 0x9d, 0x1b, 0x9a, 0x97, 0x75, 0xc6, 0xb1, 0x9d, 0xc4, 0xe9, 0xc9, 0xf3, 0x8c, - 0x9f, 0xdf, 0x6f, 0x9f, 0xd7, 0x99, 0x79, 0x60, 0xb9, 0x47, 0x62, 0x4e, 0x49, 0xe4, 0x25, 0x94, - 0x70, 0x82, 0x56, 0x87, 0xa4, 0x3b, 0xf2, 0xba, 0x69, 0x18, 0xf5, 0xf7, 0x43, 0xee, 0x1d, 0xdc, - 0x71, 0x6e, 0x0f, 0x42, 0xfe, 0x2a, 0xed, 0x7a, 0x3d, 0x32, 0x6c, 0x0e, 0xc8, 0x80, 0x34, 0xa5, - 0x62, 0x37, 0xdd, 0x93, 0x92, 0x14, 0xe4, 0x4a, 0x11, 0x38, 0xeb, 0x03, 0x42, 0x06, 0x11, 0x3e, - 0xd6, 0xe2, 0xe1, 0x10, 0x33, 0x1e, 0x0c, 0x13, 0xad, 0x70, 0xcb, 0xe0, 0x13, 0x1f, 0x6b, 0x66, - 0x1f, 0x6b, 0x32, 0x12, 0x1d, 0x60, 0xda, 0x4c, 0xba, 0x4d, 0x92, 0x30, 0xad, 0xdd, 0x9c, 0xab, - 0x1d, 0x24, 0x61, 0x93, 0x8f, 0x12, 0xcc, 0x9a, 0xdf, 0x11, 0xba, 0x8f, 0xa9, 0x02, 0xb8, 0x3f, - 0x58, 0x50, 0x7b, 0x4a, 0xd3, 0x18, 0xfb, 0xf8, 0xdb, 0x14, 0x33, 0x8e, 0xae, 0x41, 0x71, 0x2f, - 0x8c, 0x38, 0xa6, 0xb6, 0x55, 0xcf, 0x37, 0x2a, 0xbe, 0x96, 0xd0, 0x2a, 0xe4, 0x83, 0x28, 0xb2, - 0x73, 0x75, 0xab, 0x51, 0xf6, 0xc5, 0x12, 0x35, 0xa0, 0xb6, 0x8f, 0x71, 0xd2, 0x49, 0x69, 0xc0, - 0x43, 0x12, 0xdb, 0xf9, 0xba, 0xd5, 0xc8, 0xb7, 0x97, 0xde, 0x1c, 0xae, 0x5b, 0xfe, 0xc4, 0x3f, - 0xc8, 0x85, 0x8a, 0x90, 0xdb, 0x23, 0x8e, 0x99, 0xbd, 0x64, 0xa8, 0x1d, 0x6f, 0xbb, 0x37, 0x61, - 0xb5, 0x13, 0xb2, 0xfd, 0xe7, 0x2c, 0x18, 0x9c, 0x65, 0x8b, 0xfb, 0x18, 0x2e, 0x1b, 0xba, 0x2c, - 0x21, 0x31, 0xc3, 0xe8, 0x2e, 0x14, 0x29, 0xee, 0x11, 0xda, 0x97, 0xca, 0xd5, 0xd6, 0x7b, 0xde, - 0xc9, 0xdc, 
0x78, 0x1a, 0x20, 0x94, 0x7c, 0xad, 0xec, 0xfe, 0x9b, 0x83, 0xaa, 0xb1, 0x8f, 0x56, - 0x20, 0xb7, 0xdd, 0xb1, 0xad, 0xba, 0xd5, 0xa8, 0xf8, 0xb9, 0xed, 0x0e, 0xb2, 0xa1, 0xb4, 0x93, - 0xf2, 0xa0, 0x1b, 0x61, 0xed, 0x7b, 0x26, 0xa2, 0xab, 0x50, 0xd8, 0x8e, 0x9f, 0x33, 0x2c, 0x1d, - 0x2f, 0xfb, 0x4a, 0x40, 0x08, 0x96, 0x76, 0xc3, 0xef, 0xb1, 0x72, 0xd3, 0x97, 0x6b, 0xe1, 0xc7, - 0xd3, 0x80, 0xe2, 0x98, 0xdb, 0x05, 0xc9, 0xab, 0x25, 0xd4, 0x86, 0xca, 0x16, 0xc5, 0x01, 0xc7, - 0xfd, 0x07, 0xdc, 0x2e, 0xd6, 0xad, 0x46, 0xb5, 0xe5, 0x78, 0xaa, 0x20, 0xbc, 0xac, 0x20, 0xbc, - 0x67, 0x59, 0x41, 0xb4, 0xcb, 0x6f, 0x0e, 0xd7, 0x2f, 0xfd, 0xf8, 0xa7, 0x88, 0xdb, 0x18, 0x86, - 0xee, 0x03, 0x3c, 0x09, 0x18, 0x7f, 0xce, 0x24, 0x49, 0xe9, 0x4c, 0x92, 0x25, 0x49, 0x60, 0x60, - 0xd0, 0x1a, 0x80, 0x0c, 0xc0, 0x16, 0x49, 0x63, 0x6e, 0x97, 0xa5, 0xdd, 0xc6, 0x0e, 0xaa, 0x43, - 0xb5, 0x83, 0x59, 0x8f, 0x86, 0x89, 0x4c, 0x73, 0x45, 0xba, 0x60, 0x6e, 0x09, 0x06, 0x15, 0xbd, - 0x67, 0xa3, 0x04, 0xdb, 0x20, 0x15, 0x8c, 0x1d, 0xe1, 0xff, 0xee, 0xab, 0x80, 0xe2, 0xbe, 0x5d, - 0x95, 0xa1, 0xd2, 0x92, 0xfb, 0x53, 0x11, 0x6a, 0xbb, 0xa2, 0x8a, 0xb3, 0x84, 0xaf, 0x42, 0xde, - 0xc7, 0x7b, 0x3a, 0xfa, 0x62, 0x89, 0x3c, 0x80, 0x0e, 0xde, 0x0b, 0xe3, 0x50, 0x7e, 0x3b, 0x27, - 0xdd, 0x5b, 0xf1, 0x92, 0xae, 0x77, 0xbc, 0xeb, 0x1b, 0x1a, 0xc8, 0x81, 0xf2, 0xc3, 0xd7, 0x09, - 0xa1, 0xa2, 0x68, 0xf2, 0x92, 0x66, 0x2c, 0xa3, 0x17, 0xb0, 0x9c, 0xad, 0x1f, 0x70, 0x4e, 0x45, - 0x29, 0x8a, 0x42, 0xb9, 0x33, 0x5d, 0x28, 0xa6, 0x51, 0xde, 0x04, 0xe6, 0x61, 0xcc, 0xe9, 0xc8, - 0x9f, 0xe4, 0x11, 0x35, 0xb2, 0x8b, 0x19, 0x13, 0x16, 0xaa, 0x04, 0x67, 0xa2, 0x30, 0xe7, 0x0b, - 0x4a, 0x62, 0x8e, 0xe3, 0xbe, 0x4c, 0x70, 0xc5, 0x1f, 0xcb, 0xc2, 0x9c, 0x6c, 0xad, 0xcc, 0x29, - 0x9d, 0xcb, 0x9c, 0x09, 0x8c, 0x36, 0x67, 0x62, 0x0f, 0x6d, 0x42, 0x61, 0x2b, 0xe8, 0xbd, 0xc2, - 0x32, 0x97, 0xd5, 0xd6, 0xda, 0x34, 0xa1, 0xfc, 0xfb, 0x4b, 0x99, 0x3c, 0x26, 0x5b, 0xf1, 0x92, - 0xaf, 0x20, 0xe8, 0x1b, 0xa8, 0x3d, 0x8c, 0x79, 0xc8, 0x23, 0x3c, 0xc4, 0x31, 0x67, 0x76, 0x45, - 0x34, 0x5e, 0x7b, 0xf3, 0xed, 0xe1, 0xfa, 0xc7, 0x73, 0x8f, 0x96, 0x94, 0x87, 0x51, 0x13, 0x1b, - 0x28, 0xcf, 0xa0, 0xf0, 0x27, 0xf8, 0xd0, 0x4b, 0x58, 0xc9, 0x8c, 0xdd, 0x8e, 0x93, 0x94, 0x33, - 0x1b, 0xa4, 0xd7, 0xad, 0x73, 0x7a, 0xad, 0x40, 0xca, 0xed, 0x13, 0x4c, 0xce, 0x7d, 0x40, 0xd3, - 0xb9, 0x12, 0x35, 0xb5, 0x8f, 0x47, 0x59, 0x4d, 0xed, 0xe3, 0x91, 0x68, 0xdc, 0x83, 0x20, 0x4a, - 0x55, 0x43, 0x57, 0x7c, 0x25, 0x6c, 0xe6, 0xee, 0x59, 0x82, 0x61, 0x3a, 0xbc, 0x0b, 0x31, 0x7c, - 0x05, 0x57, 0x66, 0x98, 0x3a, 0x83, 0xe2, 0x86, 0x49, 0x31, 0x5d, 0xd3, 0xc7, 0x94, 0xee, 0x2f, - 0x79, 0xa8, 0x99, 0x09, 0x43, 0x1b, 0x70, 0x45, 0xf9, 0xe9, 0xe3, 0xbd, 0x0e, 0x4e, 0x28, 0xee, - 0x89, 0xb3, 0x40, 0x93, 0xcf, 0xfa, 0x0b, 0xb5, 0xe0, 0xea, 0xf6, 0x50, 0x6f, 0x33, 0x03, 0x92, - 0x93, 0xc7, 0xea, 0xcc, 0xff, 0x10, 0x81, 0xff, 0x29, 0x2a, 0x19, 0x09, 0x03, 0x94, 0x97, 0x09, - 0xfb, 0xe4, 0xf4, 0xaa, 0xf2, 0x66, 0x62, 0x55, 0xde, 0x66, 0xf3, 0xa2, 0xcf, 0xa0, 0xa4, 0xfe, - 0xc8, 0x1a, 0xf3, 0xfa, 0xe9, 0x9f, 0x50, 0x64, 0x19, 0x46, 0xc0, 0x95, 0x1f, 0xcc, 0x2e, 0x2c, - 0x00, 0xd7, 0x18, 0xe7, 0x11, 0x38, 0xf3, 0x4d, 0x5e, 0xa4, 0x04, 0xdc, 0x9f, 0x2d, 0xb8, 0x3c, - 0xf5, 0x21, 0x71, 0x2f, 0xc8, 0xd3, 0x51, 0x51, 0xc8, 0x35, 0xea, 0x40, 0x41, 0x75, 0x7e, 0x4e, - 0x1a, 0xec, 0x9d, 0xc3, 0x60, 0xcf, 0x68, 0x7b, 0x05, 0x76, 0xee, 0x01, 0x5c, 0xac, 0x58, 0xdd, - 0xdf, 0x2c, 0x58, 0xd6, 0x5d, 0xa6, 0x2f, 0xd1, 0x00, 0x56, 0xb3, 0x16, 0xca, 0xf6, 0xf4, 0x75, - 0x7a, 0x77, 0x6e, 0x83, 0x2a, 0x35, 
0xef, 0x24, 0x4e, 0xd9, 0x38, 0x45, 0xe7, 0x6c, 0x65, 0x75, - 0x75, 0x42, 0x75, 0x21, 0xcb, 0xdf, 0x87, 0xe5, 0x5d, 0x1e, 0xf0, 0x94, 0xcd, 0xbd, 0x39, 0xdc, - 0x5f, 0x2d, 0x58, 0xc9, 0x74, 0xb4, 0x77, 0x1f, 0x41, 0xf9, 0x00, 0x53, 0x8e, 0x5f, 0x63, 0xa6, - 0xbd, 0xb2, 0xa7, 0xbd, 0xfa, 0x5a, 0x6a, 0xf8, 0x63, 0x4d, 0xb4, 0x09, 0x65, 0x26, 0x79, 0x70, - 0x96, 0xa8, 0xb5, 0x79, 0x28, 0xfd, 0xbd, 0xb1, 0x3e, 0x6a, 0xc2, 0x52, 0x44, 0x06, 0x4c, 0xf7, - 0xcc, 0xff, 0xe7, 0xe1, 0x9e, 0x90, 0x81, 0x2f, 0x15, 0xdd, 0xc3, 0x1c, 0x14, 0xd5, 0x1e, 0x7a, - 0x0c, 0xc5, 0x7e, 0x38, 0xc0, 0x8c, 0x2b, 0xaf, 0xda, 0x2d, 0x71, 0x4e, 0xbf, 0x3d, 0x5c, 0xbf, - 0x69, 0x1c, 0xc4, 0x24, 0xc1, 0xb1, 0x78, 0x91, 0x06, 0x61, 0x8c, 0x29, 0x6b, 0x0e, 0xc8, 0x6d, - 0x05, 0xf1, 0x3a, 0xf2, 0xc7, 0xd7, 0x0c, 0x82, 0x2b, 0x54, 0xc7, 0xad, 0x6c, 0xf9, 0x8b, 0x71, - 0x29, 0x06, 0x51, 0xc9, 0x71, 0x30, 0xc4, 0xfa, 0x7a, 0x95, 0x6b, 0x71, 0xc3, 0xf7, 0x44, 0xa9, - 0xf6, 0xe5, 0xbb, 0xa7, 0xec, 0x6b, 0x09, 0x6d, 0x42, 0x89, 0xf1, 0x80, 0x8a, 0x63, 0xa3, 0x70, - 0xce, 0xa7, 0x49, 0x06, 0x40, 0x9f, 0x43, 0xa5, 0x47, 0x86, 0x49, 0x84, 0x05, 0xba, 0x78, 0x4e, - 0xf4, 0x31, 0x44, 0x54, 0x0f, 0xa6, 0x94, 0x50, 0xf9, 0x28, 0xaa, 0xf8, 0x4a, 0x70, 0xff, 0xc9, - 0x41, 0xcd, 0x4c, 0xd6, 0xd4, 0x83, 0xef, 0x31, 0x14, 0x55, 0xea, 0x55, 0xd5, 0x5d, 0x2c, 0x54, - 0x8a, 0x61, 0x66, 0xa8, 0x6c, 0x28, 0xf5, 0x52, 0x2a, 0x5f, 0x83, 0xea, 0x8d, 0x98, 0x89, 0xc2, - 0x60, 0x4e, 0x78, 0x10, 0xc9, 0x50, 0xe5, 0x7d, 0x25, 0x88, 0x47, 0xe2, 0x78, 0x26, 0x58, 0xec, - 0x91, 0x38, 0x86, 0x99, 0x69, 0x28, 0xbd, 0x53, 0x1a, 0xca, 0x0b, 0xa7, 0xc1, 0xfd, 0xdd, 0x82, - 0xca, 0xb8, 0xca, 0x8d, 0xe8, 0x5a, 0xef, 0x1c, 0xdd, 0x89, 0xc8, 0xe4, 0x2e, 0x16, 0x99, 0x6b, - 0x50, 0x64, 0x9c, 0xe2, 0x60, 0xa8, 0xc6, 0x17, 0x5f, 0x4b, 0xe2, 0x3c, 0x19, 0xb2, 0x81, 0xcc, - 0x50, 0xcd, 0x17, 0x4b, 0xd7, 0x85, 0x9a, 0x9c, 0x54, 0x76, 0x30, 0x13, 0x6f, 0x63, 0x91, 0xdb, - 0x7e, 0xc0, 0x03, 0xe9, 0x47, 0xcd, 0x97, 0x6b, 0xf7, 0x16, 0xa0, 0x27, 0x21, 0xe3, 0x2f, 0xe4, - 0x84, 0xc5, 0xce, 0x1a, 0x63, 0x76, 0xe1, 0xca, 0x84, 0xb6, 0x3e, 0xa5, 0x3e, 0x3d, 0x31, 0xc8, - 0xdc, 0x98, 0x3e, 0x35, 0xe4, 0x20, 0xe7, 0x29, 0xe0, 0xe4, 0x3c, 0xd3, 0xfa, 0x3b, 0x0f, 0xa5, - 0x2d, 0x35, 0xa3, 0xa2, 0x67, 0x50, 0x19, 0xcf, 0x49, 0xc8, 0x9d, 0xa6, 0x39, 0x39, 0x70, 0x39, - 0xd7, 0x4f, 0xd5, 0xd1, 0xf6, 0x3d, 0x82, 0x82, 0x9c, 0x18, 0xd1, 0x8c, 0x63, 0xd0, 0x1c, 0x25, - 0x9d, 0xd3, 0x27, 0xb0, 0x0d, 0x4b, 0x30, 0xc9, 0x3b, 0x64, 0x16, 0x93, 0xf9, 0xfa, 0x73, 0xd6, - 0xcf, 0xb8, 0x7c, 0xd0, 0x0e, 0x14, 0x75, 0x3b, 0xcf, 0x52, 0x35, 0x6f, 0x0a, 0xa7, 0x3e, 0x5f, - 0x41, 0x91, 0x6d, 0x58, 0x68, 0x67, 0xfc, 0xa0, 0x9f, 0x65, 0x9a, 0x59, 0x06, 0xce, 0x19, 0xff, - 0x37, 0xac, 0x0d, 0x0b, 0xbd, 0x84, 0xaa, 0x91, 0x68, 0x34, 0x23, 0xa1, 0xd3, 0x55, 0xe3, 0x7c, - 0x70, 0x86, 0x96, 0x32, 0xb6, 0x5d, 0x7b, 0x73, 0xb4, 0x66, 0xfd, 0x71, 0xb4, 0x66, 0xfd, 0x75, - 0xb4, 0x66, 0x75, 0x8b, 0xb2, 0xee, 0x3f, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x65, 0x7c, - 0xd6, 0xa7, 0x10, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// ControlClient is the client API for Control service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type ControlClient interface { - DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) - Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) - Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) - Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) - Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) - ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) -} - -type controlClient struct { - cc *grpc.ClientConn -} - -func NewControlClient(cc *grpc.ClientConn) ControlClient { - return &controlClient{cc} -} - -func (c *controlClient) DiskUsage(ctx context.Context, in *DiskUsageRequest, opts ...grpc.CallOption) (*DiskUsageResponse, error) { - out := new(DiskUsageResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/DiskUsage", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlClient) Prune(ctx context.Context, in *PruneRequest, opts ...grpc.CallOption) (Control_PruneClient, error) { - stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[0], "/moby.buildkit.v1.Control/Prune", opts...) - if err != nil { - return nil, err - } - x := &controlPruneClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Control_PruneClient interface { - Recv() (*UsageRecord, error) - grpc.ClientStream -} - -type controlPruneClient struct { - grpc.ClientStream -} - -func (x *controlPruneClient) Recv() (*UsageRecord, error) { - m := new(UsageRecord) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *controlClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { - out := new(SolveResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/Solve", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *controlClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (Control_StatusClient, error) { - stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[1], "/moby.buildkit.v1.Control/Status", opts...) - if err != nil { - return nil, err - } - x := &controlStatusClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Control_StatusClient interface { - Recv() (*StatusResponse, error) - grpc.ClientStream -} - -type controlStatusClient struct { - grpc.ClientStream -} - -func (x *controlStatusClient) Recv() (*StatusResponse, error) { - m := new(StatusResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *controlClient) Session(ctx context.Context, opts ...grpc.CallOption) (Control_SessionClient, error) { - stream, err := c.cc.NewStream(ctx, &_Control_serviceDesc.Streams[2], "/moby.buildkit.v1.Control/Session", opts...) 
- if err != nil { - return nil, err - } - x := &controlSessionClient{stream} - return x, nil -} - -type Control_SessionClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream -} - -type controlSessionClient struct { - grpc.ClientStream -} - -func (x *controlSessionClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *controlSessionClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *controlClient) ListWorkers(ctx context.Context, in *ListWorkersRequest, opts ...grpc.CallOption) (*ListWorkersResponse, error) { - out := new(ListWorkersResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.Control/ListWorkers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ControlServer is the server API for Control service. -type ControlServer interface { - DiskUsage(context.Context, *DiskUsageRequest) (*DiskUsageResponse, error) - Prune(*PruneRequest, Control_PruneServer) error - Solve(context.Context, *SolveRequest) (*SolveResponse, error) - Status(*StatusRequest, Control_StatusServer) error - Session(Control_SessionServer) error - ListWorkers(context.Context, *ListWorkersRequest) (*ListWorkersResponse, error) -} - -// UnimplementedControlServer can be embedded to have forward compatible implementations. -type UnimplementedControlServer struct { -} - -func (*UnimplementedControlServer) DiskUsage(ctx context.Context, req *DiskUsageRequest) (*DiskUsageResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DiskUsage not implemented") -} -func (*UnimplementedControlServer) Prune(req *PruneRequest, srv Control_PruneServer) error { - return status.Errorf(codes.Unimplemented, "method Prune not implemented") -} -func (*UnimplementedControlServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") -} -func (*UnimplementedControlServer) Status(req *StatusRequest, srv Control_StatusServer) error { - return status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedControlServer) Session(srv Control_SessionServer) error { - return status.Errorf(codes.Unimplemented, "method Session not implemented") -} -func (*UnimplementedControlServer) ListWorkers(ctx context.Context, req *ListWorkersRequest) (*ListWorkersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ListWorkers not implemented") -} - -func RegisterControlServer(s *grpc.Server, srv ControlServer) { - s.RegisterService(&_Control_serviceDesc, srv) -} - -func _Control_DiskUsage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DiskUsageRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServer).DiskUsage(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.Control/DiskUsage", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServer).DiskUsage(ctx, req.(*DiskUsageRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Control_Prune_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(PruneRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return 
srv.(ControlServer).Prune(m, &controlPruneServer{stream}) -} - -type Control_PruneServer interface { - Send(*UsageRecord) error - grpc.ServerStream -} - -type controlPruneServer struct { - grpc.ServerStream -} - -func (x *controlPruneServer) Send(m *UsageRecord) error { - return x.ServerStream.SendMsg(m) -} - -func _Control_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SolveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServer).Solve(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.Control/Solve", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServer).Solve(ctx, req.(*SolveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Control_Status_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(StatusRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(ControlServer).Status(m, &controlStatusServer{stream}) -} - -type Control_StatusServer interface { - Send(*StatusResponse) error - grpc.ServerStream -} - -type controlStatusServer struct { - grpc.ServerStream -} - -func (x *controlStatusServer) Send(m *StatusResponse) error { - return x.ServerStream.SendMsg(m) -} - -func _Control_Session_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(ControlServer).Session(&controlSessionServer{stream}) -} - -type Control_SessionServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream -} - -type controlSessionServer struct { - grpc.ServerStream -} - -func (x *controlSessionServer) Send(m *BytesMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *controlSessionServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _Control_ListWorkers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListWorkersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ControlServer).ListWorkers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.Control/ListWorkers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ControlServer).ListWorkers(ctx, req.(*ListWorkersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Control_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.buildkit.v1.Control", - HandlerType: (*ControlServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "DiskUsage", - Handler: _Control_DiskUsage_Handler, - }, - { - MethodName: "Solve", - Handler: _Control_Solve_Handler, - }, - { - MethodName: "ListWorkers", - Handler: _Control_ListWorkers_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Prune", - Handler: _Control_Prune_Handler, - ServerStreams: true, - }, - { - StreamName: "Status", - Handler: _Control_Status_Handler, - ServerStreams: true, - }, - { - StreamName: "Session", - Handler: _Control_Session_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "control.proto", -} - -func (m *PruneRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) 
- n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PruneRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PruneRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.KeepBytes != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.KeepBytes)) - i-- - dAtA[i] = 0x20 - } - if m.KeepDuration != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.KeepDuration)) - i-- - dAtA[i] = 0x18 - } - if m.All { - i-- - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Filter) > 0 { - for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filter[iNdEx]) - copy(dAtA[i:], m.Filter[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DiskUsageRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DiskUsageRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DiskUsageRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filter) > 0 { - for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filter[iNdEx]) - copy(dAtA[i:], m.Filter[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *DiskUsageResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DiskUsageResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DiskUsageResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Record) > 0 { - for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *UsageRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UsageRecord) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UsageRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Shared { - i-- - if m.Shared { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - } - if len(m.RecordType) > 0 { - i -= 
len(m.RecordType) - copy(dAtA[i:], m.RecordType) - i = encodeVarintControl(dAtA, i, uint64(len(m.RecordType))) - i-- - dAtA[i] = 0x52 - } - if len(m.Description) > 0 { - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintControl(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x4a - } - if m.UsageCount != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.UsageCount)) - i-- - dAtA[i] = 0x40 - } - if m.LastUsedAt != nil { - n1, err1 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastUsedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt):]) - if err1 != nil { - return 0, err1 - } - i -= n1 - i = encodeVarintControl(dAtA, i, uint64(n1)) - i-- - dAtA[i] = 0x3a - } - n2, err2 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt):]) - if err2 != nil { - return 0, err2 - } - i -= n2 - i = encodeVarintControl(dAtA, i, uint64(n2)) - i-- - dAtA[i] = 0x32 - if len(m.Parent) > 0 { - i -= len(m.Parent) - copy(dAtA[i:], m.Parent) - i = encodeVarintControl(dAtA, i, uint64(len(m.Parent))) - i-- - dAtA[i] = 0x2a - } - if m.Size_ != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x20 - } - if m.InUse { - i-- - if m.InUse { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.Mutable { - i-- - if m.Mutable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SolveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.FrontendInputs) > 0 { - for k := range m.FrontendInputs { - v := m.FrontendInputs[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - if len(m.Entitlements) > 0 { - for iNdEx := len(m.Entitlements) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Entitlements[iNdEx]) - copy(dAtA[i:], m.Entitlements[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Entitlements[iNdEx]))) - i-- - dAtA[i] = 0x4a - } - } - { - size, err := m.Cache.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if len(m.FrontendAttrs) > 0 { - for k := range m.FrontendAttrs { - v := m.FrontendAttrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 
0x3a - } - } - if len(m.Frontend) > 0 { - i -= len(m.Frontend) - copy(dAtA[i:], m.Frontend) - i = encodeVarintControl(dAtA, i, uint64(len(m.Frontend))) - i-- - dAtA[i] = 0x32 - } - if len(m.Session) > 0 { - i -= len(m.Session) - copy(dAtA[i:], m.Session) - i = encodeVarintControl(dAtA, i, uint64(len(m.Session))) - i-- - dAtA[i] = 0x2a - } - if len(m.ExporterAttrs) > 0 { - for k := range m.ExporterAttrs { - v := m.ExporterAttrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Exporter) > 0 { - i -= len(m.Exporter) - copy(dAtA[i:], m.Exporter) - i = encodeVarintControl(dAtA, i, uint64(len(m.Exporter))) - i-- - dAtA[i] = 0x1a - } - if m.Definition != nil { - { - size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CacheOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CacheOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Imports) > 0 { - for iNdEx := len(m.Imports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Imports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Exports) > 0 { - for iNdEx := len(m.Exports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Exports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.ExportAttrsDeprecated) > 0 { - for k := range m.ExportAttrsDeprecated { - v := m.ExportAttrsDeprecated[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.ImportRefsDeprecated) > 0 { - for iNdEx := len(m.ImportRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ImportRefsDeprecated[iNdEx]) - copy(dAtA[i:], m.ImportRefsDeprecated[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.ImportRefsDeprecated[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ExportRefDeprecated) > 0 { - i -= len(m.ExportRefDeprecated) - copy(dAtA[i:], m.ExportRefDeprecated) - i = encodeVarintControl(dAtA, i, uint64(len(m.ExportRefDeprecated))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Attrs) > 0 { - for k := range m.Attrs { - v := m.Attrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintControl(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SolveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ExporterResponse) > 0 { - for k := range m.ExporterResponse { - v := m.ExporterResponse[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintControl(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintControl(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintControl(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *StatusRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintControl(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatusResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Logs) > 0 { - for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Logs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Statuses) > 0 { - for iNdEx := len(m.Statuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Statuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Vertexes) > 0 { - for iNdEx := len(m.Vertexes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Vertexes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Vertex) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Vertex) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Error) > 0 { - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintControl(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x3a - } - if m.Completed != nil { - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintControl(dAtA, i, uint64(n6)) - i-- - dAtA[i] = 0x32 - } - if m.Started != nil { - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) - if err7 != nil { - return 0, err7 - } - i -= n7 - i = encodeVarintControl(dAtA, i, uint64(n7)) - i-- - dAtA[i] = 0x2a - } - if m.Cached { - i-- - if m.Cached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - } - if len(m.Inputs) > 0 { - for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Inputs[iNdEx]) - copy(dAtA[i:], m.Inputs[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Inputs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintControl(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *VertexStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VertexStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Completed != nil { - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintControl(dAtA, i, 
uint64(n8)) - i-- - dAtA[i] = 0x42 - } - if m.Started != nil { - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) - if err9 != nil { - return 0, err9 - } - i -= n9 - i = encodeVarintControl(dAtA, i, uint64(n9)) - i-- - dAtA[i] = 0x3a - } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err10 != nil { - return 0, err10 - } - i -= n10 - i = encodeVarintControl(dAtA, i, uint64(n10)) - i-- - dAtA[i] = 0x32 - if m.Total != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Total)) - i-- - dAtA[i] = 0x28 - } - if m.Current != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Current)) - i-- - dAtA[i] = 0x20 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - } - if len(m.Vertex) > 0 { - i -= len(m.Vertex) - copy(dAtA[i:], m.Vertex) - i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) - i-- - dAtA[i] = 0x12 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintControl(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *VertexLog) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VertexLog) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Msg) > 0 { - i -= len(m.Msg) - copy(dAtA[i:], m.Msg) - i = encodeVarintControl(dAtA, i, uint64(len(m.Msg))) - i-- - dAtA[i] = 0x22 - } - if m.Stream != 0 { - i = encodeVarintControl(dAtA, i, uint64(m.Stream)) - i-- - dAtA[i] = 0x18 - } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err11 != nil { - return 0, err11 - } - i -= n11 - i = encodeVarintControl(dAtA, i, uint64(n11)) - i-- - dAtA[i] = 0x12 - if len(m.Vertex) > 0 { - i -= len(m.Vertex) - copy(dAtA[i:], m.Vertex) - i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *BytesMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintControl(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ListWorkersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*ListWorkersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListWorkersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filter) > 0 { - for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filter[iNdEx]) - copy(dAtA[i:], m.Filter[iNdEx]) - i = encodeVarintControl(dAtA, i, uint64(len(m.Filter[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ListWorkersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListWorkersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListWorkersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Record) > 0 { - for iNdEx := len(m.Record) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Record[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintControl(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintControl(dAtA []byte, offset int, v uint64) int { - offset -= sovControl(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PruneRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } - } - if m.All { - n += 2 - } - if m.KeepDuration != 0 { - n += 1 + sovControl(uint64(m.KeepDuration)) - } - if m.KeepBytes != 0 { - n += 1 + sovControl(uint64(m.KeepBytes)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DiskUsageRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *DiskUsageResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Record) > 0 { - for _, e := range m.Record { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *UsageRecord) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Mutable { - n += 2 - } - if m.InUse { - n += 2 - } - if m.Size_ != 0 { - n += 1 + sovControl(uint64(m.Size_)) - } - l = len(m.Parent) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt) - n += 1 + l + sovControl(uint64(l)) - if m.LastUsedAt != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastUsedAt) - n += 1 + l + sovControl(uint64(l)) - } - if m.UsageCount != 0 { - n += 1 + sovControl(uint64(m.UsageCount)) - } - l = len(m.Description) - if l > 0 { - n += 1 + l + 
sovControl(uint64(l)) - } - l = len(m.RecordType) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Shared { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SolveRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Definition != nil { - l = m.Definition.Size() - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Exporter) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if len(m.ExporterAttrs) > 0 { - for k, v := range m.ExporterAttrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } - l = len(m.Session) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Frontend) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if len(m.FrontendAttrs) > 0 { - for k, v := range m.FrontendAttrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } - l = m.Cache.Size() - n += 1 + l + sovControl(uint64(l)) - if len(m.Entitlements) > 0 { - for _, s := range m.Entitlements { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.FrontendInputs) > 0 { - for k, v := range m.FrontendInputs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovControl(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + l - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CacheOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ExportRefDeprecated) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if len(m.ImportRefsDeprecated) > 0 { - for _, s := range m.ImportRefsDeprecated { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.ExportAttrsDeprecated) > 0 { - for k, v := range m.ExportAttrsDeprecated { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } - if len(m.Exports) > 0 { - for _, e := range m.Exports { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.Imports) > 0 { - for _, e := range m.Imports { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CacheOptionsEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SolveResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.ExporterResponse) > 0 { - for k, v := range m.ExporterResponse { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) - n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - 
n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatusResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Vertexes) > 0 { - for _, e := range m.Vertexes { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.Statuses) > 0 { - for _, e := range m.Statuses { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if len(m.Logs) > 0 { - for _, e := range m.Logs { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Vertex) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if len(m.Inputs) > 0 { - for _, s := range m.Inputs { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Cached { - n += 2 - } - if m.Started != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) - n += 1 + l + sovControl(uint64(l)) - } - if m.Completed != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Error) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VertexStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Vertex) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.Current != 0 { - n += 1 + sovControl(uint64(m.Current)) - } - if m.Total != 0 { - n += 1 + sovControl(uint64(m.Total)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovControl(uint64(l)) - if m.Started != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started) - n += 1 + l + sovControl(uint64(l)) - } - if m.Completed != nil { - l = github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed) - n += 1 + l + sovControl(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *VertexLog) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Vertex) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovControl(uint64(l)) - if m.Stream != 0 { - n += 1 + sovControl(uint64(m.Stream)) - } - l = len(m.Msg) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *BytesMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovControl(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ListWorkersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovControl(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m 
*ListWorkersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Record) > 0 { - for _, e := range m.Record { - l = e.Size() - n += 1 + l + sovControl(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovControl(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozControl(x uint64) (n int) { - return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *PruneRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PruneRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PruneRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.All = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType) - } - m.KeepDuration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepDuration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType) - } - m.KeepBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepBytes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DiskUsageRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DiskUsageRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DiskUsageRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DiskUsageResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DiskUsageResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DiskUsageResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Record = append(m.Record, &UsageRecord{}) - if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UsageRecord) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UsageRecord: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UsageRecord: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mutable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Mutable = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InUse", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.InUse = bool(v != 0) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Parent = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUsedAt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastUsedAt == nil { - m.LastUsedAt = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.LastUsedAt, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UsageCount", wireType) - } - m.UsageCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UsageCount |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RecordType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RecordType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Shared", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Shared = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SolveRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SolveRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SolveRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Definition == nil { - m.Definition = &pb.Definition{} - } - if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exporter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exporter = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterAttrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - if m.ExporterAttrs == nil { - m.ExporterAttrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ExporterAttrs[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Session", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Session = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Frontend", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Frontend = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendAttrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FrontendAttrs == nil { - m.FrontendAttrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.FrontendAttrs[mapkey] = mapvalue - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Cache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Entitlements", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entitlements = append(m.Entitlements, github_com_moby_buildkit_util_entitlements.Entitlement(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendInputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FrontendInputs == nil { - m.FrontendInputs = make(map[string]*pb.Definition) - } - var mapkey string - var mapvalue *pb.Definition - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthControl - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthControl - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &pb.Definition{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.FrontendInputs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return 
err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CacheOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CacheOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportRefDeprecated", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExportRefDeprecated = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImportRefsDeprecated", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImportRefsDeprecated = append(m.ImportRefsDeprecated, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportAttrsDeprecated", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExportAttrsDeprecated == nil { - m.ExportAttrsDeprecated = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ExportAttrsDeprecated[mapkey] = mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Exports = append(m.Exports, &CacheOptionsEntry{}) - if err := m.Exports[len(m.Exports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Imports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Imports = append(m.Imports, &CacheOptionsEntry{}) - if err := m.Imports[len(m.Imports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CacheOptionsEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CacheOptionsEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOptionsEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Attrs == nil { - m.Attrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - 
return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Attrs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SolveResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SolveResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SolveResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExporterResponse", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExporterResponse == nil { - m.ExporterResponse = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthControl - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthControl - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.ExporterResponse[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ref = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertexes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertexes = append(m.Vertexes, &Vertex{}) - if err := m.Vertexes[len(m.Vertexes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Statuses = append(m.Statuses, &VertexStatus{}) - if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Logs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Logs = append(m.Logs, &VertexLog{}) - if err := m.Logs[len(m.Logs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Vertex) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Vertex: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Vertex: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Inputs = append(m.Inputs, github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cached", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Cached = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Started == nil { - m.Started = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Completed == nil { - m.Completed = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VertexStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VertexStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VertexStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - m.Current = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Current |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) - } - m.Total = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Total |= int64(b&0x7F) << shift 
- if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Started == nil { - m.Started = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Started, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Completed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Completed == nil { - m.Completed = new(time.Time) - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(m.Completed, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VertexLog) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VertexLog: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VertexLog: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) - } - m.Stream = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Stream |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Msg", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Msg = append(m.Msg[:0], dAtA[iNdEx:postIndex]...) 
- if m.Msg == nil { - m.Msg = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BytesMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListWorkersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListWorkersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListWorkersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListWorkersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListWorkersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListWorkersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowControl - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthControl - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthControl - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Record = append(m.Record, &types.WorkerRecord{}) - if err := m.Record[len(m.Record)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipControl(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthControl - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipControl(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowControl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowControl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowControl - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthControl - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupControl - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthControl - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupControl = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto deleted file mode 100644 index 84a4680c4ba6..000000000000 --- a/vendor/github.com/moby/buildkit/api/services/control/control.proto +++ /dev/null @@ -1,147 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.v1; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "google/protobuf/timestamp.proto"; -import "github.com/moby/buildkit/solver/pb/ops.proto"; -import "github.com/moby/buildkit/api/types/worker.proto"; - -option (gogoproto.sizer_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service Control { - rpc DiskUsage(DiskUsageRequest) returns (DiskUsageResponse); - rpc Prune(PruneRequest) returns (stream UsageRecord); - rpc Solve(SolveRequest) returns (SolveResponse); - rpc Status(StatusRequest) returns (stream StatusResponse); - rpc Session(stream BytesMessage) returns (stream BytesMessage); - rpc ListWorkers(ListWorkersRequest) returns (ListWorkersResponse); - // rpc Info(InfoRequest) returns (InfoResponse); -} - -message PruneRequest { - repeated string filter = 1; - bool all = 2; - int64 keepDuration = 3 [(gogoproto.nullable) = true]; - int64 keepBytes = 4 [(gogoproto.nullable) = true]; -} - -message DiskUsageRequest { - repeated string filter = 1; -} - -message DiskUsageResponse { - repeated UsageRecord record = 1; -} - -message UsageRecord { - string ID = 1; - bool Mutable = 2; - bool InUse = 3; - int64 Size = 4; - string Parent = 5; - google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; - google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true]; - int64 UsageCount = 8; - string Description = 9; - string RecordType = 10; - bool Shared = 11; 
-}
-
-message SolveRequest {
-	string Ref = 1;
-	pb.Definition Definition = 2;
-	string Exporter = 3;
-	map<string, string> ExporterAttrs = 4;
-	string Session = 5;
-	string Frontend = 6;
-	map<string, string> FrontendAttrs = 7;
-	CacheOptions Cache = 8 [(gogoproto.nullable) = false];
-	repeated string Entitlements = 9 [(gogoproto.customtype) = "github.com/moby/buildkit/util/entitlements.Entitlement" ];
-	map<string, pb.Definition> FrontendInputs = 10;
-}
-
-message CacheOptions {
-	// ExportRefDeprecated is deprecated in favor or the new Exports since BuildKit v0.4.0.
-	// When ExportRefDeprecated is set, the solver appends
-	// {.Type = "registry", .Attrs = ExportAttrs.add("ref", ExportRef)}
-	// to Exports for compatibility. (planned to be removed)
-	string ExportRefDeprecated = 1;
-	// ImportRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0.
-	// When ImportRefsDeprecated is set, the solver appends
-	// {.Type = "registry", .Attrs = {"ref": importRef}}
-	// for each of the ImportRefs entry to Imports for compatibility. (planned to be removed)
-	repeated string ImportRefsDeprecated = 2;
-	// ExportAttrsDeprecated is deprecated since BuildKit v0.4.0.
-	// See the description of ExportRefDeprecated.
-	map<string, string> ExportAttrsDeprecated = 3;
-	// Exports was introduced in BuildKit v0.4.0.
-	repeated CacheOptionsEntry Exports = 4;
-	// Imports was introduced in BuildKit v0.4.0.
-	repeated CacheOptionsEntry Imports = 5;
-}
-
-message CacheOptionsEntry {
-	// Type is like "registry" or "local"
-	string Type = 1;
-	// Attrs are like mode=(min,max), ref=example.com:5000/foo/bar .
-	// See cache importer/exporter implementations' documentation.
-	map<string, string> Attrs = 2;
-}
-
-message SolveResponse {
-	map<string, string> ExporterResponse = 1;
-}
-
-message StatusRequest {
-	string Ref = 1;
-}
-
-message StatusResponse {
-	repeated Vertex vertexes = 1;
-	repeated VertexStatus statuses = 2;
-	repeated VertexLog logs = 3;
-}
-
-message Vertex {
-	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	repeated string inputs = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	string name = 3;
-	bool cached = 4;
-	google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ];
-	google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ];
-	string error = 7; // typed errors?
-}
-
-message VertexStatus {
-	string ID = 1;
-	string vertex = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	string name = 3;
-	int64 current = 4;
-	int64 total = 5;
-	// TODO: add started, completed
-	google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ];
-	google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ];
-}
-
-message VertexLog {
-	string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
-	google.protobuf.Timestamp timestamp = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
-	int64 stream = 3;
-	bytes msg = 4;
-}
-
-message BytesMessage {
-	bytes data = 1;
-}
-
-message ListWorkersRequest {
-	repeated string filter = 1; // containerd style
-}
-
-message ListWorkersResponse {
-	repeated moby.buildkit.v1.types.WorkerRecord record = 1;
-}
diff --git a/vendor/github.com/moby/buildkit/api/services/control/generate.go b/vendor/github.com/moby/buildkit/api/services/control/generate.go
deleted file mode 100644
index 9a3b24613e17..000000000000
--- a/vendor/github.com/moby/buildkit/api/services/control/generate.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package moby_buildkit_v1 //nolint:golint
-
-//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto
diff --git a/vendor/github.com/moby/buildkit/api/types/generate.go b/vendor/github.com/moby/buildkit/api/types/generate.go
deleted file mode 100644
index 984bb74ce1ec..000000000000
--- a/vendor/github.com/moby/buildkit/api/types/generate.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package moby_buildkit_v1_types //nolint:golint
-
-//go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. worker.proto
diff --git a/vendor/github.com/moby/buildkit/api/types/worker.pb.go b/vendor/github.com/moby/buildkit/api/types/worker.pb.go
deleted file mode 100644
index 54cbd605e14c..000000000000
--- a/vendor/github.com/moby/buildkit/api/types/worker.pb.go
+++ /dev/null
@@ -1,923 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: worker.proto
-
-package moby_buildkit_v1_types
-
-import (
-	fmt "fmt"
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/gogo/protobuf/proto"
-	pb "github.com/moby/buildkit/solver/pb"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type WorkerRecord struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Labels map[string]string `protobuf:"bytes,2,rep,name=Labels,proto3" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms,proto3" json:"platforms"` - GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy,proto3" json:"GCPolicy,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *WorkerRecord) Reset() { *m = WorkerRecord{} } -func (m *WorkerRecord) String() string { return proto.CompactTextString(m) } -func (*WorkerRecord) ProtoMessage() {} -func (*WorkerRecord) Descriptor() ([]byte, []int) { - return fileDescriptor_e4ff6184b07e587a, []int{0} -} -func (m *WorkerRecord) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkerRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_WorkerRecord.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *WorkerRecord) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkerRecord.Merge(m, src) -} -func (m *WorkerRecord) XXX_Size() int { - return m.Size() -} -func (m *WorkerRecord) XXX_DiscardUnknown() { - xxx_messageInfo_WorkerRecord.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkerRecord proto.InternalMessageInfo - -func (m *WorkerRecord) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *WorkerRecord) GetLabels() map[string]string { - if m != nil { - return m.Labels - } - return nil -} - -func (m *WorkerRecord) GetPlatforms() []pb.Platform { - if m != nil { - return m.Platforms - } - return nil -} - -func (m *WorkerRecord) GetGCPolicy() []*GCPolicy { - if m != nil { - return m.GCPolicy - } - return nil -} - -type GCPolicy struct { - All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"` - KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"` - KeepBytes int64 `protobuf:"varint,3,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"` - Filters []string `protobuf:"bytes,4,rep,name=filters,proto3" json:"filters,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *GCPolicy) Reset() { *m = GCPolicy{} } -func (m *GCPolicy) String() string { return proto.CompactTextString(m) } -func (*GCPolicy) ProtoMessage() {} -func (*GCPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_e4ff6184b07e587a, []int{1} -} -func (m *GCPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GCPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GCPolicy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GCPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_GCPolicy.Merge(m, src) -} -func (m *GCPolicy) XXX_Size() int { - return m.Size() -} -func (m *GCPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_GCPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_GCPolicy proto.InternalMessageInfo - -func (m *GCPolicy) 
GetAll() bool { - if m != nil { - return m.All - } - return false -} - -func (m *GCPolicy) GetKeepDuration() int64 { - if m != nil { - return m.KeepDuration - } - return 0 -} - -func (m *GCPolicy) GetKeepBytes() int64 { - if m != nil { - return m.KeepBytes - } - return 0 -} - -func (m *GCPolicy) GetFilters() []string { - if m != nil { - return m.Filters - } - return nil -} - -func init() { - proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.types.WorkerRecord.LabelsEntry") - proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy") -} - -func init() { proto.RegisterFile("worker.proto", fileDescriptor_e4ff6184b07e587a) } - -var fileDescriptor_e4ff6184b07e587a = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40, - 0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15, - 0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01, - 0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89, - 0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41, - 0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13, - 0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30, - 0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3, - 0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b, - 0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 0x07, - 0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91, - 0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb, - 0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf, - 0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0, - 0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b, - 0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65, - 0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70, - 0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5, - 0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8, - 0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99, - 0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6, - 0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29, - 0x02, 0x00, 0x00, -} - -func (m *WorkerRecord) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkerRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) 
- copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.GCPolicy) > 0 { - for iNdEx := len(m.GCPolicy) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.GCPolicy[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintWorker(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Platforms) > 0 { - for iNdEx := len(m.Platforms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Platforms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintWorker(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Labels) > 0 { - for k := range m.Labels { - v := m.Labels[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintWorker(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintWorker(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintWorker(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintWorker(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GCPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GCPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GCPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Filters) > 0 { - for iNdEx := len(m.Filters) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filters[iNdEx]) - copy(dAtA[i:], m.Filters[iNdEx]) - i = encodeVarintWorker(dAtA, i, uint64(len(m.Filters[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if m.KeepBytes != 0 { - i = encodeVarintWorker(dAtA, i, uint64(m.KeepBytes)) - i-- - dAtA[i] = 0x18 - } - if m.KeepDuration != 0 { - i = encodeVarintWorker(dAtA, i, uint64(m.KeepDuration)) - i-- - dAtA[i] = 0x10 - } - if m.All { - i-- - if m.All { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintWorker(dAtA []byte, offset int, v uint64) int { - offset -= sovWorker(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *WorkerRecord) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovWorker(uint64(l)) - } - if len(m.Labels) > 0 { - for k, v := range m.Labels { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovWorker(uint64(len(k))) + 1 + len(v) + sovWorker(uint64(len(v))) - n += mapEntrySize + 1 + sovWorker(uint64(mapEntrySize)) - } - } - if len(m.Platforms) > 0 { - for _, e := range m.Platforms { - l = e.Size() - n += 1 + l + sovWorker(uint64(l)) - } - } - if len(m.GCPolicy) > 0 { - for _, e := range m.GCPolicy { - l = e.Size() - n += 1 + l + sovWorker(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *GCPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.All { - n += 2 - } - if m.KeepDuration != 0 { - n += 1 + sovWorker(uint64(m.KeepDuration)) - } - if m.KeepBytes != 0 { - n += 1 + sovWorker(uint64(m.KeepBytes)) - } - if 
len(m.Filters) > 0 { - for _, s := range m.Filters { - l = len(s) - n += 1 + l + sovWorker(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovWorker(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozWorker(x uint64) (n int) { - return sovWorker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *WorkerRecord) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkerRecord: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkerRecord: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorker - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWorker - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthWorker - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthWorker - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthWorker - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthWorker - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipWorker(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthWorker - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWorker - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Platforms = append(m.Platforms, pb.Platform{}) - if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCPolicy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWorker - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.GCPolicy = append(m.GCPolicy, &GCPolicy{}) - if err := m.GCPolicy[len(m.GCPolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWorker(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthWorker - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field All", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.All = bool(v != 0) - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType) - } - m.KeepDuration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepDuration |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType) - } - m.KeepBytes = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.KeepBytes |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWorker - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthWorker - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthWorker - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWorker(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthWorker - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipWorker(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWorker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWorker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWorker - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthWorker - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupWorker - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthWorker - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthWorker = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowWorker = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupWorker = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/api/types/worker.proto b/vendor/github.com/moby/buildkit/api/types/worker.proto deleted file mode 100644 index 82dd7ad65145..000000000000 --- a/vendor/github.com/moby/buildkit/api/types/worker.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.v1.types; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/moby/buildkit/solver/pb/ops.proto"; - -option (gogoproto.sizer_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -message WorkerRecord { - string ID = 1; - map Labels = 2; - repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false]; - repeated GCPolicy GCPolicy = 4; -} - -message GCPolicy { - bool all = 1; - int64 keepDuration = 2; - int64 keepBytes = 3; - repeated string filters = 4; -} diff --git a/vendor/github.com/moby/buildkit/client/build.go b/vendor/github.com/moby/buildkit/client/build.go deleted file mode 100644 index 4cb91d7aa98a..000000000000 --- a/vendor/github.com/moby/buildkit/client/build.go +++ /dev/null @@ -1,150 +0,0 @@ -package client - -import ( - "context" - - "github.com/moby/buildkit/client/buildid" - gateway "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/frontend/gateway/grpcclient" - gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/apicaps" - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildFunc gateway.BuildFunc, statusChan chan *SolveStatus) (*SolveResponse, error) { - defer func() { - if statusChan != nil { - close(statusChan) - } - }() - - if opt.Frontend != "" { - return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend") - } 
- - if product == "" { - product = apicaps.ExportedProduct - } - - feOpts := opt.FrontendAttrs - opt.FrontendAttrs = nil - - workers, err := c.ListWorkers(ctx) - if err != nil { - return nil, errors.Wrap(err, "listing workers for Build") - } - var gworkers []gateway.WorkerInfo - for _, w := range workers { - gworkers = append(gworkers, gateway.WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: w.Platforms, - }) - } - - cb := func(ref string, s *session.Session) error { - gwClient := c.gatewayClientForBuild(ref) - g, err := grpcclient.New(ctx, feOpts, s.ID(), product, gwClient, gworkers) - if err != nil { - return err - } - - gwClient.caps = g.BuildOpts().Caps - - if err := g.Run(ctx, buildFunc); err != nil { - return errors.Wrap(err, "failed to run Build function") - } - return nil - } - - return c.solve(ctx, nil, cb, opt, statusChan) -} - -func (c *Client) gatewayClientForBuild(buildid string) *gatewayClientForBuild { - g := gatewayapi.NewLLBBridgeClient(c.conn) - return &gatewayClientForBuild{ - gateway: g, - buildID: buildid, - } -} - -type gatewayClientForBuild struct { - gateway gatewayapi.LLBBridgeClient - buildID string - caps apicaps.CapSet -} - -func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gatewayapi.ResolveImageConfigRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveImageConfigResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ResolveImageConfig(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) Solve(ctx context.Context, in *gatewayapi.SolveRequest, opts ...grpc.CallOption) (*gatewayapi.SolveResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.Solve(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.ReadFileRequest, opts ...grpc.CallOption) (*gatewayapi.ReadFileResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ReadFile(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) { - if err := g.caps.Supports(gatewayapi.CapReadDir); err != nil { - return nil, err - } - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ReadDir(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) { - if err := g.caps.Supports(gatewayapi.CapStatFile); err != nil { - return nil, err - } - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.StatFile(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.Ping(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) Return(ctx context.Context, in *gatewayapi.ReturnRequest, opts ...grpc.CallOption) (*gatewayapi.ReturnResponse, error) { - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.Return(ctx, in, opts...) 
-} - -func (g *gatewayClientForBuild) Inputs(ctx context.Context, in *gatewayapi.InputsRequest, opts ...grpc.CallOption) (*gatewayapi.InputsResponse, error) { - if err := g.caps.Supports(gatewayapi.CapFrontendInputs); err != nil { - return nil, err - } - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.Inputs(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) NewContainer(ctx context.Context, in *gatewayapi.NewContainerRequest, opts ...grpc.CallOption) (*gatewayapi.NewContainerResponse, error) { - if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { - return nil, err - } - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.NewContainer(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) ReleaseContainer(ctx context.Context, in *gatewayapi.ReleaseContainerRequest, opts ...grpc.CallOption) (*gatewayapi.ReleaseContainerResponse, error) { - if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { - return nil, err - } - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ReleaseContainer(ctx, in, opts...) -} - -func (g *gatewayClientForBuild) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (gatewayapi.LLBBridge_ExecProcessClient, error) { - if err := g.caps.Supports(gatewayapi.CapGatewayExec); err != nil { - return nil, err - } - ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) - return g.gateway.ExecProcess(ctx, opts...) -} diff --git a/vendor/github.com/moby/buildkit/client/buildid/metadata.go b/vendor/github.com/moby/buildkit/client/buildid/metadata.go deleted file mode 100644 index bb169b8fe4ec..000000000000 --- a/vendor/github.com/moby/buildkit/client/buildid/metadata.go +++ /dev/null @@ -1,29 +0,0 @@ -package buildid - -import ( - "context" - - "google.golang.org/grpc/metadata" -) - -var metadataKey = "buildkit-controlapi-buildid" - -func AppendToOutgoingContext(ctx context.Context, id string) context.Context { - if id != "" { - return metadata.AppendToOutgoingContext(ctx, metadataKey, id) - } - return ctx -} - -func FromIncomingContext(ctx context.Context) string { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "" - } - - if ids := md.Get(metadataKey); len(ids) == 1 { - return ids[0] - } - - return "" -} diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go deleted file mode 100644 index 38429ff5bbb1..000000000000 --- a/vendor/github.com/moby/buildkit/client/client.go +++ /dev/null @@ -1,203 +0,0 @@ -package client - -import ( - "context" - "crypto/tls" - "crypto/x509" - "io/ioutil" - "net" - "net/url" - - "github.com/containerd/containerd/defaults" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/client/connhelper" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/grpchijack" - "github.com/moby/buildkit/util/appdefaults" - "github.com/moby/buildkit/util/grpcerrors" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" -) - -type Client struct { - conn *grpc.ClientConn -} - -type ClientOpt interface{} - -// New returns a new buildkit client. Address can be empty for the system-default address. 
-func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) { - gopts := []grpc.DialOption{ - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(defaults.DefaultMaxRecvMsgSize)), - grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(defaults.DefaultMaxSendMsgSize)), - } - needDialer := true - needWithInsecure := true - - var unary []grpc.UnaryClientInterceptor - var stream []grpc.StreamClientInterceptor - - for _, o := range opts { - if _, ok := o.(*withFailFast); ok { - gopts = append(gopts, grpc.FailOnNonTempDialError(true)) - } - if credInfo, ok := o.(*withCredentials); ok { - opt, err := loadCredentials(credInfo) - if err != nil { - return nil, err - } - gopts = append(gopts, opt) - needWithInsecure = false - } - if wt, ok := o.(*withTracer); ok { - unary = append(unary, otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())) - stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)) - } - if wd, ok := o.(*withDialer); ok { - gopts = append(gopts, grpc.WithContextDialer(wd.dialer)) - needDialer = false - } - } - if needDialer { - dialFn, err := resolveDialer(address) - if err != nil { - return nil, err - } - gopts = append(gopts, grpc.WithContextDialer(dialFn)) - } - if needWithInsecure { - gopts = append(gopts, grpc.WithInsecure()) - } - if address == "" { - address = appdefaults.Address - } - - // grpc-go uses a slightly different naming scheme: https://github.com/grpc/grpc/blob/master/doc/naming.md - // This will end up setting rfc non-complient :authority header to address string (e.g. tcp://127.0.0.1:1234). - // So, here sets right authority header via WithAuthority DialOption. - addressURL, err := url.Parse(address) - if err != nil { - return nil, err - } - gopts = append(gopts, grpc.WithAuthority(addressURL.Host)) - - unary = append(unary, grpcerrors.UnaryClientInterceptor) - stream = append(stream, grpcerrors.StreamClientInterceptor) - - if len(unary) == 1 { - gopts = append(gopts, grpc.WithUnaryInterceptor(unary[0])) - } else if len(unary) > 1 { - gopts = append(gopts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...))) - } - - if len(stream) == 1 { - gopts = append(gopts, grpc.WithStreamInterceptor(stream[0])) - } else if len(stream) > 1 { - gopts = append(gopts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...))) - } - - conn, err := grpc.DialContext(ctx, address, gopts...) - if err != nil { - return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address) - } - c := &Client{ - conn: conn, - } - return c, nil -} - -func (c *Client) controlClient() controlapi.ControlClient { - return controlapi.NewControlClient(c.conn) -} - -func (c *Client) Dialer() session.Dialer { - return grpchijack.Dialer(c.controlClient()) -} - -func (c *Client) Close() error { - return c.conn.Close() -} - -type withFailFast struct{} - -func WithFailFast() ClientOpt { - return &withFailFast{} -} - -type withDialer struct { - dialer func(context.Context, string) (net.Conn, error) -} - -func WithContextDialer(df func(context.Context, string) (net.Conn, error)) ClientOpt { - return &withDialer{dialer: df} -} - -type withCredentials struct { - ServerName string - CACert string - Cert string - Key string -} - -// WithCredentials configures the TLS parameters of the client. 
-// Arguments: -// * serverName: specifies the name of the target server -// * ca: specifies the filepath of the CA certificate to use for verification -// * cert: specifies the filepath of the client certificate -// * key: specifies the filepath of the client key -func WithCredentials(serverName, ca, cert, key string) ClientOpt { - return &withCredentials{serverName, ca, cert, key} -} - -func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { - ca, err := ioutil.ReadFile(opts.CACert) - if err != nil { - return nil, errors.Wrap(err, "could not read ca certificate") - } - - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(ca); !ok { - return nil, errors.New("failed to append ca certs") - } - - cfg := &tls.Config{ - ServerName: opts.ServerName, - RootCAs: certPool, - } - - // we will produce an error if the user forgot about either cert or key if at least one is specified - if opts.Cert != "" || opts.Key != "" { - cert, err := tls.LoadX509KeyPair(opts.Cert, opts.Key) - if err != nil { - return nil, errors.Wrap(err, "could not read certificate/key") - } - cfg.Certificates = []tls.Certificate{cert} - cfg.BuildNameToCertificate() - } - - return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil -} - -func WithTracer(t opentracing.Tracer) ClientOpt { - return &withTracer{t} -} - -type withTracer struct { - tracer opentracing.Tracer -} - -func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) { - ch, err := connhelper.GetConnectionHelper(address) - if err != nil { - return nil, err - } - if ch != nil { - return ch.ContextDialer, nil - } - // basic dialer - return dialer, nil -} diff --git a/vendor/github.com/moby/buildkit/client/client_unix.go b/vendor/github.com/moby/buildkit/client/client_unix.go deleted file mode 100644 index 888a8173ad06..000000000000 --- a/vendor/github.com/moby/buildkit/client/client_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows - -package client - -import ( - "context" - "net" - "strings" - - "github.com/pkg/errors" -) - -func dialer(ctx context.Context, address string) (net.Conn, error) { - addrParts := strings.SplitN(address, "://", 2) - if len(addrParts) != 2 { - return nil, errors.Errorf("invalid address %s", address) - } - var d net.Dialer - return d.DialContext(ctx, addrParts[0], addrParts[1]) -} diff --git a/vendor/github.com/moby/buildkit/client/client_windows.go b/vendor/github.com/moby/buildkit/client/client_windows.go deleted file mode 100644 index a9eb87f2483f..000000000000 --- a/vendor/github.com/moby/buildkit/client/client_windows.go +++ /dev/null @@ -1,25 +0,0 @@ -package client - -import ( - "context" - "net" - "strings" - - winio "github.com/Microsoft/go-winio" - "github.com/pkg/errors" -) - -func dialer(ctx context.Context, address string) (net.Conn, error) { - addrParts := strings.SplitN(address, "://", 2) - if len(addrParts) != 2 { - return nil, errors.Errorf("invalid address %s", address) - } - switch addrParts[0] { - case "npipe": - address = strings.Replace(addrParts[1], "/", "\\", -1) - return winio.DialPipeContext(ctx, address) - default: - var d net.Dialer - return d.DialContext(ctx, addrParts[0], addrParts[1]) - } -} diff --git a/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go b/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go deleted file mode 100644 index 211df28e978a..000000000000 --- a/vendor/github.com/moby/buildkit/client/connhelper/connhelper.go +++ /dev/null @@ -1,38 +0,0 @@ -// Package connhelper provides helpers 
for connecting to a remote daemon host -// with custom logic. -package connhelper - -import ( - "context" - "net" - "net/url" -) - -var helpers = map[string]func(*url.URL) (*ConnectionHelper, error){} - -// ConnectionHelper allows to connect to a remote host with custom stream provider binary. -type ConnectionHelper struct { - // ContextDialer can be passed to grpc.WithContextDialer - ContextDialer func(ctx context.Context, addr string) (net.Conn, error) -} - -// GetConnectionHelper returns BuildKit-specific connection helper for the given URL. -// GetConnectionHelper returns nil without error when no helper is registered for the scheme. -func GetConnectionHelper(daemonURL string) (*ConnectionHelper, error) { - u, err := url.Parse(daemonURL) - if err != nil { - return nil, err - } - - fn, ok := helpers[u.Scheme] - if !ok { - return nil, nil - } - - return fn(u) -} - -// Register registers new connectionhelper for scheme -func Register(scheme string, fn func(*url.URL) (*ConnectionHelper, error)) { - helpers[scheme] = fn -} diff --git a/vendor/github.com/moby/buildkit/client/diskusage.go b/vendor/github.com/moby/buildkit/client/diskusage.go deleted file mode 100644 index 8034f977c173..000000000000 --- a/vendor/github.com/moby/buildkit/client/diskusage.go +++ /dev/null @@ -1,84 +0,0 @@ -package client - -import ( - "context" - "sort" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/pkg/errors" -) - -type UsageInfo struct { - ID string - Mutable bool - InUse bool - Size int64 - - CreatedAt time.Time - LastUsedAt *time.Time - UsageCount int - Parent string - Description string - RecordType UsageRecordType - Shared bool -} - -func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*UsageInfo, error) { - info := &DiskUsageInfo{} - for _, o := range opts { - o.SetDiskUsageOption(info) - } - - req := &controlapi.DiskUsageRequest{Filter: info.Filter} - resp, err := c.controlClient().DiskUsage(ctx, req) - if err != nil { - return nil, errors.Wrap(err, "failed to call diskusage") - } - - var du []*UsageInfo - - for _, d := range resp.Record { - du = append(du, &UsageInfo{ - ID: d.ID, - Mutable: d.Mutable, - InUse: d.InUse, - Size: d.Size_, - Parent: d.Parent, - CreatedAt: d.CreatedAt, - Description: d.Description, - UsageCount: int(d.UsageCount), - LastUsedAt: d.LastUsedAt, - RecordType: UsageRecordType(d.RecordType), - Shared: d.Shared, - }) - } - - sort.Slice(du, func(i, j int) bool { - if du[i].Size == du[j].Size { - return du[i].ID > du[j].ID - } - return du[i].Size > du[j].Size - }) - - return du, nil -} - -type DiskUsageOption interface { - SetDiskUsageOption(*DiskUsageInfo) -} - -type DiskUsageInfo struct { - Filter []string -} - -type UsageRecordType string - -const ( - UsageRecordTypeInternal UsageRecordType = "internal" - UsageRecordTypeFrontend UsageRecordType = "frontend" - UsageRecordTypeLocalSource UsageRecordType = "source.local" - UsageRecordTypeGitCheckout UsageRecordType = "source.git.checkout" - UsageRecordTypeCacheMount UsageRecordType = "exec.cachemount" - UsageRecordTypeRegular UsageRecordType = "regular" -) diff --git a/vendor/github.com/moby/buildkit/client/exporters.go b/vendor/github.com/moby/buildkit/client/exporters.go deleted file mode 100644 index 0f70d59c8726..000000000000 --- a/vendor/github.com/moby/buildkit/client/exporters.go +++ /dev/null @@ -1,9 +0,0 @@ -package client - -const ( - ExporterImage = "image" - ExporterLocal = "local" - ExporterTar = "tar" - ExporterOCI = "oci" - ExporterDocker = "docker" -) diff 
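The hunks above remove the vendored copy of BuildKit's standalone client API (client.go, the unix/windows dialers, connhelper.go, diskusage.go, exporters.go). For orientation only, here is a minimal sketch of how that API is normally driven, based on the signatures visible in the deleted sources; the daemon socket path and the fail-fast option are illustrative assumptions, not anything prescribed by this patch:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()

	// Dial buildkitd. An empty address falls back to the package's system
	// default; the unix socket path used here is an assumed, typical location.
	c, err := client.New(ctx, "unix:///run/buildkit/buildkitd.sock", client.WithFailFast())
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// DiskUsage returns the build cache records, sorted largest first.
	usage, err := c.DiskUsage(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range usage {
		fmt.Printf("%-20s %12d bytes  in-use=%v\n", u.ID, u.Size, u.InUse)
	}
}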
--git a/vendor/github.com/moby/buildkit/client/filter.go b/vendor/github.com/moby/buildkit/client/filter.go deleted file mode 100644 index b05fe59d088c..000000000000 --- a/vendor/github.com/moby/buildkit/client/filter.go +++ /dev/null @@ -1,19 +0,0 @@ -package client - -func WithFilter(f []string) Filter { - return Filter(f) -} - -type Filter []string - -func (f Filter) SetDiskUsageOption(di *DiskUsageInfo) { - di.Filter = f -} - -func (f Filter) SetPruneOption(pi *PruneInfo) { - pi.Filter = f -} - -func (f Filter) SetListWorkersOption(lwi *ListWorkersInfo) { - lwi.Filter = f -} diff --git a/vendor/github.com/moby/buildkit/client/graph.go b/vendor/github.com/moby/buildkit/client/graph.go deleted file mode 100644 index bcfa8b839fba..000000000000 --- a/vendor/github.com/moby/buildkit/client/graph.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "time" - - digest "github.com/opencontainers/go-digest" -) - -type Vertex struct { - Digest digest.Digest - Inputs []digest.Digest - Name string - Started *time.Time - Completed *time.Time - Cached bool - Error string -} - -type VertexStatus struct { - ID string - Vertex digest.Digest - Name string - Total int64 - Current int64 - Timestamp time.Time - Started *time.Time - Completed *time.Time -} - -type VertexLog struct { - Vertex digest.Digest - Stream int - Data []byte - Timestamp time.Time -} - -type SolveStatus struct { - Vertexes []*Vertex - Statuses []*VertexStatus - Logs []*VertexLog -} - -type SolveResponse struct { - // ExporterResponse is also used for CacheExporter - ExporterResponse map[string]string -} diff --git a/vendor/github.com/moby/buildkit/client/llb/async.go b/vendor/github.com/moby/buildkit/client/llb/async.go deleted file mode 100644 index 48216e98957f..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/async.go +++ /dev/null @@ -1,98 +0,0 @@ -package llb - -import ( - "context" - - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/flightcontrol" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type asyncState struct { - f func(context.Context, State) (State, error) - prev State - target State - set bool - err error - g flightcontrol.Group -} - -func (as *asyncState) Output() Output { - return as -} - -func (as *asyncState) Vertex(ctx context.Context) Vertex { - err := as.Do(ctx) - if err != nil { - return &errVertex{err} - } - if as.set { - out := as.target.Output() - if out == nil { - return nil - } - return out.Vertex(ctx) - } - return nil -} - -func (as *asyncState) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error) { - err := as.Do(ctx) - if err != nil { - return nil, err - } - if as.set { - out := as.target.Output() - if out == nil { - return nil, nil - } - return out.ToInput(ctx, c) - } - return nil, nil -} - -func (as *asyncState) Do(ctx context.Context) error { - _, err := as.g.Do(ctx, "", func(ctx context.Context) (interface{}, error) { - if as.set { - return as.target, as.err - } - res, err := as.f(ctx, as.prev) - if err != nil { - select { - case <-ctx.Done(): - if errors.Is(err, ctx.Err()) { - return res, err - } - default: - } - } - as.target = res - as.err = err - as.set = true - return res, err - }) - if err != nil { - return err - } - return as.err -} - -type errVertex struct { - err error -} - -func (v *errVertex) Validate(context.Context) error { - return v.err -} -func (v *errVertex) Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { - return "", nil, nil, nil, v.err 
-} -func (v *errVertex) Output() Output { - return nil -} -func (v *errVertex) Inputs() []Output { - return nil -} - -var _ Vertex = &errVertex{} diff --git a/vendor/github.com/moby/buildkit/client/llb/definition.go b/vendor/github.com/moby/buildkit/client/llb/definition.go deleted file mode 100644 index 99af7c687965..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/definition.go +++ /dev/null @@ -1,227 +0,0 @@ -package llb - -import ( - "context" - "sync" - - "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// DefinitionOp implements llb.Vertex using a marshalled definition. -// -// For example, after marshalling a LLB state and sending over the wire, the -// LLB state can be reconstructed from the definition. -type DefinitionOp struct { - MarshalCache - mu sync.Mutex - ops map[digest.Digest]*pb.Op - defs map[digest.Digest][]byte - metas map[digest.Digest]pb.OpMetadata - sources map[digest.Digest][]*SourceLocation - platforms map[digest.Digest]*specs.Platform - dgst digest.Digest - index pb.OutputIndex - inputCache map[digest.Digest][]*DefinitionOp -} - -// NewDefinitionOp returns a new operation from a marshalled definition. -func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { - ops := make(map[digest.Digest]*pb.Op) - defs := make(map[digest.Digest][]byte) - platforms := make(map[digest.Digest]*specs.Platform) - - var dgst digest.Digest - for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return nil, errors.Wrap(err, "failed to parse llb proto op") - } - dgst = digest.FromBytes(dt) - ops[dgst] = &op - defs[dgst] = dt - - var platform *specs.Platform - if op.Platform != nil { - spec := op.Platform.Spec() - platform = &spec - } - platforms[dgst] = platform - } - - srcs := map[digest.Digest][]*SourceLocation{} - - if def.Source != nil { - sourceMaps := make([]*SourceMap, len(def.Source.Infos)) - for i, info := range def.Source.Infos { - var st *State - sdef := info.Definition - if sdef != nil { - op, err := NewDefinitionOp(sdef) - if err != nil { - return nil, err - } - state := NewState(op) - st = &state - } - sourceMaps[i] = NewSourceMap(st, info.Filename, info.Data) - } - - for dgst, locs := range def.Source.Locations { - for _, loc := range locs.Locations { - if loc.SourceIndex < 0 || int(loc.SourceIndex) >= len(sourceMaps) { - return nil, errors.Errorf("failed to find source map with index %d", loc.SourceIndex) - } - - srcs[digest.Digest(dgst)] = append(srcs[digest.Digest(dgst)], &SourceLocation{ - SourceMap: sourceMaps[int(loc.SourceIndex)], - Ranges: loc.Ranges, - }) - } - } - } - - var index pb.OutputIndex - if dgst != "" { - index = ops[dgst].Inputs[0].Index - dgst = ops[dgst].Inputs[0].Digest - } - - return &DefinitionOp{ - ops: ops, - defs: defs, - metas: def.Metadata, - sources: srcs, - platforms: platforms, - dgst: dgst, - index: index, - inputCache: make(map[digest.Digest][]*DefinitionOp), - }, nil -} - -func (d *DefinitionOp) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error) { - return d.Output().ToInput(ctx, c) -} - -func (d *DefinitionOp) Vertex(context.Context) Vertex { - return d -} - -func (d *DefinitionOp) Validate(context.Context) error { - // Scratch state has no digest, ops or metas. 
- if d.dgst == "" { - return nil - } - - d.mu.Lock() - defer d.mu.Unlock() - - if len(d.ops) == 0 || len(d.defs) == 0 || len(d.metas) == 0 { - return errors.Errorf("invalid definition op with no ops %d %d", len(d.ops), len(d.metas)) - } - - _, ok := d.ops[d.dgst] - if !ok { - return errors.Errorf("invalid definition op with unknown op %q", d.dgst) - } - - _, ok = d.defs[d.dgst] - if !ok { - return errors.Errorf("invalid definition op with unknown def %q", d.dgst) - } - - _, ok = d.metas[d.dgst] - if !ok { - return errors.Errorf("invalid definition op with unknown metas %q", d.dgst) - } - - // It is possible for d.index >= len(d.ops[d.dgst]) when depending on scratch - // images. - if d.index < 0 { - return errors.Errorf("invalid definition op with invalid index") - } - - return nil -} - -func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { - if d.dgst == "" { - return "", nil, nil, nil, errors.Errorf("cannot marshal empty definition op") - } - - if err := d.Validate(ctx); err != nil { - return "", nil, nil, nil, err - } - - d.mu.Lock() - defer d.mu.Unlock() - - meta := d.metas[d.dgst] - return d.dgst, d.defs[d.dgst], &meta, d.sources[d.dgst], nil - -} - -func (d *DefinitionOp) Output() Output { - if d.dgst == "" { - return nil - } - - d.mu.Lock() - platform := d.platforms[d.dgst] - d.mu.Unlock() - - return &output{vertex: d, platform: platform, getIndex: func() (pb.OutputIndex, error) { - return d.index, nil - }} -} - -func (d *DefinitionOp) Inputs() []Output { - if d.dgst == "" { - return nil - } - - var inputs []Output - - d.mu.Lock() - op := d.ops[d.dgst] - platform := d.platforms[d.dgst] - d.mu.Unlock() - - for _, input := range op.Inputs { - var vtx *DefinitionOp - d.mu.Lock() - if existingIndexes, ok := d.inputCache[input.Digest]; ok { - if int(input.Index) < len(existingIndexes) && existingIndexes[input.Index] != nil { - vtx = existingIndexes[input.Index] - } - } - if vtx == nil { - vtx = &DefinitionOp{ - ops: d.ops, - defs: d.defs, - metas: d.metas, - platforms: d.platforms, - dgst: input.Digest, - index: input.Index, - inputCache: d.inputCache, - } - existingIndexes := d.inputCache[input.Digest] - indexDiff := int(input.Index) - len(existingIndexes) - if indexDiff >= 0 { - // make room in the slice for the new index being set - existingIndexes = append(existingIndexes, make([]*DefinitionOp, indexDiff+1)...) 
- } - existingIndexes[input.Index] = vtx - d.inputCache[input.Digest] = existingIndexes - } - d.mu.Unlock() - - inputs = append(inputs, &output{vertex: vtx, platform: platform, getIndex: func() (pb.OutputIndex, error) { - return pb.OutputIndex(vtx.index), nil - }}) - } - - return inputs -} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go deleted file mode 100644 index 54182e3ff83e..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ /dev/null @@ -1,669 +0,0 @@ -package llb - -import ( - "context" - _ "crypto/sha256" // for opencontainers/go-digest - "fmt" - "net" - "sort" - - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/system" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -func NewExecOp(base State, proxyEnv *ProxyEnv, readOnly bool, c Constraints) *ExecOp { - e := &ExecOp{base: base, constraints: c, proxyEnv: proxyEnv} - root := base.Output() - rootMount := &mount{ - target: pb.RootMount, - source: root, - readonly: readOnly, - } - e.mounts = append(e.mounts, rootMount) - if readOnly { - e.root = root - } else { - o := &output{vertex: e, getIndex: e.getMountIndexFn(rootMount)} - if p := c.Platform; p != nil { - o.platform = p - } - e.root = o - } - rootMount.output = e.root - return e -} - -type mount struct { - target string - readonly bool - source Output - output Output - selector string - cacheID string - tmpfs bool - cacheSharing CacheMountSharingMode - noOutput bool -} - -type ExecOp struct { - MarshalCache - proxyEnv *ProxyEnv - root Output - mounts []*mount - base State - constraints Constraints - isValidated bool - secrets []SecretInfo - ssh []SSHInfo -} - -func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output { - m := &mount{ - target: target, - source: source, - } - for _, o := range opt { - o(m) - } - e.mounts = append(e.mounts, m) - if m.readonly { - m.output = source - } else if m.tmpfs { - m.output = &output{vertex: e, err: errors.Errorf("tmpfs mount for %s can't be used as a parent", target)} - } else if m.noOutput { - m.output = &output{vertex: e, err: errors.Errorf("mount marked no-output and %s can't be used as a parent", target)} - } else { - o := &output{vertex: e, getIndex: e.getMountIndexFn(m)} - if p := e.constraints.Platform; p != nil { - o.platform = p - } - m.output = o - } - e.Store(nil, nil, nil, nil) - e.isValidated = false - return m.output -} - -func (e *ExecOp) GetMount(target string) Output { - for _, m := range e.mounts { - if m.target == target { - return m.output - } - } - return nil -} - -func (e *ExecOp) Validate(ctx context.Context) error { - if e.isValidated { - return nil - } - args, err := getArgs(e.base)(ctx) - if err != nil { - return err - } - if len(args) == 0 { - return errors.Errorf("arguments are required") - } - cwd, err := getDir(e.base)(ctx) - if err != nil { - return err - } - if cwd == "" { - return errors.Errorf("working directory is required") - } - for _, m := range e.mounts { - if m.source != nil { - if err := m.source.Vertex(ctx).Validate(ctx); err != nil { - return err - } - } - } - e.isValidated = true - return nil -} - -func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { - if e.Cached(c) { - return e.Load() - } - if err := e.Validate(ctx); err != nil { - return "", nil, nil, nil, err - } - // make sure mounts are sorted - sort.Slice(e.mounts, func(i, j int) bool { - return 
e.mounts[i].target < e.mounts[j].target - }) - - env, err := getEnv(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - - if len(e.ssh) > 0 { - for i, s := range e.ssh { - if s.Target == "" { - e.ssh[i].Target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", i) - } - } - if _, ok := env.Get("SSH_AUTH_SOCK"); !ok { - env = env.AddOrReplace("SSH_AUTH_SOCK", e.ssh[0].Target) - } - } - if c.Caps != nil { - if err := c.Caps.Supports(pb.CapExecMetaSetsDefaultPath); err != nil { - os := "linux" - if c.Platform != nil { - os = c.Platform.OS - } else if e.constraints.Platform != nil { - os = e.constraints.Platform.OS - } - env = env.SetDefault("PATH", system.DefaultPathEnv(os)) - } else { - addCap(&e.constraints, pb.CapExecMetaSetsDefaultPath) - } - } - - args, err := getArgs(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - - cwd, err := getDir(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - - user, err := getUser(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - - hostname, err := getHostname(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - - meta := &pb.Meta{ - Args: args, - Env: env.ToArray(), - Cwd: cwd, - User: user, - Hostname: hostname, - } - extraHosts, err := getExtraHosts(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - if len(extraHosts) > 0 { - hosts := make([]*pb.HostIP, len(extraHosts)) - for i, h := range extraHosts { - hosts[i] = &pb.HostIP{Host: h.Host, IP: h.IP.String()} - } - meta.ExtraHosts = hosts - } - - network, err := getNetwork(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - - security, err := getSecurity(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - - peo := &pb.ExecOp{ - Meta: meta, - Network: network, - Security: security, - } - if network != NetModeSandbox { - addCap(&e.constraints, pb.CapExecMetaNetwork) - } - - if security != SecurityModeSandbox { - addCap(&e.constraints, pb.CapExecMetaSecurity) - } - - if p := e.proxyEnv; p != nil { - peo.Meta.ProxyEnv = &pb.ProxyEnv{ - HttpProxy: p.HTTPProxy, - HttpsProxy: p.HTTPSProxy, - FtpProxy: p.FTPProxy, - NoProxy: p.NoProxy, - AllProxy: p.AllProxy, - } - addCap(&e.constraints, pb.CapExecMetaProxy) - } - - addCap(&e.constraints, pb.CapExecMetaBase) - - for _, m := range e.mounts { - if m.selector != "" { - addCap(&e.constraints, pb.CapExecMountSelector) - } - if m.cacheID != "" { - addCap(&e.constraints, pb.CapExecMountCache) - addCap(&e.constraints, pb.CapExecMountCacheSharing) - } else if m.tmpfs { - addCap(&e.constraints, pb.CapExecMountTmpfs) - } else if m.source != nil { - addCap(&e.constraints, pb.CapExecMountBind) - } - } - - if len(e.secrets) > 0 { - addCap(&e.constraints, pb.CapExecMountSecret) - } - - if len(e.ssh) > 0 { - addCap(&e.constraints, pb.CapExecMountSSH) - } - - if e.constraints.Platform == nil { - p, err := getPlatform(e.base)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - e.constraints.Platform = p - } - - pop, md := MarshalConstraints(c, &e.constraints) - pop.Op = &pb.Op_Exec{ - Exec: peo, - } - - outIndex := 0 - for _, m := range e.mounts { - inputIndex := pb.InputIndex(len(pop.Inputs)) - if m.source != nil { - if m.tmpfs { - return "", nil, nil, nil, errors.Errorf("tmpfs mounts must use scratch") - } - inp, err := m.source.ToInput(ctx, c) - if err != nil { - return "", nil, nil, nil, err - } - - newInput := true - - for i, inp2 := range pop.Inputs { - if *inp == *inp2 { - inputIndex = pb.InputIndex(i) - newInput = false - break - } - 
} - - if newInput { - pop.Inputs = append(pop.Inputs, inp) - } - } else { - inputIndex = pb.Empty - } - - outputIndex := pb.OutputIndex(-1) - if !m.noOutput && !m.readonly && m.cacheID == "" && !m.tmpfs { - outputIndex = pb.OutputIndex(outIndex) - outIndex++ - } - - pm := &pb.Mount{ - Input: inputIndex, - Dest: m.target, - Readonly: m.readonly, - Output: outputIndex, - Selector: m.selector, - } - if m.cacheID != "" { - pm.MountType = pb.MountType_CACHE - pm.CacheOpt = &pb.CacheOpt{ - ID: m.cacheID, - } - switch m.cacheSharing { - case CacheMountShared: - pm.CacheOpt.Sharing = pb.CacheSharingOpt_SHARED - case CacheMountPrivate: - pm.CacheOpt.Sharing = pb.CacheSharingOpt_PRIVATE - case CacheMountLocked: - pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED - } - } - if m.tmpfs { - pm.MountType = pb.MountType_TMPFS - } - peo.Mounts = append(peo.Mounts, pm) - } - - for _, s := range e.secrets { - pm := &pb.Mount{ - Dest: s.Target, - MountType: pb.MountType_SECRET, - SecretOpt: &pb.SecretOpt{ - ID: s.ID, - Uid: uint32(s.UID), - Gid: uint32(s.GID), - Optional: s.Optional, - Mode: uint32(s.Mode), - }, - } - peo.Mounts = append(peo.Mounts, pm) - } - - for _, s := range e.ssh { - pm := &pb.Mount{ - Dest: s.Target, - MountType: pb.MountType_SSH, - SSHOpt: &pb.SSHOpt{ - ID: s.ID, - Uid: uint32(s.UID), - Gid: uint32(s.GID), - Mode: uint32(s.Mode), - Optional: s.Optional, - }, - } - peo.Mounts = append(peo.Mounts, pm) - } - - dt, err := pop.Marshal() - if err != nil { - return "", nil, nil, nil, err - } - e.Store(dt, md, e.constraints.SourceLocations, c) - return e.Load() -} - -func (e *ExecOp) Output() Output { - return e.root -} - -func (e *ExecOp) Inputs() (inputs []Output) { - mm := map[Output]struct{}{} - for _, m := range e.mounts { - if m.source != nil { - mm[m.source] = struct{}{} - } - } - for o := range mm { - inputs = append(inputs, o) - } - return -} - -func (e *ExecOp) getMountIndexFn(m *mount) func() (pb.OutputIndex, error) { - return func() (pb.OutputIndex, error) { - // make sure mounts are sorted - sort.Slice(e.mounts, func(i, j int) bool { - return e.mounts[i].target < e.mounts[j].target - }) - - i := 0 - for _, m2 := range e.mounts { - if m2.noOutput || m2.readonly || m2.tmpfs || m2.cacheID != "" { - continue - } - if m == m2 { - return pb.OutputIndex(i), nil - } - i++ - } - return pb.OutputIndex(0), errors.Errorf("invalid mount: %s", m.target) - } -} - -type ExecState struct { - State - exec *ExecOp -} - -func (e ExecState) AddMount(target string, source State, opt ...MountOption) State { - return source.WithOutput(e.exec.AddMount(target, source.Output(), opt...)) -} - -func (e ExecState) GetMount(target string) State { - return NewState(e.exec.GetMount(target)) -} - -func (e ExecState) Root() State { - return e.State -} - -type MountOption func(*mount) - -func Readonly(m *mount) { - m.readonly = true -} - -func SourcePath(src string) MountOption { - return func(m *mount) { - m.selector = src - } -} - -func ForceNoOutput(m *mount) { - m.noOutput = true -} - -func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption { - return func(m *mount) { - m.cacheID = id - m.cacheSharing = sharing - } -} - -func Tmpfs() MountOption { - return func(m *mount) { - m.tmpfs = true - } -} - -type RunOption interface { - SetRunOption(es *ExecInfo) -} - -type runOptionFunc func(*ExecInfo) - -func (fn runOptionFunc) SetRunOption(ei *ExecInfo) { - fn(ei) -} - -func (fn StateOption) SetRunOption(ei *ExecInfo) { - ei.State = ei.State.With(fn) -} - -var _ RunOption = StateOption(func(_ 
State) State { return State{} }) - -func Shlex(str string) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = shlexf(str, false)(ei.State) - }) -} -func Shlexf(str string, v ...interface{}) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = shlexf(str, true, v...)(ei.State) - }) -} - -func Args(a []string) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = args(a...)(ei.State) - }) -} - -func AddExtraHost(host string, ip net.IP) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.AddExtraHost(host, ip) - }) -} - -func With(so ...StateOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.State = ei.State.With(so...) - }) -} - -func AddMount(dest string, mountState State, opts ...MountOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.Mounts = append(ei.Mounts, MountInfo{dest, mountState.Output(), opts}) - }) -} - -func AddSSHSocket(opts ...SSHOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - s := &SSHInfo{ - Mode: 0600, - } - for _, opt := range opts { - opt.SetSSHOption(s) - } - ei.SSH = append(ei.SSH, *s) - }) -} - -type SSHOption interface { - SetSSHOption(*SSHInfo) -} - -type sshOptionFunc func(*SSHInfo) - -func (fn sshOptionFunc) SetSSHOption(si *SSHInfo) { - fn(si) -} - -func SSHID(id string) SSHOption { - return sshOptionFunc(func(si *SSHInfo) { - si.ID = id - }) -} - -func SSHSocketTarget(target string) SSHOption { - return sshOptionFunc(func(si *SSHInfo) { - si.Target = target - }) -} - -func SSHSocketOpt(target string, uid, gid, mode int) SSHOption { - return sshOptionFunc(func(si *SSHInfo) { - si.Target = target - si.UID = uid - si.GID = gid - si.Mode = mode - }) -} - -var SSHOptional = sshOptionFunc(func(si *SSHInfo) { - si.Optional = true -}) - -type SSHInfo struct { - ID string - Target string - Mode int - UID int - GID int - Optional bool -} - -func AddSecret(dest string, opts ...SecretOption) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - s := &SecretInfo{ID: dest, Target: dest, Mode: 0400} - for _, opt := range opts { - opt.SetSecretOption(s) - } - ei.Secrets = append(ei.Secrets, *s) - }) -} - -type SecretOption interface { - SetSecretOption(*SecretInfo) -} - -type secretOptionFunc func(*SecretInfo) - -func (fn secretOptionFunc) SetSecretOption(si *SecretInfo) { - fn(si) -} - -type SecretInfo struct { - ID string - Target string - Mode int - UID int - GID int - Optional bool -} - -var SecretOptional = secretOptionFunc(func(si *SecretInfo) { - si.Optional = true -}) - -func SecretID(id string) SecretOption { - return secretOptionFunc(func(si *SecretInfo) { - si.ID = id - }) -} - -func SecretFileOpt(uid, gid, mode int) SecretOption { - return secretOptionFunc(func(si *SecretInfo) { - si.UID = uid - si.GID = gid - si.Mode = mode - }) -} - -func ReadonlyRootFS() RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.ReadonlyRootFS = true - }) -} - -func WithProxy(ps ProxyEnv) RunOption { - return runOptionFunc(func(ei *ExecInfo) { - ei.ProxyEnv = &ps - }) -} - -type ExecInfo struct { - constraintsWrapper - State State - Mounts []MountInfo - ReadonlyRootFS bool - ProxyEnv *ProxyEnv - Secrets []SecretInfo - SSH []SSHInfo -} - -type MountInfo struct { - Target string - Source Output - Opts []MountOption -} - -type ProxyEnv struct { - HTTPProxy string - HTTPSProxy string - FTPProxy string - NoProxy string - AllProxy string -} - -type CacheMountSharingMode int - -const ( - CacheMountShared CacheMountSharingMode = iota - CacheMountPrivate - 
CacheMountLocked -) - -const ( - NetModeSandbox = pb.NetMode_UNSET - NetModeHost = pb.NetMode_HOST - NetModeNone = pb.NetMode_NONE -) - -const ( - SecurityModeInsecure = pb.SecurityMode_INSECURE - SecurityModeSandbox = pb.SecurityMode_SANDBOX -) diff --git a/vendor/github.com/moby/buildkit/client/llb/fileop.go b/vendor/github.com/moby/buildkit/client/llb/fileop.go deleted file mode 100644 index 1f43974cb080..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/fileop.go +++ /dev/null @@ -1,776 +0,0 @@ -package llb - -import ( - "context" - _ "crypto/sha256" // for opencontainers/go-digest - "os" - "path" - "strconv" - "strings" - "time" - - "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -// Examples: -// local := llb.Local(...) -// llb.Image().Dir("/abc").File(Mkdir("./foo").Mkfile("/abc/foo/bar", []byte("data"))) -// llb.Image().File(Mkdir("/foo").Mkfile("/foo/bar", []byte("data"))) -// llb.Image().File(Copy(local, "/foo", "/bar")).File(Copy(local, "/foo2", "/bar2")) -// -// a := Mkdir("./foo") // *FileAction /ced/foo -// b := Mkdir("./bar") // /abc/bar -// c := b.Copy(a.WithState(llb.Scratch().Dir("/ced")), "./foo", "./baz") // /abc/baz -// llb.Image().Dir("/abc").File(c) -// -// In future this can be extended to multiple outputs with: -// a := Mkdir("./foo") -// b, id := a.GetSelector() -// c := b.Mkdir("./bar") -// filestate = state.File(c) -// filestate.GetOutput(id).Exec() - -func NewFileOp(s State, action *FileAction, c Constraints) *FileOp { - action = action.bind(s) - - f := &FileOp{ - action: action, - constraints: c, - } - - f.output = &output{vertex: f, getIndex: func() (pb.OutputIndex, error) { - return pb.OutputIndex(0), nil - }} - - return f -} - -// CopyInput is either llb.State or *FileActionWithState -type CopyInput interface { - isFileOpCopyInput() -} - -type subAction interface { - toProtoAction(context.Context, string, pb.InputIndex) (pb.IsFileAction, error) -} - -type capAdder interface { - addCaps(*FileOp) -} - -type FileAction struct { - state *State - prev *FileAction - action subAction - err error -} - -func (fa *FileAction) Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { - a := Mkdir(p, m, opt...) - a.prev = fa - return a -} - -func (fa *FileAction) Mkfile(p string, m os.FileMode, dt []byte, opt ...MkfileOption) *FileAction { - a := Mkfile(p, m, dt, opt...) - a.prev = fa - return a -} - -func (fa *FileAction) Rm(p string, opt ...RmOption) *FileAction { - a := Rm(p, opt...) - a.prev = fa - return a -} - -func (fa *FileAction) Copy(input CopyInput, src, dest string, opt ...CopyOption) *FileAction { - a := Copy(input, src, dest, opt...) 
- a.prev = fa - return a -} - -func (fa *FileAction) allOutputs(m map[Output]struct{}) { - if fa == nil { - return - } - if fa.state != nil && fa.state.Output() != nil { - m[fa.state.Output()] = struct{}{} - } - - if a, ok := fa.action.(*fileActionCopy); ok { - if a.state != nil { - if out := a.state.Output(); out != nil { - m[out] = struct{}{} - } - } else if a.fas != nil { - a.fas.allOutputs(m) - } - } - fa.prev.allOutputs(m) -} - -func (fa *FileAction) bind(s State) *FileAction { - if fa == nil { - return nil - } - fa2 := *fa - fa2.prev = fa.prev.bind(s) - fa2.state = &s - return &fa2 -} - -func (fa *FileAction) WithState(s State) CopyInput { - return &fileActionWithState{FileAction: fa.bind(s)} -} - -type fileActionWithState struct { - *FileAction -} - -func (fas *fileActionWithState) isFileOpCopyInput() {} - -func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { - var mi MkdirInfo - for _, o := range opt { - o.SetMkdirOption(&mi) - } - return &FileAction{ - action: &fileActionMkdir{ - file: p, - mode: m, - info: mi, - }, - } -} - -type fileActionMkdir struct { - file string - mode os.FileMode - info MkdirInfo -} - -func (a *fileActionMkdir) toProtoAction(ctx context.Context, parent string, base pb.InputIndex) (pb.IsFileAction, error) { - return &pb.FileAction_Mkdir{ - Mkdir: &pb.FileActionMkDir{ - Path: normalizePath(parent, a.file, false), - Mode: int32(a.mode & 0777), - MakeParents: a.info.MakeParents, - Owner: a.info.ChownOpt.marshal(base), - Timestamp: marshalTime(a.info.CreatedTime), - }, - }, nil -} - -type MkdirOption interface { - SetMkdirOption(*MkdirInfo) -} - -type ChownOption interface { - MkdirOption - MkfileOption - CopyOption -} - -type mkdirOptionFunc func(*MkdirInfo) - -func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) { - fn(mi) -} - -var _ MkdirOption = &MkdirInfo{} - -func WithParents(b bool) MkdirOption { - return mkdirOptionFunc(func(mi *MkdirInfo) { - mi.MakeParents = b - }) -} - -type MkdirInfo struct { - MakeParents bool - ChownOpt *ChownOpt - CreatedTime *time.Time -} - -func (mi *MkdirInfo) SetMkdirOption(mi2 *MkdirInfo) { - *mi2 = *mi -} - -func WithUser(name string) ChownOption { - opt := ChownOpt{} - - parts := strings.SplitN(name, ":", 2) - for i, v := range parts { - switch i { - case 0: - uid, err := parseUID(v) - if err != nil { - opt.User = &UserOpt{Name: v} - } else { - opt.User = &UserOpt{UID: uid} - } - case 1: - gid, err := parseUID(v) - if err != nil { - opt.Group = &UserOpt{Name: v} - } else { - opt.Group = &UserOpt{UID: gid} - } - } - } - - return opt -} - -func parseUID(str string) (int, error) { - if str == "root" { - return 0, nil - } - uid, err := strconv.ParseInt(str, 10, 32) - if err != nil { - return 0, err - } - return int(uid), nil -} - -func WithUIDGID(uid, gid int) ChownOption { - return ChownOpt{ - User: &UserOpt{UID: uid}, - Group: &UserOpt{UID: gid}, - } -} - -type ChownOpt struct { - User *UserOpt - Group *UserOpt -} - -func (co ChownOpt) SetMkdirOption(mi *MkdirInfo) { - mi.ChownOpt = &co -} -func (co ChownOpt) SetMkfileOption(mi *MkfileInfo) { - mi.ChownOpt = &co -} -func (co ChownOpt) SetCopyOption(mi *CopyInfo) { - mi.ChownOpt = &co -} - -func (co *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt { - if co == nil { - return nil - } - return &pb.ChownOpt{ - User: co.User.marshal(base), - Group: co.Group.marshal(base), - } -} - -type UserOpt struct { - UID int - Name string -} - -func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt { - if up == nil { - return nil - } - if up.Name != "" { - return 
&pb.UserOpt{User: &pb.UserOpt_ByName{ByName: &pb.NamedUserOpt{ - Name: up.Name, Input: base}}} - } - return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}} -} - -func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction { - var mi MkfileInfo - for _, o := range opts { - o.SetMkfileOption(&mi) - } - - return &FileAction{ - action: &fileActionMkfile{ - file: p, - mode: m, - dt: dt, - info: mi, - }, - } -} - -type MkfileOption interface { - SetMkfileOption(*MkfileInfo) -} - -type MkfileInfo struct { - ChownOpt *ChownOpt - CreatedTime *time.Time -} - -func (mi *MkfileInfo) SetMkfileOption(mi2 *MkfileInfo) { - *mi2 = *mi -} - -var _ MkfileOption = &MkfileInfo{} - -type fileActionMkfile struct { - file string - mode os.FileMode - dt []byte - info MkfileInfo -} - -func (a *fileActionMkfile) toProtoAction(ctx context.Context, parent string, base pb.InputIndex) (pb.IsFileAction, error) { - return &pb.FileAction_Mkfile{ - Mkfile: &pb.FileActionMkFile{ - Path: normalizePath(parent, a.file, false), - Mode: int32(a.mode & 0777), - Data: a.dt, - Owner: a.info.ChownOpt.marshal(base), - Timestamp: marshalTime(a.info.CreatedTime), - }, - }, nil -} - -func Rm(p string, opts ...RmOption) *FileAction { - var mi RmInfo - for _, o := range opts { - o.SetRmOption(&mi) - } - - return &FileAction{ - action: &fileActionRm{ - file: p, - info: mi, - }, - } -} - -type RmOption interface { - SetRmOption(*RmInfo) -} - -type rmOptionFunc func(*RmInfo) - -func (fn rmOptionFunc) SetRmOption(mi *RmInfo) { - fn(mi) -} - -type RmInfo struct { - AllowNotFound bool - AllowWildcard bool -} - -func (mi *RmInfo) SetRmOption(mi2 *RmInfo) { - *mi2 = *mi -} - -var _ RmOption = &RmInfo{} - -func WithAllowNotFound(b bool) RmOption { - return rmOptionFunc(func(mi *RmInfo) { - mi.AllowNotFound = b - }) -} - -func WithAllowWildcard(b bool) RmOption { - return rmOptionFunc(func(mi *RmInfo) { - mi.AllowWildcard = b - }) -} - -type fileActionRm struct { - file string - info RmInfo -} - -func (a *fileActionRm) toProtoAction(ctx context.Context, parent string, base pb.InputIndex) (pb.IsFileAction, error) { - return &pb.FileAction_Rm{ - Rm: &pb.FileActionRm{ - Path: normalizePath(parent, a.file, false), - AllowNotFound: a.info.AllowNotFound, - AllowWildcard: a.info.AllowWildcard, - }, - }, nil -} - -func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction { - var state *State - var fas *fileActionWithState - var err error - if st, ok := input.(State); ok { - state = &st - } else if v, ok := input.(*fileActionWithState); ok { - fas = v - } else { - err = errors.Errorf("invalid input type %T for copy", input) - } - - var mi CopyInfo - for _, o := range opts { - o.SetCopyOption(&mi) - } - - return &FileAction{ - action: &fileActionCopy{ - state: state, - fas: fas, - src: src, - dest: dest, - info: mi, - }, - err: err, - } -} - -type CopyOption interface { - SetCopyOption(*CopyInfo) -} - -type CopyInfo struct { - Mode *os.FileMode - FollowSymlinks bool - CopyDirContentsOnly bool - IncludePatterns []string - ExcludePatterns []string - AttemptUnpack bool - CreateDestPath bool - AllowWildcard bool - AllowEmptyWildcard bool - ChownOpt *ChownOpt - CreatedTime *time.Time -} - -func (mi *CopyInfo) SetCopyOption(mi2 *CopyInfo) { - *mi2 = *mi -} - -var _ CopyOption = &CopyInfo{} - -type fileActionCopy struct { - state *State - fas *fileActionWithState - src string - dest string - info CopyInfo -} - -func (a *fileActionCopy) toProtoAction(ctx context.Context, parent string, base pb.InputIndex) 
(pb.IsFileAction, error) { - src, err := a.sourcePath(ctx) - if err != nil { - return nil, err - } - c := &pb.FileActionCopy{ - Src: src, - Dest: normalizePath(parent, a.dest, true), - Owner: a.info.ChownOpt.marshal(base), - IncludePatterns: a.info.IncludePatterns, - ExcludePatterns: a.info.ExcludePatterns, - AllowWildcard: a.info.AllowWildcard, - AllowEmptyWildcard: a.info.AllowEmptyWildcard, - FollowSymlink: a.info.FollowSymlinks, - DirCopyContents: a.info.CopyDirContentsOnly, - AttemptUnpackDockerCompatibility: a.info.AttemptUnpack, - CreateDestPath: a.info.CreateDestPath, - Timestamp: marshalTime(a.info.CreatedTime), - } - if a.info.Mode != nil { - c.Mode = int32(*a.info.Mode) - } else { - c.Mode = -1 - } - return &pb.FileAction_Copy{ - Copy: c, - }, nil -} - -func (a *fileActionCopy) sourcePath(ctx context.Context) (string, error) { - p := path.Clean(a.src) - if !path.IsAbs(p) { - if a.state != nil { - dir, err := a.state.GetDir(ctx) - if err != nil { - return "", err - } - p = path.Join("/", dir, p) - } else if a.fas != nil { - dir, err := a.fas.state.GetDir(ctx) - if err != nil { - return "", err - } - p = path.Join("/", dir, p) - } - } - return p, nil -} - -func (a *fileActionCopy) addCaps(f *FileOp) { - if len(a.info.IncludePatterns) != 0 || len(a.info.ExcludePatterns) != 0 { - addCap(&f.constraints, pb.CapFileCopyIncludeExcludePatterns) - } -} - -type CreatedTime time.Time - -func WithCreatedTime(t time.Time) CreatedTime { - return CreatedTime(t) -} - -func (c CreatedTime) SetMkdirOption(mi *MkdirInfo) { - mi.CreatedTime = (*time.Time)(&c) -} - -func (c CreatedTime) SetMkfileOption(mi *MkfileInfo) { - mi.CreatedTime = (*time.Time)(&c) -} - -func (c CreatedTime) SetCopyOption(mi *CopyInfo) { - mi.CreatedTime = (*time.Time)(&c) -} - -func marshalTime(t *time.Time) int64 { - if t == nil { - return -1 - } - return t.UnixNano() -} - -type FileOp struct { - MarshalCache - action *FileAction - output Output - - constraints Constraints - isValidated bool -} - -func (f *FileOp) Validate(context.Context) error { - if f.isValidated { - return nil - } - if f.action == nil { - return errors.Errorf("action is required") - } - f.isValidated = true - return nil -} - -type marshalState struct { - ctx context.Context - visited map[*FileAction]*fileActionState - inputs []*pb.Input - actions []*fileActionState -} - -func newMarshalState(ctx context.Context) *marshalState { - return &marshalState{ - visited: map[*FileAction]*fileActionState{}, - ctx: ctx, - } -} - -type fileActionState struct { - base pb.InputIndex - input pb.InputIndex - inputRelative *int - input2 pb.InputIndex - input2Relative *int - target int - action subAction - fa *FileAction -} - -func (ms *marshalState) addInput(st *fileActionState, c *Constraints, o Output) (pb.InputIndex, error) { - inp, err := o.ToInput(ms.ctx, c) - if err != nil { - return 0, err - } - for i, inp2 := range ms.inputs { - if *inp == *inp2 { - return pb.InputIndex(i), nil - } - } - i := pb.InputIndex(len(ms.inputs)) - ms.inputs = append(ms.inputs, inp) - return i, nil -} - -func (ms *marshalState) add(fa *FileAction, c *Constraints) (*fileActionState, error) { - if st, ok := ms.visited[fa]; ok { - return st, nil - } - - if fa.err != nil { - return nil, fa.err - } - - var prevState *fileActionState - if parent := fa.prev; parent != nil { - var err error - prevState, err = ms.add(parent, c) - if err != nil { - return nil, err - } - } - - st := &fileActionState{ - action: fa.action, - input: -1, - input2: -1, - base: -1, - fa: fa, - } - - if source := 
fa.state.Output(); source != nil { - inp, err := ms.addInput(st, c, source) - if err != nil { - return nil, err - } - st.base = inp - } - - if fa.prev == nil { - st.input = st.base - } else { - st.inputRelative = &prevState.target - } - - if a, ok := fa.action.(*fileActionCopy); ok { - if a.state != nil { - if out := a.state.Output(); out != nil { - inp, err := ms.addInput(st, c, out) - if err != nil { - return nil, err - } - st.input2 = inp - } - } else if a.fas != nil { - src, err := ms.add(a.fas.FileAction, c) - if err != nil { - return nil, err - } - st.input2Relative = &src.target - } else { - return nil, errors.Errorf("invalid empty source for copy") - } - } - - st.target = len(ms.actions) - - ms.visited[fa] = st - ms.actions = append(ms.actions, st) - - return st, nil -} - -func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { - if f.Cached(c) { - return f.Load() - } - if err := f.Validate(ctx); err != nil { - return "", nil, nil, nil, err - } - - addCap(&f.constraints, pb.CapFileBase) - - pfo := &pb.FileOp{} - - if f.constraints.Platform == nil { - p, err := getPlatform(*f.action.state)(ctx) - if err != nil { - return "", nil, nil, nil, err - } - f.constraints.Platform = p - } - - pop, md := MarshalConstraints(c, &f.constraints) - pop.Op = &pb.Op_File{ - File: pfo, - } - - state := newMarshalState(ctx) - _, err := state.add(f.action, c) - if err != nil { - return "", nil, nil, nil, err - } - pop.Inputs = state.inputs - - for i, st := range state.actions { - if adder, isCapAdder := st.action.(capAdder); isCapAdder { - adder.addCaps(f) - } - - output := pb.OutputIndex(-1) - if i+1 == len(state.actions) { - output = 0 - } - - var parent string - if st.fa.state != nil { - parent, err = st.fa.state.GetDir(ctx) - if err != nil { - return "", nil, nil, nil, err - } - } - - action, err := st.action.toProtoAction(ctx, parent, st.base) - if err != nil { - return "", nil, nil, nil, err - } - - pfo.Actions = append(pfo.Actions, &pb.FileAction{ - Input: getIndex(st.input, len(state.inputs), st.inputRelative), - SecondaryInput: getIndex(st.input2, len(state.inputs), st.input2Relative), - Output: output, - Action: action, - }) - } - - dt, err := pop.Marshal() - if err != nil { - return "", nil, nil, nil, err - } - f.Store(dt, md, f.constraints.SourceLocations, c) - return f.Load() -} - -func normalizePath(parent, p string, keepSlash bool) string { - origPath := p - p = path.Clean(p) - if !path.IsAbs(p) { - p = path.Join("/", parent, p) - } - if keepSlash { - if strings.HasSuffix(origPath, "/") && !strings.HasSuffix(p, "/") { - p += "/" - } else if strings.HasSuffix(origPath, "/.") { - if p != "/" { - p += "/" - } - p += "." 
- } - } - return p -} - -func (f *FileOp) Output() Output { - return f.output -} - -func (f *FileOp) Inputs() (inputs []Output) { - mm := map[Output]struct{}{} - - f.action.allOutputs(mm) - - for o := range mm { - inputs = append(inputs, o) - } - return inputs -} - -func getIndex(input pb.InputIndex, len int, relative *int) pb.InputIndex { - if relative != nil { - return pb.InputIndex(len + *relative) - } - return input -} diff --git a/vendor/github.com/moby/buildkit/client/llb/marshal.go b/vendor/github.com/moby/buildkit/client/llb/marshal.go deleted file mode 100644 index 282b592b7b1e..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/marshal.go +++ /dev/null @@ -1,117 +0,0 @@ -package llb - -import ( - "io" - "io/ioutil" - - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/solver/pb" - digest "github.com/opencontainers/go-digest" -) - -// Definition is the LLB definition structure with per-vertex metadata entries -// Corresponds to the Definition structure defined in solver/pb.Definition. -type Definition struct { - Def [][]byte - Metadata map[digest.Digest]pb.OpMetadata - Source *pb.Source -} - -func (def *Definition) ToPB() *pb.Definition { - md := make(map[digest.Digest]pb.OpMetadata, len(def.Metadata)) - for k, v := range def.Metadata { - md[k] = v - } - return &pb.Definition{ - Def: def.Def, - Source: def.Source, - Metadata: md, - } -} - -func (def *Definition) FromPB(x *pb.Definition) { - def.Def = x.Def - def.Source = x.Source - def.Metadata = make(map[digest.Digest]pb.OpMetadata) - for k, v := range x.Metadata { - def.Metadata[k] = v - } -} - -func WriteTo(def *Definition, w io.Writer) error { - b, err := def.ToPB().Marshal() - if err != nil { - return err - } - _, err = w.Write(b) - return err -} - -func ReadFrom(r io.Reader) (*Definition, error) { - b, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - var pbDef pb.Definition - if err := pbDef.Unmarshal(b); err != nil { - return nil, err - } - var def Definition - def.FromPB(&pbDef) - return &def, nil -} - -func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { - c := *base - c.WorkerConstraints = append([]string{}, c.WorkerConstraints...) 
- - if p := override.Platform; p != nil { - c.Platform = p - } - - for _, wc := range override.WorkerConstraints { - c.WorkerConstraints = append(c.WorkerConstraints, wc) - } - - c.Metadata = mergeMetadata(c.Metadata, override.Metadata) - - if c.Platform == nil { - defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) - c.Platform = &defaultPlatform - } - - return &pb.Op{ - Platform: &pb.Platform{ - OS: c.Platform.OS, - Architecture: c.Platform.Architecture, - Variant: c.Platform.Variant, - OSVersion: c.Platform.OSVersion, - OSFeatures: c.Platform.OSFeatures, - }, - Constraints: &pb.WorkerConstraints{ - Filter: c.WorkerConstraints, - }, - }, &c.Metadata -} - -type MarshalCache struct { - digest digest.Digest - dt []byte - md *pb.OpMetadata - srcs []*SourceLocation - constraints *Constraints -} - -func (mc *MarshalCache) Cached(c *Constraints) bool { - return mc.dt != nil && mc.constraints == c -} -func (mc *MarshalCache) Load() (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { - return mc.digest, mc.dt, mc.md, mc.srcs, nil -} -func (mc *MarshalCache) Store(dt []byte, md *pb.OpMetadata, srcs []*SourceLocation, c *Constraints) { - mc.digest = digest.FromBytes(dt) - mc.dt = dt - mc.md = md - mc.constraints = c - mc.srcs = srcs -} diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go deleted file mode 100644 index 80cc18dab1c7..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/meta.go +++ /dev/null @@ -1,323 +0,0 @@ -package llb - -import ( - "context" - "fmt" - "net" - "path" - - "github.com/containerd/containerd/platforms" - "github.com/google/shlex" - "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type contextKeyT string - -var ( - keyArgs = contextKeyT("llb.exec.args") - keyDir = contextKeyT("llb.exec.dir") - keyEnv = contextKeyT("llb.exec.env") - keyUser = contextKeyT("llb.exec.user") - keyHostname = contextKeyT("llb.exec.hostname") - keyExtraHost = contextKeyT("llb.exec.extrahost") - keyPlatform = contextKeyT("llb.platform") - keyNetwork = contextKeyT("llb.network") - keySecurity = contextKeyT("llb.security") -) - -func AddEnvf(key, value string, v ...interface{}) StateOption { - return addEnvf(key, value, true, v...) -} - -func AddEnv(key, value string) StateOption { - return addEnvf(key, value, false) -} - -func addEnvf(key, value string, replace bool, v ...interface{}) StateOption { - if replace { - value = fmt.Sprintf(value, v...) - } - return func(s State) State { - return s.withValue(keyEnv, func(ctx context.Context) (interface{}, error) { - env, err := getEnv(s)(ctx) - if err != nil { - return nil, err - } - return env.AddOrReplace(key, value), nil - }) - } -} - -func Dir(str string) StateOption { - return dirf(str, false) -} - -func Dirf(str string, v ...interface{}) StateOption { - return dirf(str, true, v...) -} - -func dirf(value string, replace bool, v ...interface{}) StateOption { - if replace { - value = fmt.Sprintf(value, v...) 
- } - return func(s State) State { - return s.withValue(keyDir, func(ctx context.Context) (interface{}, error) { - if !path.IsAbs(value) { - prev, err := getDir(s)(ctx) - if err != nil { - return nil, err - } - if prev == "" { - prev = "/" - } - value = path.Join(prev, value) - } - return value, nil - }) - } -} - -func User(str string) StateOption { - return func(s State) State { - return s.WithValue(keyUser, str) - } -} - -func Reset(other State) StateOption { - return func(s State) State { - s = NewState(s.Output()) - s.prev = &other - return s - } -} - -func getEnv(s State) func(context.Context) (EnvList, error) { - return func(ctx context.Context) (EnvList, error) { - v, err := s.getValue(keyEnv)(ctx) - if err != nil { - return nil, err - } - if v != nil { - return v.(EnvList), nil - } - return EnvList{}, nil - } -} - -func getDir(s State) func(context.Context) (string, error) { - return func(ctx context.Context) (string, error) { - v, err := s.getValue(keyDir)(ctx) - if err != nil { - return "", err - } - if v != nil { - return v.(string), nil - } - return "", nil - } -} - -func getArgs(s State) func(context.Context) ([]string, error) { - return func(ctx context.Context) ([]string, error) { - v, err := s.getValue(keyArgs)(ctx) - if err != nil { - return nil, err - } - if v != nil { - return v.([]string), nil - } - return nil, nil - } -} - -func getUser(s State) func(context.Context) (string, error) { - return func(ctx context.Context) (string, error) { - v, err := s.getValue(keyUser)(ctx) - if err != nil { - return "", err - } - if v != nil { - return v.(string), nil - } - return "", nil - } -} - -func Hostname(str string) StateOption { - return func(s State) State { - return s.WithValue(keyHostname, str) - } -} - -func getHostname(s State) func(context.Context) (string, error) { - return func(ctx context.Context) (string, error) { - v, err := s.getValue(keyHostname)(ctx) - if err != nil { - return "", err - } - if v != nil { - return v.(string), nil - } - return "", nil - } -} - -func args(args ...string) StateOption { - return func(s State) State { - return s.WithValue(keyArgs, args) - } -} - -func shlexf(str string, replace bool, v ...interface{}) StateOption { - if replace { - str = fmt.Sprintf(str, v...) 
- } - return func(s State) State { - arg, err := shlex.Split(str) - if err != nil { //nolint - // TODO: handle error - } - return args(arg...)(s) - } -} - -func platform(p specs.Platform) StateOption { - return func(s State) State { - return s.WithValue(keyPlatform, platforms.Normalize(p)) - } -} - -func getPlatform(s State) func(context.Context) (*specs.Platform, error) { - return func(ctx context.Context) (*specs.Platform, error) { - v, err := s.getValue(keyPlatform)(ctx) - if err != nil { - return nil, err - } - if v != nil { - p := v.(specs.Platform) - return &p, nil - } - return nil, nil - } -} - -func extraHost(host string, ip net.IP) StateOption { - return func(s State) State { - return s.withValue(keyExtraHost, func(ctx context.Context) (interface{}, error) { - v, err := getExtraHosts(s)(ctx) - if err != nil { - return nil, err - } - return append(v, HostIP{Host: host, IP: ip}), nil - }) - } -} - -func getExtraHosts(s State) func(context.Context) ([]HostIP, error) { - return func(ctx context.Context) ([]HostIP, error) { - v, err := s.getValue(keyExtraHost)(ctx) - if err != nil { - return nil, err - } - if v != nil { - return v.([]HostIP), nil - } - return nil, nil - } -} - -type HostIP struct { - Host string - IP net.IP -} - -func Network(v pb.NetMode) StateOption { - return func(s State) State { - return s.WithValue(keyNetwork, v) - } -} -func getNetwork(s State) func(context.Context) (pb.NetMode, error) { - return func(ctx context.Context) (pb.NetMode, error) { - v, err := s.getValue(keyNetwork)(ctx) - if err != nil { - return 0, err - } - if v != nil { - n := v.(pb.NetMode) - return n, nil - } - return NetModeSandbox, nil - } -} - -func Security(v pb.SecurityMode) StateOption { - return func(s State) State { - return s.WithValue(keySecurity, v) - } -} -func getSecurity(s State) func(context.Context) (pb.SecurityMode, error) { - return func(ctx context.Context) (pb.SecurityMode, error) { - v, err := s.getValue(keySecurity)(ctx) - if err != nil { - return 0, err - } - if v != nil { - n := v.(pb.SecurityMode) - return n, nil - } - return SecurityModeSandbox, nil - } -} - -type EnvList []KeyValue - -type KeyValue struct { - key string - value string -} - -func (e EnvList) AddOrReplace(k, v string) EnvList { - e = e.Delete(k) - e = append(e, KeyValue{key: k, value: v}) - return e -} - -func (e EnvList) SetDefault(k, v string) EnvList { - if _, ok := e.Get(k); !ok { - e = append(e, KeyValue{key: k, value: v}) - } - return e -} - -func (e EnvList) Delete(k string) EnvList { - e = append([]KeyValue(nil), e...) - if i, ok := e.Index(k); ok { - return append(e[:i], e[i+1:]...) 
- } - return e -} - -func (e EnvList) Get(k string) (string, bool) { - if index, ok := e.Index(k); ok { - return e[index].value, true - } - return "", false -} - -func (e EnvList) Index(k string) (int, bool) { - for i, kv := range e { - if kv.key == k { - return i, true - } - } - return -1, false -} - -func (e EnvList) ToArray() []string { - out := make([]string, 0, len(e)) - for _, kv := range e { - out = append(out, kv.key+"="+kv.value) - } - return out -} diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go deleted file mode 100644 index 31fc395993f8..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/resolver.go +++ /dev/null @@ -1,35 +0,0 @@ -package llb - -import ( - "context" - - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -// WithMetaResolver adds a metadata resolver to an image -func WithMetaResolver(mr ImageMetaResolver) ImageOption { - return imageOptionFunc(func(ii *ImageInfo) { - ii.metaResolver = mr - }) -} - -// ResolveDigest uses the meta resolver to update the ref of image with full digest before marshaling. -// This makes image ref immutable and is recommended if you want to make sure meta resolver data -// matches the image used during the build. -func ResolveDigest(v bool) ImageOption { - return imageOptionFunc(func(ii *ImageInfo) { - ii.resolveDigest = v - }) -} - -// ImageMetaResolver can resolve image config metadata from a reference -type ImageMetaResolver interface { - ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (digest.Digest, []byte, error) -} - -type ResolveImageConfigOpt struct { - Platform *specs.Platform - ResolveMode string - LogName string -} diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go deleted file mode 100644 index 7d389e8bda1f..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/source.go +++ /dev/null @@ -1,519 +0,0 @@ -package llb - -import ( - "context" - _ "crypto/sha256" // for opencontainers/go-digest - "encoding/json" - "os" - "strconv" - "strings" - - "github.com/docker/distribution/reference" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/util/gitutil" - "github.com/moby/buildkit/util/sshutil" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" -) - -type SourceOp struct { - MarshalCache - id string - attrs map[string]string - output Output - constraints Constraints - err error -} - -func NewSource(id string, attrs map[string]string, c Constraints) *SourceOp { - s := &SourceOp{ - id: id, - attrs: attrs, - constraints: c, - } - s.output = &output{vertex: s, platform: c.Platform} - return s -} - -func (s *SourceOp) Validate(ctx context.Context) error { - if s.err != nil { - return s.err - } - if s.id == "" { - return errors.Errorf("source identifier can't be empty") - } - return nil -} - -func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { - if s.Cached(constraints) { - return s.Load() - } - if err := s.Validate(ctx); err != nil { - return "", nil, nil, nil, err - } - - if strings.HasPrefix(s.id, "local://") { - if _, hasSession := s.attrs[pb.AttrLocalSessionID]; !hasSession { - uid := s.constraints.LocalUniqueID - if uid == "" { - uid = constraints.LocalUniqueID - } - s.attrs[pb.AttrLocalUniqueID] = uid - 
addCap(&s.constraints, pb.CapSourceLocalUnique) - } - } - proto, md := MarshalConstraints(constraints, &s.constraints) - - proto.Op = &pb.Op_Source{ - Source: &pb.SourceOp{Identifier: s.id, Attrs: s.attrs}, - } - - if !platformSpecificSource(s.id) { - proto.Platform = nil - } - - dt, err := proto.Marshal() - if err != nil { - return "", nil, nil, nil, err - } - - s.Store(dt, md, s.constraints.SourceLocations, constraints) - return s.Load() -} - -func (s *SourceOp) Output() Output { - return s.output -} - -func (s *SourceOp) Inputs() []Output { - return nil -} - -func Image(ref string, opts ...ImageOption) State { - r, err := reference.ParseNormalizedNamed(ref) - if err == nil { - r = reference.TagNameOnly(r) - ref = r.String() - } - var info ImageInfo - for _, opt := range opts { - opt.SetImageOption(&info) - } - - addCap(&info.Constraints, pb.CapSourceImage) - - attrs := map[string]string{} - if info.resolveMode != 0 { - attrs[pb.AttrImageResolveMode] = info.resolveMode.String() - if info.resolveMode == ResolveModeForcePull { - addCap(&info.Constraints, pb.CapSourceImageResolveMode) // only require cap for security enforced mode - } - } - - if info.RecordType != "" { - attrs[pb.AttrImageRecordType] = info.RecordType - } - - src := NewSource("docker-image://"+ref, attrs, info.Constraints) // controversial - if err != nil { - src.err = err - } else if info.metaResolver != nil { - if _, ok := r.(reference.Digested); ok || !info.resolveDigest { - return NewState(src.Output()).Async(func(ctx context.Context, st State) (State, error) { - _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ - Platform: info.Constraints.Platform, - ResolveMode: info.resolveMode.String(), - }) - if err != nil { - return State{}, err - } - return st.WithImageConfig(dt) - }) - } - return Scratch().Async(func(ctx context.Context, _ State) (State, error) { - dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ - Platform: info.Constraints.Platform, - ResolveMode: info.resolveMode.String(), - }) - if err != nil { - return State{}, err - } - if dgst != "" { - r, err = reference.WithDigest(r, dgst) - if err != nil { - return State{}, err - } - } - return NewState(NewSource("docker-image://"+r.String(), attrs, info.Constraints).Output()).WithImageConfig(dt) - }) - } - return NewState(src.Output()) -} - -type ImageOption interface { - SetImageOption(*ImageInfo) -} - -type imageOptionFunc func(*ImageInfo) - -func (fn imageOptionFunc) SetImageOption(ii *ImageInfo) { - fn(ii) -} - -var MarkImageInternal = imageOptionFunc(func(ii *ImageInfo) { - ii.RecordType = "internal" -}) - -type ResolveMode int - -const ( - ResolveModeDefault ResolveMode = iota - ResolveModeForcePull - ResolveModePreferLocal -) - -func (r ResolveMode) SetImageOption(ii *ImageInfo) { - ii.resolveMode = r -} - -func (r ResolveMode) String() string { - switch r { - case ResolveModeDefault: - return pb.AttrImageResolveModeDefault - case ResolveModeForcePull: - return pb.AttrImageResolveModeForcePull - case ResolveModePreferLocal: - return pb.AttrImageResolveModePreferLocal - default: - return "" - } -} - -type ImageInfo struct { - constraintsWrapper - metaResolver ImageMetaResolver - resolveDigest bool - resolveMode ResolveMode - RecordType string -} - -func Git(remote, ref string, opts ...GitOption) State { - url := strings.Split(remote, "#")[0] - - var protocolType int - remote, protocolType = gitutil.ParseProtocol(remote) - - var sshHost string - if protocolType == 
gitutil.SSHProtocol { - parts := strings.SplitN(remote, ":", 2) - if len(parts) == 2 { - sshHost = parts[0] - // keep remote consistent with http(s) version - remote = parts[0] + "/" + parts[1] - } - } - if protocolType == gitutil.UnknownProtocol { - url = "https://" + url - } - - id := remote - - if ref != "" { - id += "#" + ref - } - - gi := &GitInfo{ - AuthHeaderSecret: "GIT_AUTH_HEADER", - AuthTokenSecret: "GIT_AUTH_TOKEN", - } - for _, o := range opts { - o.SetGitOption(gi) - } - attrs := map[string]string{} - if gi.KeepGitDir { - attrs[pb.AttrKeepGitDir] = "true" - addCap(&gi.Constraints, pb.CapSourceGitKeepDir) - } - if url != "" { - attrs[pb.AttrFullRemoteURL] = url - addCap(&gi.Constraints, pb.CapSourceGitFullURL) - } - if gi.AuthTokenSecret != "" { - attrs[pb.AttrAuthTokenSecret] = gi.AuthTokenSecret - if gi.addAuthCap { - addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth) - } - } - if gi.AuthHeaderSecret != "" { - attrs[pb.AttrAuthHeaderSecret] = gi.AuthHeaderSecret - if gi.addAuthCap { - addCap(&gi.Constraints, pb.CapSourceGitHTTPAuth) - } - } - if protocolType == gitutil.SSHProtocol { - if gi.KnownSSHHosts != "" { - attrs[pb.AttrKnownSSHHosts] = gi.KnownSSHHosts - } else if sshHost != "" { - keyscan, err := sshutil.SSHKeyScan(sshHost) - if err == nil { - // best effort - attrs[pb.AttrKnownSSHHosts] = keyscan - } - } - addCap(&gi.Constraints, pb.CapSourceGitKnownSSHHosts) - - if gi.MountSSHSock == "" { - attrs[pb.AttrMountSSHSock] = "default" - } else { - attrs[pb.AttrMountSSHSock] = gi.MountSSHSock - } - addCap(&gi.Constraints, pb.CapSourceGitMountSSHSock) - } - - addCap(&gi.Constraints, pb.CapSourceGit) - - source := NewSource("git://"+id, attrs, gi.Constraints) - return NewState(source.Output()) -} - -type GitOption interface { - SetGitOption(*GitInfo) -} -type gitOptionFunc func(*GitInfo) - -func (fn gitOptionFunc) SetGitOption(gi *GitInfo) { - fn(gi) -} - -type GitInfo struct { - constraintsWrapper - KeepGitDir bool - AuthTokenSecret string - AuthHeaderSecret string - addAuthCap bool - KnownSSHHosts string - MountSSHSock string -} - -func KeepGitDir() GitOption { - return gitOptionFunc(func(gi *GitInfo) { - gi.KeepGitDir = true - }) -} - -func AuthTokenSecret(v string) GitOption { - return gitOptionFunc(func(gi *GitInfo) { - gi.AuthTokenSecret = v - gi.addAuthCap = true - }) -} - -func AuthHeaderSecret(v string) GitOption { - return gitOptionFunc(func(gi *GitInfo) { - gi.AuthHeaderSecret = v - gi.addAuthCap = true - }) -} - -func KnownSSHHosts(key string) GitOption { - key = strings.TrimSuffix(key, "\n") - return gitOptionFunc(func(gi *GitInfo) { - gi.KnownSSHHosts = gi.KnownSSHHosts + key + "\n" - }) -} - -func MountSSHSock(sshID string) GitOption { - return gitOptionFunc(func(gi *GitInfo) { - gi.MountSSHSock = sshID - }) -} - -func Scratch() State { - return NewState(nil) -} - -func Local(name string, opts ...LocalOption) State { - gi := &LocalInfo{} - - for _, o := range opts { - o.SetLocalOption(gi) - } - attrs := map[string]string{} - if gi.SessionID != "" { - attrs[pb.AttrLocalSessionID] = gi.SessionID - addCap(&gi.Constraints, pb.CapSourceLocalSessionID) - } - if gi.IncludePatterns != "" { - attrs[pb.AttrIncludePatterns] = gi.IncludePatterns - addCap(&gi.Constraints, pb.CapSourceLocalIncludePatterns) - } - if gi.FollowPaths != "" { - attrs[pb.AttrFollowPaths] = gi.FollowPaths - addCap(&gi.Constraints, pb.CapSourceLocalFollowPaths) - } - if gi.ExcludePatterns != "" { - attrs[pb.AttrExcludePatterns] = gi.ExcludePatterns - addCap(&gi.Constraints, 
pb.CapSourceLocalExcludePatterns) - } - if gi.SharedKeyHint != "" { - attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint - addCap(&gi.Constraints, pb.CapSourceLocalSharedKeyHint) - } - - addCap(&gi.Constraints, pb.CapSourceLocal) - - source := NewSource("local://"+name, attrs, gi.Constraints) - return NewState(source.Output()) -} - -type LocalOption interface { - SetLocalOption(*LocalInfo) -} - -type localOptionFunc func(*LocalInfo) - -func (fn localOptionFunc) SetLocalOption(li *LocalInfo) { - fn(li) -} - -func SessionID(id string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - li.SessionID = id - }) -} - -func IncludePatterns(p []string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - if len(p) == 0 { - li.IncludePatterns = "" - return - } - dt, _ := json.Marshal(p) // empty on error - li.IncludePatterns = string(dt) - }) -} - -func FollowPaths(p []string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - if len(p) == 0 { - li.FollowPaths = "" - return - } - dt, _ := json.Marshal(p) // empty on error - li.FollowPaths = string(dt) - }) -} - -func ExcludePatterns(p []string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - if len(p) == 0 { - li.ExcludePatterns = "" - return - } - dt, _ := json.Marshal(p) // empty on error - li.ExcludePatterns = string(dt) - }) -} - -func SharedKeyHint(h string) LocalOption { - return localOptionFunc(func(li *LocalInfo) { - li.SharedKeyHint = h - }) -} - -type LocalInfo struct { - constraintsWrapper - SessionID string - IncludePatterns string - ExcludePatterns string - FollowPaths string - SharedKeyHint string -} - -func HTTP(url string, opts ...HTTPOption) State { - hi := &HTTPInfo{} - for _, o := range opts { - o.SetHTTPOption(hi) - } - attrs := map[string]string{} - if hi.Checksum != "" { - attrs[pb.AttrHTTPChecksum] = hi.Checksum.String() - addCap(&hi.Constraints, pb.CapSourceHTTPChecksum) - } - if hi.Filename != "" { - attrs[pb.AttrHTTPFilename] = hi.Filename - } - if hi.Perm != 0 { - attrs[pb.AttrHTTPPerm] = "0" + strconv.FormatInt(int64(hi.Perm), 8) - addCap(&hi.Constraints, pb.CapSourceHTTPPerm) - } - if hi.UID != 0 { - attrs[pb.AttrHTTPUID] = strconv.Itoa(hi.UID) - addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) - } - if hi.GID != 0 { - attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID) - addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) - } - - addCap(&hi.Constraints, pb.CapSourceHTTP) - source := NewSource(url, attrs, hi.Constraints) - return NewState(source.Output()) -} - -type HTTPInfo struct { - constraintsWrapper - Checksum digest.Digest - Filename string - Perm int - UID int - GID int -} - -type HTTPOption interface { - SetHTTPOption(*HTTPInfo) -} - -type httpOptionFunc func(*HTTPInfo) - -func (fn httpOptionFunc) SetHTTPOption(hi *HTTPInfo) { - fn(hi) -} - -func Checksum(dgst digest.Digest) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.Checksum = dgst - }) -} - -func Chmod(perm os.FileMode) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.Perm = int(perm) & 0777 - }) -} - -func Filename(name string) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.Filename = name - }) -} - -func Chown(uid, gid int) HTTPOption { - return httpOptionFunc(func(hi *HTTPInfo) { - hi.UID = uid - hi.GID = gid - }) -} - -func platformSpecificSource(id string) bool { - return strings.HasPrefix(id, "docker-image://") -} - -func addCap(c *Constraints, id apicaps.CapID) { - if c.Metadata.Caps == nil { - c.Metadata.Caps = make(map[apicaps.CapID]bool) - } - c.Metadata.Caps[id] = true 
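For orientation (this snippet is not part of the patch), a small sketch of the four source constructors exposed by the deleted source.go, using only options visible above; the image reference and URLs are placeholders.

package main

import (
	"github.com/moby/buildkit/client/llb"
)

func main() {
	// Image source: pulled from a registry, here forcing a fresh resolve.
	base := llb.Image("docker.io/library/alpine:latest", llb.ResolveModeForcePull)

	// Git source: clone a repository, keeping the .git directory in the result.
	src := llb.Git("https://github.com/moby/buildkit.git", "master", llb.KeepGitDir())

	// Local source: files synced from the client session under the name "context".
	ctxDir := llb.Local("context",
		llb.IncludePatterns([]string{"**/*.go"}),
		llb.SharedKeyHint("my-build"),
	)

	// HTTP source: a single file download with explicit name and mode.
	file := llb.HTTP("https://example.com/archive.tar.gz", // placeholder URL
		llb.Filename("archive.tar.gz"),
		llb.Chmod(0644),
	)

	_, _, _, _ = base, src, ctxDir, file
}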
-} diff --git a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go deleted file mode 100644 index 87afde9954c8..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go +++ /dev/null @@ -1,111 +0,0 @@ -package llb - -import ( - "context" - - "github.com/moby/buildkit/solver/pb" - "github.com/opencontainers/go-digest" -) - -type SourceMap struct { - State *State - Definition *Definition - Filename string - Data []byte -} - -func NewSourceMap(st *State, filename string, dt []byte) *SourceMap { - return &SourceMap{ - State: st, - Filename: filename, - Data: dt, - } -} - -func (s *SourceMap) Location(r []*pb.Range) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - if s == nil { - return - } - c.SourceLocations = append(c.SourceLocations, &SourceLocation{ - SourceMap: s, - Ranges: r, - }) - }) -} - -type SourceLocation struct { - SourceMap *SourceMap - Ranges []*pb.Range -} - -type sourceMapCollector struct { - maps []*SourceMap - index map[*SourceMap]int - locations map[digest.Digest][]*SourceLocation -} - -func newSourceMapCollector() *sourceMapCollector { - return &sourceMapCollector{ - index: map[*SourceMap]int{}, - locations: map[digest.Digest][]*SourceLocation{}, - } -} - -func (smc *sourceMapCollector) Add(dgst digest.Digest, ls []*SourceLocation) { - for _, l := range ls { - idx, ok := smc.index[l.SourceMap] - if !ok { - idx = len(smc.maps) - smc.maps = append(smc.maps, l.SourceMap) - } - smc.index[l.SourceMap] = idx - } - smc.locations[dgst] = ls -} - -func (smc *sourceMapCollector) Marshal(ctx context.Context, co ...ConstraintsOpt) (*pb.Source, error) { - s := &pb.Source{ - Locations: make(map[string]*pb.Locations), - } - for _, m := range smc.maps { - def := m.Definition - if def == nil && m.State != nil { - var err error - def, err = m.State.Marshal(ctx, co...) 
- if err != nil { - return nil, err - } - m.Definition = def - } - - info := &pb.SourceInfo{ - Data: m.Data, - Filename: m.Filename, - } - - if def != nil { - info.Definition = def.ToPB() - } - - s.Infos = append(s.Infos, info) - } - - for dgst, locs := range smc.locations { - pbLocs, ok := s.Locations[dgst.String()] - if !ok { - pbLocs = &pb.Locations{} - } - - for _, loc := range locs { - pbLocs.Locations = append(pbLocs.Locations, &pb.Location{ - SourceIndex: int32(smc.index[loc.SourceMap]), - Ranges: loc.Ranges, - }) - } - - s.Locations[dgst.String()] = pbLocs - } - - return s, nil -} diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go deleted file mode 100644 index eca7164daadf..000000000000 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ /dev/null @@ -1,570 +0,0 @@ -package llb - -import ( - "context" - "encoding/json" - "fmt" - "net" - "strings" - - "github.com/containerd/containerd/platforms" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -type StateOption func(State) State - -type Output interface { - ToInput(context.Context, *Constraints) (*pb.Input, error) - Vertex(context.Context) Vertex -} - -type Vertex interface { - Validate(context.Context) error - Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) - Output() Output - Inputs() []Output -} - -func NewState(o Output) State { - s := State{ - out: o, - }.Dir("/") - s = s.ensurePlatform() - return s -} - -type State struct { - out Output - prev *State - key interface{} - value func(context.Context) (interface{}, error) - opts []ConstraintsOpt - async *asyncState -} - -func (s State) ensurePlatform() State { - if o, ok := s.out.(interface { - Platform() *specs.Platform - }); ok { - if p := o.Platform(); p != nil { - s = platform(*p)(s) - } - } - return s -} - -func (s State) WithValue(k, v interface{}) State { - return s.withValue(k, func(context.Context) (interface{}, error) { - return v, nil - }) -} - -func (s State) withValue(k interface{}, v func(context.Context) (interface{}, error)) State { - return State{ - out: s.Output(), - prev: &s, // doesn't need to be original pointer - key: k, - value: v, - } -} - -func (s State) Value(ctx context.Context, k interface{}) (interface{}, error) { - return s.getValue(k)(ctx) -} - -func (s State) getValue(k interface{}) func(context.Context) (interface{}, error) { - if s.key == k { - return s.value - } - if s.async != nil { - return func(ctx context.Context) (interface{}, error) { - err := s.async.Do(ctx) - if err != nil { - return nil, err - } - return s.async.target.getValue(k)(ctx) - } - } - if s.prev == nil { - return nilValue - } - return s.prev.getValue(k) -} - -func (s State) Async(f func(context.Context, State) (State, error)) State { - s2 := State{ - async: &asyncState{f: f, prev: s}, - } - return s2 -} - -func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State { - s.opts = co - return s -} - -func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition, error) { - def := &Definition{ - Metadata: make(map[digest.Digest]pb.OpMetadata, 0), - } - if s.Output() == nil || s.Output().Vertex(ctx) == nil { - return def, nil - } - - defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) - c := &Constraints{ - Platform: &defaultPlatform, - 
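To illustrate the SourceMap type deleted just above (again outside the patch), a sketch of attaching source information to an operation. llb.Shlex comes from exec.go of the same package and is an assumption here, and no specific line range is recorded.

package main

import (
	"github.com/moby/buildkit/client/llb"
)

func main() {
	dockerfile := []byte("FROM alpine\nRUN echo hello\n")

	// A SourceMap ties vertices back to an input file (here a Dockerfile) so
	// that errors and progress output can reference the original source.
	sm := llb.NewSourceMap(nil, "Dockerfile", dockerfile)

	// Location() returns a ConstraintsOpt, so it can be attached directly to an
	// operation. Passing nil ranges records the file without a specific line.
	st := llb.Image("docker.io/library/alpine:latest").
		Run(
			llb.Shlex("echo hello"), // llb.Shlex lives elsewhere in this package
			sm.Location(nil),
		).Root()

	_ = st
}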
LocalUniqueID: identity.NewID(), - } - for _, o := range append(s.opts, co...) { - o.SetConstraintsOption(c) - } - - smc := newSourceMapCollector() - - def, err := marshal(ctx, s.Output().Vertex(ctx), def, smc, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c) - if err != nil { - return def, err - } - inp, err := s.Output().ToInput(ctx, c) - if err != nil { - return def, err - } - proto := &pb.Op{Inputs: []*pb.Input{inp}} - dt, err := proto.Marshal() - if err != nil { - return def, err - } - def.Def = append(def.Def, dt) - - dgst := digest.FromBytes(dt) - md := def.Metadata[dgst] - md.Caps = map[apicaps.CapID]bool{ - pb.CapConstraints: true, - pb.CapPlatform: true, - } - - for _, m := range def.Metadata { - if m.IgnoreCache { - md.Caps[pb.CapMetaIgnoreCache] = true - } - if m.Description != nil { - md.Caps[pb.CapMetaDescription] = true - } - if m.ExportCache != nil { - md.Caps[pb.CapMetaExportCache] = true - } - } - - def.Metadata[dgst] = md - sm, err := smc.Marshal(ctx, co...) - if err != nil { - return nil, err - } - def.Source = sm - - return def, nil -} - -func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollector, cache map[digest.Digest]struct{}, vertexCache map[Vertex]struct{}, c *Constraints) (*Definition, error) { - if _, ok := vertexCache[v]; ok { - return def, nil - } - for _, inp := range v.Inputs() { - var err error - def, err = marshal(ctx, inp.Vertex(ctx), def, s, cache, vertexCache, c) - if err != nil { - return def, err - } - } - - dgst, dt, opMeta, sls, err := v.Marshal(ctx, c) - if err != nil { - return def, err - } - vertexCache[v] = struct{}{} - if opMeta != nil { - def.Metadata[dgst] = mergeMetadata(def.Metadata[dgst], *opMeta) - } - if _, ok := cache[dgst]; ok { - return def, nil - } - s.Add(dgst, sls) - def.Def = append(def.Def, dt) - cache[dgst] = struct{}{} - return def, nil -} - -func (s State) Validate(ctx context.Context) error { - return s.Output().Vertex(ctx).Validate(ctx) -} - -func (s State) Output() Output { - if s.async != nil { - return s.async.Output() - } - return s.out -} - -func (s State) WithOutput(o Output) State { - prev := s - s = State{ - out: o, - prev: &prev, - } - s = s.ensurePlatform() - return s -} - -func (s State) WithImageConfig(c []byte) (State, error) { - var img struct { - Config struct { - Env []string `json:"Env,omitempty"` - WorkingDir string `json:"WorkingDir,omitempty"` - User string `json:"User,omitempty"` - } `json:"config,omitempty"` - } - if err := json.Unmarshal(c, &img); err != nil { - return State{}, err - } - for _, env := range img.Config.Env { - parts := strings.SplitN(env, "=", 2) - if len(parts[0]) > 0 { - var v string - if len(parts) > 1 { - v = parts[1] - } - s = s.AddEnv(parts[0], v) - } - } - s = s.Dir(img.Config.WorkingDir) - return s, nil -} - -func (s State) Run(ro ...RunOption) ExecState { - ei := &ExecInfo{State: s} - for _, o := range ro { - o.SetRunOption(ei) - } - exec := NewExecOp(ei.State, ei.ProxyEnv, ei.ReadonlyRootFS, ei.Constraints) - for _, m := range ei.Mounts { - exec.AddMount(m.Target, m.Source, m.Opts...) 
- } - exec.secrets = ei.Secrets - exec.ssh = ei.SSH - - return ExecState{ - State: s.WithOutput(exec.Output()), - exec: exec, - } -} - -func (s State) File(a *FileAction, opts ...ConstraintsOpt) State { - var c Constraints - for _, o := range opts { - o.SetConstraintsOption(&c) - } - - return s.WithOutput(NewFileOp(s, a, c).Output()) -} - -func (s State) AddEnv(key, value string) State { - return AddEnv(key, value)(s) -} - -func (s State) AddEnvf(key, value string, v ...interface{}) State { - return AddEnvf(key, value, v...)(s) -} - -func (s State) Dir(str string) State { - return Dir(str)(s) -} -func (s State) Dirf(str string, v ...interface{}) State { - return Dirf(str, v...)(s) -} - -func (s State) GetEnv(ctx context.Context, key string) (string, bool, error) { - env, err := getEnv(s)(ctx) - if err != nil { - return "", false, err - } - v, ok := env.Get(key) - return v, ok, nil -} - -func (s State) Env(ctx context.Context) ([]string, error) { - env, err := getEnv(s)(ctx) - if err != nil { - return nil, err - } - return env.ToArray(), nil -} - -func (s State) GetDir(ctx context.Context) (string, error) { - return getDir(s)(ctx) -} - -func (s State) GetArgs(ctx context.Context) ([]string, error) { - return getArgs(s)(ctx) -} - -func (s State) Reset(s2 State) State { - return Reset(s2)(s) -} - -func (s State) User(v string) State { - return User(v)(s) -} - -func (s State) Hostname(v string) State { - return Hostname(v)(s) -} - -func (s State) GetHostname(ctx context.Context) (string, error) { - return getHostname(s)(ctx) -} - -func (s State) Platform(p specs.Platform) State { - return platform(p)(s) -} - -func (s State) GetPlatform(ctx context.Context) (*specs.Platform, error) { - return getPlatform(s)(ctx) -} - -func (s State) Network(n pb.NetMode) State { - return Network(n)(s) -} - -func (s State) GetNetwork(ctx context.Context) (pb.NetMode, error) { - return getNetwork(s)(ctx) -} -func (s State) Security(n pb.SecurityMode) State { - return Security(n)(s) -} - -func (s State) GetSecurity(ctx context.Context) (pb.SecurityMode, error) { - return getSecurity(s)(ctx) -} - -func (s State) With(so ...StateOption) State { - for _, o := range so { - s = o(s) - } - return s -} - -func (s State) AddExtraHost(host string, ip net.IP) State { - return extraHost(host, ip)(s) -} - -func (s State) isFileOpCopyInput() {} - -type output struct { - vertex Vertex - getIndex func() (pb.OutputIndex, error) - err error - platform *specs.Platform -} - -func (o *output) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error) { - if o.err != nil { - return nil, o.err - } - var index pb.OutputIndex - if o.getIndex != nil { - var err error - index, err = o.getIndex() - if err != nil { - return nil, err - } - } - dgst, _, _, _, err := o.vertex.Marshal(ctx, c) - if err != nil { - return nil, err - } - return &pb.Input{Digest: dgst, Index: index}, nil -} - -func (o *output) Vertex(context.Context) Vertex { - return o.vertex -} - -func (o *output) Platform() *specs.Platform { - return o.platform -} - -type ConstraintsOpt interface { - SetConstraintsOption(*Constraints) - RunOption - LocalOption - HTTPOption - ImageOption - GitOption -} - -type constraintsOptFunc func(m *Constraints) - -func (fn constraintsOptFunc) SetConstraintsOption(m *Constraints) { - fn(m) -} - -func (fn constraintsOptFunc) SetRunOption(ei *ExecInfo) { - ei.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetLocalOption(li *LocalInfo) { - li.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetHTTPOption(hi *HTTPInfo) { - 
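A sketch (not part of the patch) tying State.Run, mounts and Marshal together. llb.Shlex and the ExecState.AddMount wrapper live in exec.go of this package and are assumptions here; the image name is a placeholder.

package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client/llb"
)

func buildDefinition(ctx context.Context) (*llb.Definition, error) {
	// Run() wraps the state in an ExecOp; Root() would return the rootfs after
	// the command, while AddMount() exposes a secondary output as its own State.
	st := llb.Image("docker.io/library/golang:1.17-alpine"). // placeholder image
		Dir("/src").
		Run(llb.Shlex("go build -o /out/app ./...")). // llb.Shlex is defined elsewhere in the package
		AddMount("/out", llb.Scratch())

	// Marshal walks the vertex graph and serializes it, applying any
	// ConstraintsOpt such as a target platform or a custom progress name.
	return st.Marshal(ctx, llb.LinuxAmd64, llb.WithCustomName("build app"))
}

func main() {
	def, err := buildDefinition(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	// def.ToPB() is what ultimately gets attached to a SolveRequest.
	_ = def
}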
hi.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetImageOption(ii *ImageInfo) { - ii.applyConstraints(fn) -} - -func (fn constraintsOptFunc) SetGitOption(gi *GitInfo) { - gi.applyConstraints(fn) -} - -func mergeMetadata(m1, m2 pb.OpMetadata) pb.OpMetadata { - if m2.IgnoreCache { - m1.IgnoreCache = true - } - if len(m2.Description) > 0 { - if m1.Description == nil { - m1.Description = make(map[string]string) - } - for k, v := range m2.Description { - m1.Description[k] = v - } - } - if m2.ExportCache != nil { - m1.ExportCache = m2.ExportCache - } - - for k := range m2.Caps { - if m1.Caps == nil { - m1.Caps = make(map[apicaps.CapID]bool, len(m2.Caps)) - } - m1.Caps[k] = true - } - - return m1 -} - -var IgnoreCache = constraintsOptFunc(func(c *Constraints) { - c.Metadata.IgnoreCache = true -}) - -func WithDescription(m map[string]string) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - if c.Metadata.Description == nil { - c.Metadata.Description = map[string]string{} - } - for k, v := range m { - c.Metadata.Description[k] = v - } - }) -} - -func WithCustomName(name string) ConstraintsOpt { - return WithDescription(map[string]string{ - "llb.customname": name, - }) -} - -func WithCustomNamef(name string, a ...interface{}) ConstraintsOpt { - return WithCustomName(fmt.Sprintf(name, a...)) -} - -// WithExportCache forces results for this vertex to be exported with the cache -func WithExportCache() ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.Metadata.ExportCache = &pb.ExportCache{Value: true} - }) -} - -// WithoutExportCache sets results for this vertex to be not exported with -// the cache -func WithoutExportCache() ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - // ExportCache with value false means to disable exporting - c.Metadata.ExportCache = &pb.ExportCache{Value: false} - }) -} - -// WithoutDefaultExportCache resets the cache export for the vertex to use -// the default defined by the build configuration. 
-func WithoutDefaultExportCache() ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - // nil means no vertex based config has been set - c.Metadata.ExportCache = nil - }) -} - -// WithCaps exposes supported LLB caps to the marshaler -func WithCaps(caps apicaps.CapSet) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.Caps = &caps - }) -} - -type constraintsWrapper struct { - Constraints -} - -func (cw *constraintsWrapper) applyConstraints(f func(c *Constraints)) { - f(&cw.Constraints) -} - -type Constraints struct { - Platform *specs.Platform - WorkerConstraints []string - Metadata pb.OpMetadata - LocalUniqueID string - Caps *apicaps.CapSet - SourceLocations []*SourceLocation -} - -func Platform(p specs.Platform) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.Platform = &p - }) -} - -func LocalUniqueID(v string) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - c.LocalUniqueID = v - }) -} - -var ( - LinuxAmd64 = Platform(specs.Platform{OS: "linux", Architecture: "amd64"}) - LinuxArmhf = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"}) - LinuxArm = LinuxArmhf - LinuxArmel = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}) - LinuxArm64 = Platform(specs.Platform{OS: "linux", Architecture: "arm64"}) - LinuxS390x = Platform(specs.Platform{OS: "linux", Architecture: "s390x"}) - LinuxPpc64le = Platform(specs.Platform{OS: "linux", Architecture: "ppc64le"}) - Darwin = Platform(specs.Platform{OS: "darwin", Architecture: "amd64"}) - Windows = Platform(specs.Platform{OS: "windows", Architecture: "amd64"}) -) - -func Require(filters ...string) ConstraintsOpt { - return constraintsOptFunc(func(c *Constraints) { - for _, f := range filters { - c.WorkerConstraints = append(c.WorkerConstraints, f) - } - }) -} - -func nilValue(context.Context) (interface{}, error) { - return nil, nil -} diff --git a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go deleted file mode 100644 index 7bc583443aae..000000000000 --- a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go +++ /dev/null @@ -1,113 +0,0 @@ -package ociindex - -import ( - "encoding/json" - "io/ioutil" - "os" - - "github.com/gofrs/flock" - v1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -const ( - // IndexJSONLockFileSuffix is the suffix of the lock file - IndexJSONLockFileSuffix = ".lock" -) - -// PutDescToIndex puts desc to index with tag. -// Existing manifests with the same tag will be removed from the index. 
-func PutDescToIndex(index *v1.Index, desc v1.Descriptor, tag string) error { - if index == nil { - index = &v1.Index{} - } - if index.SchemaVersion == 0 { - index.SchemaVersion = 2 - } - if tag != "" { - if desc.Annotations == nil { - desc.Annotations = make(map[string]string) - } - desc.Annotations[v1.AnnotationRefName] = tag - // remove existing manifests with the same tag - var manifests []v1.Descriptor - for _, m := range index.Manifests { - if m.Annotations[v1.AnnotationRefName] != tag { - manifests = append(manifests, m) - } - } - index.Manifests = manifests - } - index.Manifests = append(index.Manifests, desc) - return nil -} - -func PutDescToIndexJSONFileLocked(indexJSONPath string, desc v1.Descriptor, tag string) error { - lockPath := indexJSONPath + IndexJSONLockFileSuffix - lock := flock.New(lockPath) - locked, err := lock.TryLock() - if err != nil { - return errors.Wrapf(err, "could not lock %s", lockPath) - } - if !locked { - return errors.Errorf("could not lock %s", lockPath) - } - defer func() { - lock.Unlock() - os.RemoveAll(lockPath) - }() - f, err := os.OpenFile(indexJSONPath, os.O_RDWR|os.O_CREATE, 0644) - if err != nil { - return errors.Wrapf(err, "could not open %s", indexJSONPath) - } - defer f.Close() - var idx v1.Index - b, err := ioutil.ReadAll(f) - if err != nil { - return errors.Wrapf(err, "could not read %s", indexJSONPath) - } - if len(b) > 0 { - if err := json.Unmarshal(b, &idx); err != nil { - return errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) - } - } - if err = PutDescToIndex(&idx, desc, tag); err != nil { - return err - } - b, err = json.Marshal(idx) - if err != nil { - return err - } - if _, err = f.WriteAt(b, 0); err != nil { - return err - } - if err = f.Truncate(int64(len(b))); err != nil { - return err - } - return nil -} - -func ReadIndexJSONFileLocked(indexJSONPath string) (*v1.Index, error) { - lockPath := indexJSONPath + IndexJSONLockFileSuffix - lock := flock.New(lockPath) - locked, err := lock.TryRLock() - if err != nil { - return nil, errors.Wrapf(err, "could not lock %s", lockPath) - } - if !locked { - return nil, errors.Errorf("could not lock %s", lockPath) - } - defer func() { - lock.Unlock() - os.RemoveAll(lockPath) - }() - b, err := ioutil.ReadFile(indexJSONPath) - if err != nil { - return nil, errors.Wrapf(err, "could not read %s", indexJSONPath) - } - var idx v1.Index - if err := json.Unmarshal(b, &idx); err != nil { - return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) - } - return &idx, nil -} diff --git a/vendor/github.com/moby/buildkit/client/prune.go b/vendor/github.com/moby/buildkit/client/prune.go deleted file mode 100644 index 27fe5dd8cdbc..000000000000 --- a/vendor/github.com/moby/buildkit/client/prune.go +++ /dev/null @@ -1,83 +0,0 @@ -package client - -import ( - "context" - "io" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/pkg/errors" -) - -func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOption) error { - info := &PruneInfo{} - for _, o := range opts { - o.SetPruneOption(info) - } - - req := &controlapi.PruneRequest{ - Filter: info.Filter, - KeepDuration: int64(info.KeepDuration), - KeepBytes: int64(info.KeepBytes), - } - if info.All { - req.All = true - } - cl, err := c.controlClient().Prune(ctx, req) - if err != nil { - return errors.Wrap(err, "failed to call prune") - } - - for { - d, err := cl.Recv() - if err != nil { - if err == io.EOF { - return nil - } - return err - } - if ch != nil { 
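For reference (outside the patch), a sketch of the ociindex helpers deleted above, which maintain the index.json of a local cache export; the path and descriptor values are placeholders.

package main

import (
	"log"

	"github.com/moby/buildkit/client/ociindex"
	"github.com/opencontainers/go-digest"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	indexPath := "/tmp/cache/index.json" // placeholder: an OCI layout written by the local cache exporter

	// Read the current index under its .lock file (created next to index.json).
	idx, err := ociindex.ReadIndexJSONFileLocked(indexPath)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("index has %d manifests", len(idx.Manifests))

	// Add (or replace) the "latest" entry with a new manifest descriptor.
	desc := v1.Descriptor{ // placeholder descriptor values
		MediaType: v1.MediaTypeImageManifest,
		Digest:    digest.FromString("placeholder"),
		Size:      1234,
	}
	if err := ociindex.PutDescToIndexJSONFileLocked(indexPath, desc, "latest"); err != nil {
		log.Fatal(err)
	}
}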
- ch <- UsageInfo{ - ID: d.ID, - Mutable: d.Mutable, - InUse: d.InUse, - Size: d.Size_, - Parent: d.Parent, - CreatedAt: d.CreatedAt, - Description: d.Description, - UsageCount: int(d.UsageCount), - LastUsedAt: d.LastUsedAt, - RecordType: UsageRecordType(d.RecordType), - Shared: d.Shared, - } - } - } -} - -type PruneOption interface { - SetPruneOption(*PruneInfo) -} - -type PruneInfo struct { - Filter []string - All bool - KeepDuration time.Duration - KeepBytes int64 -} - -type pruneOptionFunc func(*PruneInfo) - -func (f pruneOptionFunc) SetPruneOption(pi *PruneInfo) { - f(pi) -} - -var PruneAll = pruneOptionFunc(func(pi *PruneInfo) { - pi.All = true -}) - -func WithKeepOpt(duration time.Duration, bytes int64) PruneOption { - return pruneOptionFunc(func(pi *PruneInfo) { - pi.KeepDuration = duration - pi.KeepBytes = bytes - }) -} diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go deleted file mode 100644 index 1d77bc4588c5..000000000000 --- a/vendor/github.com/moby/buildkit/client/solve.go +++ /dev/null @@ -1,488 +0,0 @@ -package client - -import ( - "context" - "encoding/json" - "io" - "os" - "path/filepath" - "strings" - "time" - - "github.com/containerd/containerd/content" - contentlocal "github.com/containerd/containerd/content/local" - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/client/ociindex" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/session" - sessioncontent "github.com/moby/buildkit/session/content" - "github.com/moby/buildkit/session/filesync" - "github.com/moby/buildkit/session/grpchijack" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/entitlements" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - fstypes "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -type SolveOpt struct { - Exports []ExportEntry - LocalDirs map[string]string - SharedKey string - Frontend string - FrontendAttrs map[string]string - FrontendInputs map[string]llb.State - CacheExports []CacheOptionsEntry - CacheImports []CacheOptionsEntry - Session []session.Attachable - AllowedEntitlements []entitlements.Entitlement - SharedSession *session.Session // TODO: refactor to better session syncing - SessionPreInitialized bool // TODO: refactor to better session syncing -} - -type ExportEntry struct { - Type string - Attrs map[string]string - Output func(map[string]string) (io.WriteCloser, error) // for ExporterOCI and ExporterDocker - OutputDir string // for ExporterLocal -} - -type CacheOptionsEntry struct { - Type string - Attrs map[string]string -} - -// Solve calls Solve on the controller. -// def must be nil if (and only if) opt.Frontend is set. 
-func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { - defer func() { - if statusChan != nil { - close(statusChan) - } - }() - - if opt.Frontend == "" && def == nil { - return nil, errors.New("invalid empty definition") - } - if opt.Frontend != "" && def != nil { - return nil, errors.Errorf("invalid definition for frontend %s", opt.Frontend) - } - - return c.solve(ctx, def, nil, opt, statusChan) -} - -type runGatewayCB func(ref string, s *session.Session) error - -func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { - if def != nil && runGateway != nil { - return nil, errors.New("invalid with def and cb") - } - - syncedDirs, err := prepareSyncedDirs(def, opt.LocalDirs) - if err != nil { - return nil, err - } - - ref := identity.NewID() - eg, ctx := errgroup.WithContext(ctx) - - statusContext, cancelStatus := context.WithCancel(context.Background()) - defer cancelStatus() - - if span := opentracing.SpanFromContext(ctx); span != nil { - statusContext = opentracing.ContextWithSpan(statusContext, span) - } - - s := opt.SharedSession - - if s == nil { - if opt.SessionPreInitialized { - return nil, errors.Errorf("no session provided for preinitialized option") - } - s, err = session.NewSession(statusContext, defaultSessionName(), opt.SharedKey) - if err != nil { - return nil, errors.Wrap(err, "failed to create session") - } - } - - cacheOpt, err := parseCacheOptions(opt) - if err != nil { - return nil, err - } - - var ex ExportEntry - if len(opt.Exports) > 1 { - return nil, errors.New("currently only single Exports can be specified") - } - if len(opt.Exports) == 1 { - ex = opt.Exports[0] - } - - if !opt.SessionPreInitialized { - if len(syncedDirs) > 0 { - s.Allow(filesync.NewFSSyncProvider(syncedDirs)) - } - - for _, a := range opt.Session { - s.Allow(a) - } - - switch ex.Type { - case ExporterLocal: - if ex.Output != nil { - return nil, errors.New("output file writer is not supported by local exporter") - } - if ex.OutputDir == "" { - return nil, errors.New("output directory is required for local exporter") - } - s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir)) - case ExporterOCI, ExporterDocker, ExporterTar: - if ex.OutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) - } - if ex.Output == nil { - return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type) - } - s.Allow(filesync.NewFSSyncTarget(ex.Output)) - default: - if ex.Output != nil { - return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type) - } - if ex.OutputDir != "" { - return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type) - } - } - - if len(cacheOpt.contentStores) > 0 { - s.Allow(sessioncontent.NewAttachable(cacheOpt.contentStores)) - } - - eg.Go(func() error { - return s.Run(statusContext, grpchijack.Dialer(c.controlClient())) - }) - } - - for k, v := range cacheOpt.frontendAttrs { - opt.FrontendAttrs[k] = v - } - - solveCtx, cancelSolve := context.WithCancel(ctx) - var res *SolveResponse - eg.Go(func() error { - ctx := solveCtx - defer cancelSolve() - - defer func() { // make sure the Status ends cleanly on build errors - go func() { - <-time.After(3 * time.Second) - cancelStatus() - }() - logrus.Debugf("stopping session") - s.Close() - }() - var pbd *pb.Definition - 
if def != nil { - pbd = def.ToPB() - } - - frontendInputs := make(map[string]*pb.Definition) - for key, st := range opt.FrontendInputs { - def, err := st.Marshal(ctx) - if err != nil { - return err - } - frontendInputs[key] = def.ToPB() - } - - resp, err := c.controlClient().Solve(ctx, &controlapi.SolveRequest{ - Ref: ref, - Definition: pbd, - Exporter: ex.Type, - ExporterAttrs: ex.Attrs, - Session: s.ID(), - Frontend: opt.Frontend, - FrontendAttrs: opt.FrontendAttrs, - FrontendInputs: frontendInputs, - Cache: cacheOpt.options, - Entitlements: opt.AllowedEntitlements, - }) - if err != nil { - return errors.Wrap(err, "failed to solve") - } - res = &SolveResponse{ - ExporterResponse: resp.ExporterResponse, - } - return nil - }) - - if runGateway != nil { - eg.Go(func() error { - err := runGateway(ref, s) - if err == nil { - return nil - } - - // If the callback failed then the main - // `Solve` (called above) should error as - // well. However as a fallback we wait up to - // 5s for that to happen before failing this - // goroutine. - select { - case <-solveCtx.Done(): - case <-time.After(5 * time.Second): - cancelSolve() - } - - return err - }) - } - - eg.Go(func() error { - stream, err := c.controlClient().Status(statusContext, &controlapi.StatusRequest{ - Ref: ref, - }) - if err != nil { - return errors.Wrap(err, "failed to get status") - } - for { - resp, err := stream.Recv() - if err != nil { - if err == io.EOF { - return nil - } - return errors.Wrap(err, "failed to receive status") - } - s := SolveStatus{} - for _, v := range resp.Vertexes { - s.Vertexes = append(s.Vertexes, &Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, - }) - } - for _, v := range resp.Statuses { - s.Statuses = append(s.Statuses, &VertexStatus{ - ID: v.ID, - Vertex: v.Vertex, - Name: v.Name, - Total: v.Total, - Current: v.Current, - Timestamp: v.Timestamp, - Started: v.Started, - Completed: v.Completed, - }) - } - for _, v := range resp.Logs { - s.Logs = append(s.Logs, &VertexLog{ - Vertex: v.Vertex, - Stream: int(v.Stream), - Data: v.Msg, - Timestamp: v.Timestamp, - }) - } - if statusChan != nil { - statusChan <- &s - } - } - }) - - if err := eg.Wait(); err != nil { - return nil, err - } - // Update index.json of exported cache content store - // FIXME(AkihiroSuda): dedupe const definition of cache/remotecache.ExporterResponseManifestDesc = "cache.manifest" - if manifestDescJSON := res.ExporterResponse["cache.manifest"]; manifestDescJSON != "" { - var manifestDesc ocispec.Descriptor - if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil { - return nil, err - } - for indexJSONPath, tag := range cacheOpt.indicesToUpdate { - if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil { - return nil, err - } - } - } - return res, nil -} - -func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) { - for _, d := range localDirs { - fi, err := os.Stat(d) - if err != nil { - return nil, errors.Wrapf(err, "could not find %s", d) - } - if !fi.IsDir() { - return nil, errors.Errorf("%s not a directory", d) - } - } - resetUIDAndGID := func(p string, st *fstypes.Stat) bool { - st.Uid = 0 - st.Gid = 0 - return true - } - - dirs := make([]filesync.SyncedDir, 0, len(localDirs)) - if def == nil { - for name, d := range localDirs { - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) - } - } else { - 
for _, dt := range def.Def { - var op pb.Op - if err := (&op).Unmarshal(dt); err != nil { - return nil, errors.Wrap(err, "failed to parse llb proto op") - } - if src := op.GetSource(); src != nil { - if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property - name := strings.TrimPrefix(src.Identifier, "local://") - d, ok := localDirs[name] - if !ok { - return nil, errors.Errorf("local directory %s not enabled", name) - } - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) // TODO: excludes - } - } - } - } - return dirs, nil -} - -func defaultSessionName() string { - wd, err := os.Getwd() - if err != nil { - return "unknown" - } - return filepath.Base(wd) -} - -type cacheOptions struct { - options controlapi.CacheOptions - contentStores map[string]content.Store // key: ID of content store ("local:" + csDir) - indicesToUpdate map[string]string // key: index.JSON file name, value: tag - frontendAttrs map[string]string -} - -func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) { - var ( - cacheExports []*controlapi.CacheOptionsEntry - cacheImports []*controlapi.CacheOptionsEntry - // legacy API is used for registry caches, because the daemon might not support the new API - legacyExportRef string - legacyImportRefs []string - ) - contentStores := make(map[string]content.Store) - indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag - frontendAttrs := make(map[string]string) - legacyExportAttrs := make(map[string]string) - for _, ex := range opt.CacheExports { - if ex.Type == "local" { - csDir := ex.Attrs["dest"] - if csDir == "" { - return nil, errors.New("local cache exporter requires dest") - } - if err := os.MkdirAll(csDir, 0755); err != nil { - return nil, err - } - cs, err := contentlocal.NewStore(csDir) - if err != nil { - return nil, err - } - contentStores["local:"+csDir] = cs - // TODO(AkihiroSuda): support custom index JSON path and tag - indexJSONPath := filepath.Join(csDir, "index.json") - indicesToUpdate[indexJSONPath] = "latest" - } - if ex.Type == "registry" && legacyExportRef == "" { - legacyExportRef = ex.Attrs["ref"] - for k, v := range ex.Attrs { - if k != "ref" { - legacyExportAttrs[k] = v - } - } - } else { - cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{ - Type: ex.Type, - Attrs: ex.Attrs, - }) - } - } - for _, im := range opt.CacheImports { - attrs := im.Attrs - if im.Type == "local" { - csDir := im.Attrs["src"] - if csDir == "" { - return nil, errors.New("local cache importer requires src") - } - cs, err := contentlocal.NewStore(csDir) - if err != nil { - logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) - continue - } - // if digest is not specified, load from "latest" tag - if attrs["digest"] == "" { - idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json")) - if err != nil { - logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) - continue - } - for _, m := range idx.Manifests { - if (m.Annotations[ocispec.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispec.AnnotationRefName] == attrs["tag"]) { - attrs["digest"] = string(m.Digest) - break - } - } - if attrs["digest"] == "" { - return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json") - } - } - contentStores["local:"+csDir] = cs - - } - if im.Type == "registry" { - legacyImportRef := 
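A compact sketch (not part of the patch) of driving Client.Solve as deleted above with a frontend, a local context and a local cache export, which is the path handled by parseCacheOptions. The client constructor, daemon address, frontend name and attribute keys are conventional values assumed here, not shown in this hunk.

package main

import (
	"context"
	"log"

	bkclient "github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()

	// New() is defined in client.go of the same package (not in this hunk);
	// the address is a placeholder.
	c, err := bkclient.New(ctx, "unix:///run/buildkit/buildkitd.sock")
	if err != nil {
		log.Fatal(err)
	}

	ch := make(chan *bkclient.SolveStatus) // progress; Solve closes it when done
	go func() {
		for range ch {
		}
	}()

	res, err := c.Solve(ctx, nil, bkclient.SolveOpt{
		// def is nil because a frontend is set; see the guard at the top of Solve.
		Frontend:      "dockerfile.v0",
		FrontendAttrs: map[string]string{"filename": "Dockerfile"},
		LocalDirs: map[string]string{
			"context":    ".",
			"dockerfile": ".",
		},
		Exports: []bkclient.ExportEntry{
			// The local exporter requires OutputDir and no Output writer.
			{Type: bkclient.ExporterLocal, OutputDir: "./out"},
		},
		CacheExports: []bkclient.CacheOptionsEntry{
			// "dest" is required by parseCacheOptions for the local cache exporter.
			{Type: "local", Attrs: map[string]string{"dest": "/tmp/cache"}},
		},
	}, ch)
	if err != nil {
		log.Fatal(err)
	}
	log.Println(res.ExporterResponse)
}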
attrs["ref"] - legacyImportRefs = append(legacyImportRefs, legacyImportRef) - } else { - cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{ - Type: im.Type, - Attrs: attrs, - }) - } - } - if opt.Frontend != "" { - // use legacy API for registry importers, because the frontend might not support the new API - if len(legacyImportRefs) > 0 { - frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",") - } - // use new API for other importers - if len(cacheImports) > 0 { - s, err := json.Marshal(cacheImports) - if err != nil { - return nil, err - } - frontendAttrs["cache-imports"] = string(s) - } - } - res := cacheOptions{ - options: controlapi.CacheOptions{ - // old API (for registry caches, planned to be removed in early 2019) - ExportRefDeprecated: legacyExportRef, - ExportAttrsDeprecated: legacyExportAttrs, - ImportRefsDeprecated: legacyImportRefs, - // new API - Exports: cacheExports, - Imports: cacheImports, - }, - contentStores: contentStores, - indicesToUpdate: indicesToUpdate, - frontendAttrs: frontendAttrs, - } - return &res, nil -} diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go deleted file mode 100644 index b011ee2efdbf..000000000000 --- a/vendor/github.com/moby/buildkit/client/workers.go +++ /dev/null @@ -1,70 +0,0 @@ -package client - -import ( - "context" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - apitypes "github.com/moby/buildkit/api/types" - "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" -) - -// WorkerInfo contains information about a worker -type WorkerInfo struct { - ID string - Labels map[string]string - Platforms []specs.Platform - GCPolicy []PruneInfo -} - -// ListWorkers lists all active workers -func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) { - info := &ListWorkersInfo{} - for _, o := range opts { - o.SetListWorkersOption(info) - } - - req := &controlapi.ListWorkersRequest{Filter: info.Filter} - resp, err := c.controlClient().ListWorkers(ctx, req) - if err != nil { - return nil, errors.Wrap(err, "failed to list workers") - } - - var wi []*WorkerInfo - - for _, w := range resp.Record { - wi = append(wi, &WorkerInfo{ - ID: w.ID, - Labels: w.Labels, - Platforms: pb.ToSpecPlatforms(w.Platforms), - GCPolicy: fromAPIGCPolicy(w.GCPolicy), - }) - } - - return wi, nil -} - -// ListWorkersOption is an option for a worker list query -type ListWorkersOption interface { - SetListWorkersOption(*ListWorkersInfo) -} - -// ListWorkersInfo is a payload for worker list query -type ListWorkersInfo struct { - Filter []string -} - -func fromAPIGCPolicy(in []*apitypes.GCPolicy) []PruneInfo { - out := make([]PruneInfo, 0, len(in)) - for _, p := range in { - out = append(out, PruneInfo{ - All: p.All, - Filter: p.Filters, - KeepDuration: time.Duration(p.KeepDuration), - KeepBytes: p.KeepBytes, - }) - } - return out -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go deleted file mode 100644 index 09d86cad820c..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go +++ /dev/null @@ -1,134 +0,0 @@ -package client - -import ( - "context" - "io" - - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - digest "github.com/opencontainers/go-digest" - specs 
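Similarly (outside the patch), a sketch of the ListWorkers and Prune calls deleted above; the daemon address is a placeholder and the client constructor is not part of this hunk.

package main

import (
	"context"
	"log"
	"time"

	bkclient "github.com/moby/buildkit/client"
)

func main() {
	ctx := context.Background()
	c, err := bkclient.New(ctx, "unix:///run/buildkit/buildkitd.sock") // constructor not in this hunk
	if err != nil {
		log.Fatal(err)
	}

	// ListWorkers reports the platforms and GC policy of each worker.
	workers, err := c.ListWorkers(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, w := range workers {
		log.Printf("worker %s: %d platforms", w.ID, len(w.Platforms))
	}

	// Prune streams a UsageInfo record for every cache entry it removes.
	ch := make(chan bkclient.UsageInfo)
	go func() {
		for u := range ch {
			log.Printf("pruned %s (%d bytes)", u.ID, u.Size)
		}
	}()
	// Keep at most ~1GB and nothing older than 48h; PruneAll requests removal
	// of all eligible records rather than only intermediate ones.
	err = c.Prune(ctx, ch, bkclient.PruneAll, bkclient.WithKeepOpt(48*time.Hour, 1e9))
	close(ch)
	if err != nil {
		log.Fatal(err)
	}
}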
"github.com/opencontainers/image-spec/specs-go/v1" - fstypes "github.com/tonistiigi/fsutil/types" -) - -type Client interface { - Solve(ctx context.Context, req SolveRequest) (*Result, error) - ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) - BuildOpts() BuildOpts - Inputs(ctx context.Context) (map[string]llb.State, error) - NewContainer(ctx context.Context, req NewContainerRequest) (Container, error) -} - -// NewContainerRequest encapsulates the requirements for a client to define a -// new container, without defining the initial process. -type NewContainerRequest struct { - Mounts []Mount - NetMode pb.NetMode - Platform *pb.Platform - Constraints *pb.WorkerConstraints -} - -// Mount allows clients to specify a filesystem mount. A Reference to a -// previously solved Result is required. -type Mount struct { - Selector string - Dest string - ResultID string - Ref Reference - Readonly bool - MountType pb.MountType - CacheOpt *pb.CacheOpt - SecretOpt *pb.SecretOpt - SSHOpt *pb.SSHOpt -} - -// Container is used to start new processes inside a container and release the -// container resources when done. -type Container interface { - Start(context.Context, StartRequest) (ContainerProcess, error) - Release(context.Context) error -} - -// StartRequest encapsulates the arguments to define a process within a -// container. -type StartRequest struct { - Args []string - Env []string - User string - Cwd string - Tty bool - Stdin io.ReadCloser - Stdout, Stderr io.WriteCloser - SecurityMode pb.SecurityMode -} - -// WinSize is same as executor.WinSize, copied here to prevent circular package -// dependencies. -type WinSize struct { - Rows uint32 - Cols uint32 -} - -// ContainerProcess represents a process within a container. 
-type ContainerProcess interface { - Wait() error - Resize(ctx context.Context, size WinSize) error - // TODO Signal(ctx context.Context, sig os.Signal) -} - -type Reference interface { - ToState() (llb.State, error) - ReadFile(ctx context.Context, req ReadRequest) ([]byte, error) - StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error) - ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error) -} - -type ReadRequest struct { - Filename string - Range *FileRange -} - -type FileRange struct { - Offset int - Length int -} - -type ReadDirRequest struct { - Path string - IncludePattern string -} - -type StatRequest struct { - Path string -} - -// SolveRequest is same as frontend.SolveRequest but avoiding dependency -type SolveRequest struct { - Evaluate bool - Definition *pb.Definition - Frontend string - FrontendOpt map[string]string - FrontendInputs map[string]*pb.Definition - CacheImports []CacheOptionsEntry -} - -type CacheOptionsEntry struct { - Type string - Attrs map[string]string -} - -type WorkerInfo struct { - ID string - Labels map[string]string - Platforms []specs.Platform -} - -type BuildOpts struct { - Opts map[string]string - SessionID string - Workers []WorkerInfo - Product string - LLBCaps apicaps.CapSet - Caps apicaps.CapSet -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go deleted file mode 100644 index bd5422847822..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/result.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "context" - "sync" - - "github.com/pkg/errors" -) - -type BuildFunc func(context.Context, Client) (*Result, error) - -type Result struct { - mu sync.Mutex - Ref Reference - Refs map[string]Reference - Metadata map[string][]byte -} - -func NewResult() *Result { - return &Result{} -} - -func (r *Result) AddMeta(k string, v []byte) { - r.mu.Lock() - if r.Metadata == nil { - r.Metadata = map[string][]byte{} - } - r.Metadata[k] = v - r.mu.Unlock() -} - -func (r *Result) AddRef(k string, ref Reference) { - r.mu.Lock() - if r.Refs == nil { - r.Refs = map[string]Reference{} - } - r.Refs[k] = ref - r.mu.Unlock() -} - -func (r *Result) SetRef(ref Reference) { - r.Ref = ref -} - -func (r *Result) SingleRef() (Reference, error) { - r.mu.Lock() - defer r.mu.Unlock() - - if r.Refs != nil && r.Ref == nil { - return nil, errors.Errorf("invalid map result") - } - - return r.Ref, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/errdefs/exit.go b/vendor/github.com/moby/buildkit/frontend/gateway/errdefs/exit.go deleted file mode 100644 index f98a148de4a2..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/errdefs/exit.go +++ /dev/null @@ -1,34 +0,0 @@ -package errdefs - -import "fmt" - -const ( - // UnknownExitStatus might be returned in (*ExitError).ExitCode via - // ContainerProcess.Wait. This can happen if the process never starts - // or if an error was encountered when obtaining the exit status, it is set to 255. - // - // This const is defined here to prevent importing github.com/containerd/containerd - // and corresponds with https://github.com/containerd/containerd/blob/40b22ef0741028917761d8c5d5d29e0d19038836/task.go#L52-L55 - UnknownExitStatus = 255 -) - -// ExitError will be returned when the container process exits with a non-zero -// exit code. 
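Finally, a sketch (not part of the patch) of a gateway BuildFunc using the client interfaces and Result helpers deleted above, wired up through grpcclient.RunFromEnvironment from the grpcclient package deleted further below. The image name and file path are placeholders, and appcontext is assumed from buildkit's util/appcontext package.

package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/frontend/gateway/grpcclient"
	"github.com/moby/buildkit/util/appcontext"
)

// build is a client.BuildFunc: it turns LLB into a Result via the gateway.
func build(ctx context.Context, c client.Client) (*client.Result, error) {
	st := llb.Image("docker.io/library/alpine:latest") // placeholder image

	def, err := st.Marshal(ctx)
	if err != nil {
		return nil, err
	}

	// Solve sends the serialized definition across the gateway and returns a
	// Result whose references can be inspected or handed to the exporter.
	res, err := c.Solve(ctx, client.SolveRequest{Definition: def.ToPB()})
	if err != nil {
		return nil, err
	}

	ref, err := res.SingleRef()
	if err != nil {
		return nil, err
	}
	// References support file access on the solved snapshot.
	if dt, err := ref.ReadFile(ctx, client.ReadRequest{Filename: "/etc/os-release"}); err == nil {
		log.Printf("os-release: %d bytes", len(dt))
	}
	return res, nil
}

func main() {
	// RunFromEnvironment (deleted further below in grpcclient.go) connects the
	// build function to the gateway session set up by the daemon.
	if err := grpcclient.RunFromEnvironment(appcontext.Context(), build); err != nil {
		log.Fatal(err)
	}
}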
-type ExitError struct { - ExitCode uint32 - Err error -} - -func (err *ExitError) Error() string { - if err.Err != nil { - return err.Err.Error() - } - return fmt.Sprintf("exit code: %d", err.ExitCode) -} - -func (err *ExitError) Unwrap() error { - if err.Err == nil { - return fmt.Errorf("exit code: %d", err.ExitCode) - } - return err.Err -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go deleted file mode 100644 index e8a370b7a46e..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go +++ /dev/null @@ -1,1110 +0,0 @@ -package grpcclient - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net" - "os" - "strings" - "sync" - "time" - - "github.com/gogo/googleapis/google/rpc" - gogotypes "github.com/gogo/protobuf/types" - "github.com/golang/protobuf/ptypes/any" - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/frontend/gateway/errdefs" - pb "github.com/moby/buildkit/frontend/gateway/pb" - "github.com/moby/buildkit/identity" - opspb "github.com/moby/buildkit/solver/pb" - "github.com/moby/buildkit/util/apicaps" - "github.com/moby/buildkit/util/grpcerrors" - digest "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - fstypes "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" - spb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -const frontendPrefix = "BUILDKIT_FRONTEND_OPT_" - -type GrpcClient interface { - client.Client - Run(context.Context, client.BuildFunc) error -} - -func New(ctx context.Context, opts map[string]string, session, product string, c pb.LLBBridgeClient, w []client.WorkerInfo) (GrpcClient, error) { - pingCtx, pingCancel := context.WithTimeout(ctx, 15*time.Second) - defer pingCancel() - resp, err := c.Ping(pingCtx, &pb.PingRequest{}) - if err != nil { - return nil, err - } - - if resp.FrontendAPICaps == nil { - resp.FrontendAPICaps = defaultCaps() - } - - if resp.LLBCaps == nil { - resp.LLBCaps = defaultLLBCaps() - } - - return &grpcClient{ - client: c, - opts: opts, - sessionID: session, - workers: w, - product: product, - caps: pb.Caps.CapSet(resp.FrontendAPICaps), - llbCaps: opspb.Caps.CapSet(resp.LLBCaps), - requests: map[string]*pb.SolveRequest{}, - execMsgs: newMessageForwarder(ctx, c), - }, nil -} - -func current() (GrpcClient, error) { - if ep := product(); ep != "" { - apicaps.ExportedProduct = ep - } - - ctx, conn, err := grpcClientConn(context.Background()) - if err != nil { - return nil, err - } - - return New(ctx, opts(), sessionID(), product(), pb.NewLLBBridgeClient(conn), workers()) -} - -func convertRef(ref client.Reference) (*pb.Ref, error) { - if ref == nil { - return &pb.Ref{}, nil - } - r, ok := ref.(*reference) - if !ok { - return nil, errors.Errorf("invalid return reference type %T", ref) - } - return &pb.Ref{Id: r.id, Def: r.def}, nil -} - -func RunFromEnvironment(ctx context.Context, f client.BuildFunc) error { - client, err := current() - if err != nil { - return errors.Wrapf(err, "failed to initialize client from environment") - } - return client.Run(ctx, f) -} - -func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError error) { - export := c.caps.Supports(pb.CapReturnResult) == nil - - var ( - res *client.Result - err error - ) - if export { - defer 
func() { - req := &pb.ReturnRequest{} - if retError == nil { - if res == nil { - res = &client.Result{} - } - pbRes := &pb.Result{ - Metadata: res.Metadata, - } - if res.Refs != nil { - if c.caps.Supports(pb.CapProtoRefArray) == nil { - m := map[string]*pb.Ref{} - for k, r := range res.Refs { - pbRef, err := convertRef(r) - if err != nil { - retError = err - continue - } - m[k] = pbRef - } - pbRes.Result = &pb.Result_Refs{Refs: &pb.RefMap{Refs: m}} - } else { - // Server doesn't support the new wire format for refs, so we construct - // a deprecated result ref map. - m := map[string]string{} - for k, r := range res.Refs { - pbRef, err := convertRef(r) - if err != nil { - retError = err - continue - } - m[k] = pbRef.Id - } - pbRes.Result = &pb.Result_RefsDeprecated{RefsDeprecated: &pb.RefMapDeprecated{Refs: m}} - } - } else { - pbRef, err := convertRef(res.Ref) - if err != nil { - retError = err - } else { - if c.caps.Supports(pb.CapProtoRefArray) == nil { - pbRes.Result = &pb.Result_Ref{Ref: pbRef} - } else { - // Server doesn't support the new wire format for refs, so we construct - // a deprecated result ref. - pbRes.Result = &pb.Result_RefDeprecated{RefDeprecated: pbRef.Id} - } - } - } - if retError == nil { - req.Result = pbRes - } - } - if retError != nil { - st, _ := status.FromError(grpcerrors.ToGRPC(retError)) - stp := st.Proto() - req.Error = &rpc.Status{ - Code: stp.Code, - Message: stp.Message, - Details: convertToGogoAny(stp.Details), - } - } - if _, err := c.client.Return(ctx, req); err != nil && retError == nil { - retError = err - } - }() - } - - defer func() { - err = c.execMsgs.Release() - if err != nil && retError != nil { - retError = err - } - }() - - if res, err = f(ctx, c); err != nil { - return err - } - - if res == nil { - return nil - } - - if err := c.caps.Supports(pb.CapReturnMap); len(res.Refs) > 1 && err != nil { - return err - } - - if !export { - exportedAttrBytes, err := json.Marshal(res.Metadata) - if err != nil { - return errors.Wrapf(err, "failed to marshal return metadata") - } - - req, err := c.requestForRef(res.Ref) - if err != nil { - return errors.Wrapf(err, "failed to find return ref") - } - - req.Final = true - req.ExporterAttr = exportedAttrBytes - - if _, err := c.client.Solve(ctx, req); err != nil { - return errors.Wrapf(err, "failed to solve") - } - } - - return nil -} - -// defaultCaps returns the capabilities that were implemented when capabilities -// support was added. This list is frozen and should never be changed. -func defaultCaps() []apicaps.PBCap { - return []apicaps.PBCap{ - {ID: string(pb.CapSolveBase), Enabled: true}, - {ID: string(pb.CapSolveInlineReturn), Enabled: true}, - {ID: string(pb.CapResolveImage), Enabled: true}, - {ID: string(pb.CapReadFile), Enabled: true}, - } -} - -// defaultLLBCaps returns the LLB capabilities that were implemented when capabilities -// support was added. This list is frozen and should never be changed. 
-func defaultLLBCaps() []apicaps.PBCap { - return []apicaps.PBCap{ - {ID: string(opspb.CapSourceImage), Enabled: true}, - {ID: string(opspb.CapSourceLocal), Enabled: true}, - {ID: string(opspb.CapSourceLocalUnique), Enabled: true}, - {ID: string(opspb.CapSourceLocalSessionID), Enabled: true}, - {ID: string(opspb.CapSourceLocalIncludePatterns), Enabled: true}, - {ID: string(opspb.CapSourceLocalFollowPaths), Enabled: true}, - {ID: string(opspb.CapSourceLocalExcludePatterns), Enabled: true}, - {ID: string(opspb.CapSourceLocalSharedKeyHint), Enabled: true}, - {ID: string(opspb.CapSourceGit), Enabled: true}, - {ID: string(opspb.CapSourceGitKeepDir), Enabled: true}, - {ID: string(opspb.CapSourceGitFullURL), Enabled: true}, - {ID: string(opspb.CapSourceHTTP), Enabled: true}, - {ID: string(opspb.CapSourceHTTPChecksum), Enabled: true}, - {ID: string(opspb.CapSourceHTTPPerm), Enabled: true}, - {ID: string(opspb.CapSourceHTTPUIDGID), Enabled: true}, - {ID: string(opspb.CapBuildOpLLBFileName), Enabled: true}, - {ID: string(opspb.CapExecMetaBase), Enabled: true}, - {ID: string(opspb.CapExecMetaProxy), Enabled: true}, - {ID: string(opspb.CapExecMountBind), Enabled: true}, - {ID: string(opspb.CapExecMountCache), Enabled: true}, - {ID: string(opspb.CapExecMountCacheSharing), Enabled: true}, - {ID: string(opspb.CapExecMountSelector), Enabled: true}, - {ID: string(opspb.CapExecMountTmpfs), Enabled: true}, - {ID: string(opspb.CapExecMountSecret), Enabled: true}, - {ID: string(opspb.CapConstraints), Enabled: true}, - {ID: string(opspb.CapPlatform), Enabled: true}, - {ID: string(opspb.CapMetaIgnoreCache), Enabled: true}, - {ID: string(opspb.CapMetaDescription), Enabled: true}, - {ID: string(opspb.CapMetaExportCache), Enabled: true}, - } -} - -type grpcClient struct { - client pb.LLBBridgeClient - opts map[string]string - sessionID string - product string - workers []client.WorkerInfo - caps apicaps.CapSet - llbCaps apicaps.CapSet - requests map[string]*pb.SolveRequest - execMsgs *messageForwarder -} - -func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, error) { - emptyReq := &pb.SolveRequest{ - Definition: &opspb.Definition{}, - } - if ref == nil { - return emptyReq, nil - } - r, ok := ref.(*reference) - if !ok { - return nil, errors.Errorf("return reference has invalid type %T", ref) - } - if r.id == "" { - return emptyReq, nil - } - req, ok := c.requests[r.id] - if !ok { - return nil, errors.Errorf("did not find request for return reference %s", r.id) - } - return req, nil -} - -func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *client.Result, err error) { - if creq.Definition != nil { - for _, md := range creq.Definition.Metadata { - for cap := range md.Caps { - if err := c.llbCaps.Supports(cap); err != nil { - return nil, err - } - } - } - } - var ( - // old API - legacyRegistryCacheImports []string - // new API (CapImportCaches) - cacheImports []*pb.CacheOptionsEntry - ) - supportCapImportCaches := c.caps.Supports(pb.CapImportCaches) == nil - for _, im := range creq.CacheImports { - if !supportCapImportCaches && im.Type == "registry" { - legacyRegistryCacheImports = append(legacyRegistryCacheImports, im.Attrs["ref"]) - } else { - cacheImports = append(cacheImports, &pb.CacheOptionsEntry{ - Type: im.Type, - Attrs: im.Attrs, - }) - } - } - - req := &pb.SolveRequest{ - Definition: creq.Definition, - Frontend: creq.Frontend, - FrontendOpt: creq.FrontendOpt, - FrontendInputs: creq.FrontendInputs, - AllowResultReturn: true, - AllowResultArrayRef: true, - // 
old API - ImportCacheRefsDeprecated: legacyRegistryCacheImports, - // new API - CacheImports: cacheImports, - } - - // backwards compatibility with inline return - if c.caps.Supports(pb.CapReturnResult) != nil { - req.ExporterAttr = []byte("{}") - } - - if creq.Evaluate { - if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil { - req.Evaluate = creq.Evaluate - } else { - // If evaluate is not supported, fallback to running Stat(".") in order to - // trigger an evaluation of the result. - defer func() { - if res == nil { - return - } - - var ( - id string - ref client.Reference - ) - ref, err = res.SingleRef() - if err != nil { - for refID := range res.Refs { - id = refID - break - } - } else { - id = ref.(*reference).id - } - - _, err = c.client.StatFile(ctx, &pb.StatFileRequest{ - Ref: id, - Path: ".", - }) - }() - } - } - - resp, err := c.client.Solve(ctx, req) - if err != nil { - return nil, err - } - - res = &client.Result{} - if resp.Result == nil { - if id := resp.Ref; id != "" { - c.requests[id] = req - } - res.SetRef(&reference{id: resp.Ref, c: c}) - } else { - res.Metadata = resp.Result.Metadata - switch pbRes := resp.Result.Result.(type) { - case *pb.Result_RefDeprecated: - if id := pbRes.RefDeprecated; id != "" { - res.SetRef(&reference{id: id, c: c}) - } - case *pb.Result_RefsDeprecated: - for k, v := range pbRes.RefsDeprecated.Refs { - ref := &reference{id: v, c: c} - if v == "" { - ref = nil - } - res.AddRef(k, ref) - } - case *pb.Result_Ref: - if pbRes.Ref.Id != "" { - ref, err := newReference(c, pbRes.Ref) - if err != nil { - return nil, err - } - res.SetRef(ref) - } - case *pb.Result_Refs: - for k, v := range pbRes.Refs.Refs { - var ref *reference - if v.Id != "" { - ref, err = newReference(c, v) - if err != nil { - return nil, err - } - } - res.AddRef(k, ref) - } - } - } - - return res, nil -} - -func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) { - var p *opspb.Platform - if platform := opt.Platform; platform != nil { - p = &opspb.Platform{ - OS: platform.OS, - Architecture: platform.Architecture, - Variant: platform.Variant, - OSVersion: platform.OSVersion, - OSFeatures: platform.OSFeatures, - } - } - resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{Ref: ref, Platform: p, ResolveMode: opt.ResolveMode, LogName: opt.LogName}) - if err != nil { - return "", nil, err - } - return resp.Digest, resp.Config, nil -} - -func (c *grpcClient) BuildOpts() client.BuildOpts { - return client.BuildOpts{ - Opts: c.opts, - SessionID: c.sessionID, - Workers: c.workers, - Product: c.product, - LLBCaps: c.llbCaps, - Caps: c.caps, - } -} - -func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) { - err := c.caps.Supports(pb.CapFrontendInputs) - if err != nil { - return nil, err - } - - resp, err := c.client.Inputs(ctx, &pb.InputsRequest{}) - if err != nil { - return nil, err - } - - inputs := make(map[string]llb.State) - for key, def := range resp.Definitions { - op, err := llb.NewDefinitionOp(def) - if err != nil { - return nil, err - } - inputs[key] = llb.NewState(op) - } - return inputs, nil -} - -// procMessageForwarder is created per container process to act as the -// communication channel between the process and the ExecProcess message -// stream. 
-type procMessageForwarder struct { - done chan struct{} - closeOnce sync.Once - msgs chan *pb.ExecMessage -} - -func newProcMessageForwarder() *procMessageForwarder { - return &procMessageForwarder{ - done: make(chan struct{}), - msgs: make(chan *pb.ExecMessage), - } -} - -func (b *procMessageForwarder) Send(ctx context.Context, m *pb.ExecMessage) { - select { - case <-ctx.Done(): - case <-b.done: - b.closeOnce.Do(func() { - close(b.msgs) - }) - case b.msgs <- m: - } -} - -func (b *procMessageForwarder) Recv(ctx context.Context) (m *pb.ExecMessage, ok bool) { - select { - case <-ctx.Done(): - return nil, true - case <-b.done: - return nil, false - case m = <-b.msgs: - return m, true - } -} - -func (b *procMessageForwarder) Close() { - close(b.done) - b.Recv(context.Background()) // flush any messages in queue - b.Send(context.Background(), nil) // ensure channel is closed -} - -// messageForwarder manages a single grpc stream for ExecProcess to facilitate -// a pub/sub message channel for each new process started from the client -// connection. -type messageForwarder struct { - client pb.LLBBridgeClient - ctx context.Context - cancel func() - eg *errgroup.Group - mu sync.Mutex - pids map[string]*procMessageForwarder - stream pb.LLBBridge_ExecProcessClient - // startOnce used to only start the exec message forwarder once, - // so we only have one exec stream per client - startOnce sync.Once - // startErr tracks the error when initializing the stream, it will - // be returned on subsequent calls to Start - startErr error -} - -func newMessageForwarder(ctx context.Context, client pb.LLBBridgeClient) *messageForwarder { - ctx, cancel := context.WithCancel(ctx) - eg, ctx := errgroup.WithContext(ctx) - return &messageForwarder{ - client: client, - pids: map[string]*procMessageForwarder{}, - ctx: ctx, - cancel: cancel, - eg: eg, - } -} - -func (m *messageForwarder) Start() (err error) { - defer func() { - if err != nil { - m.startErr = err - } - }() - - if m.startErr != nil { - return m.startErr - } - - m.startOnce.Do(func() { - m.stream, err = m.client.ExecProcess(m.ctx) - if err != nil { - return - } - m.eg.Go(func() error { - for { - msg, err := m.stream.Recv() - if errors.Is(err, io.EOF) || grpcerrors.Code(err) == codes.Canceled { - return nil - } - logrus.Debugf("|<--- %s", debugMessage(msg)) - - if err != nil { - return err - } - - m.mu.Lock() - msgs, ok := m.pids[msg.ProcessID] - m.mu.Unlock() - - if !ok { - logrus.Debugf("Received exec message for unregistered process: %s", msg.String()) - continue - } - msgs.Send(m.ctx, msg) - } - }) - }) - return err -} - -func debugMessage(msg *pb.ExecMessage) string { - switch m := msg.GetInput().(type) { - case *pb.ExecMessage_Init: - return fmt.Sprintf("Init Message %s", msg.ProcessID) - case *pb.ExecMessage_File: - if m.File.EOF { - return fmt.Sprintf("File Message %s, fd=%d, EOF", msg.ProcessID, m.File.Fd) - } - return fmt.Sprintf("File Message %s, fd=%d, %d bytes", msg.ProcessID, m.File.Fd, len(m.File.Data)) - case *pb.ExecMessage_Resize: - return fmt.Sprintf("Resize Message %s", msg.ProcessID) - case *pb.ExecMessage_Started: - return fmt.Sprintf("Started Message %s", msg.ProcessID) - case *pb.ExecMessage_Exit: - return fmt.Sprintf("Exit Message %s, code=%d, err=%s", msg.ProcessID, m.Exit.Code, m.Exit.Error) - case *pb.ExecMessage_Done: - return fmt.Sprintf("Done Message %s", msg.ProcessID) - } - return fmt.Sprintf("Unknown Message %s", msg.String()) -} - -func (m *messageForwarder) Send(msg *pb.ExecMessage) error { - m.mu.Lock() - _, ok := 
m.pids[msg.ProcessID] - defer m.mu.Unlock() - if !ok { - return errors.Errorf("process %s has ended, not sending message %#v", msg.ProcessID, msg.Input) - } - logrus.Debugf("|---> %s", debugMessage(msg)) - return m.stream.Send(msg) -} - -func (m *messageForwarder) Release() error { - m.cancel() - return m.eg.Wait() -} - -func (m *messageForwarder) Register(pid string) *procMessageForwarder { - m.mu.Lock() - defer m.mu.Unlock() - sender := newProcMessageForwarder() - m.pids[pid] = sender - return sender -} - -func (m *messageForwarder) Deregister(pid string) { - m.mu.Lock() - defer m.mu.Unlock() - sender, ok := m.pids[pid] - if !ok { - return - } - delete(m.pids, pid) - sender.Close() -} - -type msgWriter struct { - mux *messageForwarder - fd uint32 - processID string -} - -func (w *msgWriter) Write(msg []byte) (int, error) { - err := w.mux.Send(&pb.ExecMessage{ - ProcessID: w.processID, - Input: &pb.ExecMessage_File{ - File: &pb.FdMessage{ - Fd: w.fd, - Data: msg, - }, - }, - }) - if err != nil { - return 0, err - } - return len(msg), nil -} - -func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { - err := c.caps.Supports(pb.CapGatewayExec) - if err != nil { - return nil, err - } - id := identity.NewID() - var mounts []*opspb.Mount - for _, m := range req.Mounts { - resultID := m.ResultID - if m.Ref != nil { - ref, ok := m.Ref.(*reference) - if !ok { - return nil, errors.Errorf("unexpected type for reference, got %T", m.Ref) - } - resultID = ref.id - } - mounts = append(mounts, &opspb.Mount{ - Dest: m.Dest, - Selector: m.Selector, - Readonly: m.Readonly, - MountType: m.MountType, - ResultID: resultID, - CacheOpt: m.CacheOpt, - SecretOpt: m.SecretOpt, - SSHOpt: m.SSHOpt, - }) - } - - logrus.Debugf("|---> NewContainer %s", id) - _, err = c.client.NewContainer(ctx, &pb.NewContainerRequest{ - ContainerID: id, - Mounts: mounts, - Platform: req.Platform, - Constraints: req.Constraints, - }) - if err != nil { - return nil, err - } - - // ensure message forwarder is started, only sets up stream first time called - err = c.execMsgs.Start() - if err != nil { - return nil, err - } - - return &container{ - client: c.client, - id: id, - execMsgs: c.execMsgs, - }, nil -} - -type container struct { - client pb.LLBBridgeClient - id string - execMsgs *messageForwarder -} - -func (ctr *container) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) { - pid := fmt.Sprintf("%s:%s", ctr.id, identity.NewID()) - msgs := ctr.execMsgs.Register(pid) - - init := &pb.InitMessage{ - ContainerID: ctr.id, - Meta: &opspb.Meta{ - Args: req.Args, - Env: req.Env, - Cwd: req.Cwd, - User: req.User, - }, - Tty: req.Tty, - Security: req.SecurityMode, - } - if req.Stdin != nil { - init.Fds = append(init.Fds, 0) - } - if req.Stdout != nil { - init.Fds = append(init.Fds, 1) - } - if req.Stderr != nil { - init.Fds = append(init.Fds, 2) - } - - err := ctr.execMsgs.Send(&pb.ExecMessage{ - ProcessID: pid, - Input: &pb.ExecMessage_Init{ - Init: init, - }, - }) - if err != nil { - return nil, err - } - - msg, _ := msgs.Recv(ctx) - if msg == nil { - return nil, errors.Errorf("failed to receive started message") - } - started := msg.GetStarted() - if started == nil { - return nil, errors.Errorf("expecting started message, got %T", msg.GetInput()) - } - - eg, ctx := errgroup.WithContext(ctx) - done := make(chan struct{}) - - ctrProc := &containerProcess{ - execMsgs: ctr.execMsgs, - id: pid, - eg: eg, - } - - var stdinReader *io.PipeReader - 
ctrProc.eg.Go(func() error { - <-done - if stdinReader != nil { - return stdinReader.Close() - } - return nil - }) - - if req.Stdin != nil { - var stdinWriter io.WriteCloser - stdinReader, stdinWriter = io.Pipe() - // This go routine is intentionally not part of the errgroup because - // if os.Stdin is used for req.Stdin then this will block until - // the user closes the input, which will likely be after we are done - // with the container, so we can't Wait on it. - go func() { - io.Copy(stdinWriter, req.Stdin) - stdinWriter.Close() - }() - - ctrProc.eg.Go(func() error { - m := &msgWriter{ - mux: ctr.execMsgs, - processID: pid, - fd: 0, - } - _, err := io.Copy(m, stdinReader) - // ignore ErrClosedPipe, it is EOF for our usage. - if err != nil && !errors.Is(err, io.ErrClosedPipe) { - return err - } - // not an error so must be eof - return ctr.execMsgs.Send(&pb.ExecMessage{ - ProcessID: pid, - Input: &pb.ExecMessage_File{ - File: &pb.FdMessage{ - Fd: 0, - EOF: true, - }, - }, - }) - }) - } - - ctrProc.eg.Go(func() error { - var closeDoneOnce sync.Once - var exitError error - for { - msg, ok := msgs.Recv(ctx) - if !ok { - // no more messages, return - return exitError - } - - if msg == nil { - // empty message from ctx cancel, so just start shutting down - // input, but continue processing more exit/done messages - closeDoneOnce.Do(func() { - close(done) - }) - continue - } - - if file := msg.GetFile(); file != nil { - var out io.WriteCloser - switch file.Fd { - case 1: - out = req.Stdout - case 2: - out = req.Stderr - } - if out == nil { - // if things are plumbed correctly this should never happen - return errors.Errorf("missing writer for output fd %d", file.Fd) - } - if len(file.Data) > 0 { - _, err := out.Write(file.Data) - if err != nil { - return err - } - } - } else if exit := msg.GetExit(); exit != nil { - // capture exit message to exitError so we can return it after - // the server sends the Done message - closeDoneOnce.Do(func() { - close(done) - }) - if exit.Code == 0 { - continue - } - exitError = grpcerrors.FromGRPC(status.ErrorProto(&spb.Status{ - Code: exit.Error.Code, - Message: exit.Error.Message, - Details: convertGogoAny(exit.Error.Details), - })) - if exit.Code != errdefs.UnknownExitStatus { - exitError = &errdefs.ExitError{ExitCode: exit.Code, Err: exitError} - } - } else if serverDone := msg.GetDone(); serverDone != nil { - return exitError - } else { - return errors.Errorf("unexpected Exec Message for pid %s: %T", pid, msg.GetInput()) - } - } - }) - - return ctrProc, nil -} - -func (ctr *container) Release(ctx context.Context) error { - logrus.Debugf("|---> ReleaseContainer %s", ctr.id) - _, err := ctr.client.ReleaseContainer(ctx, &pb.ReleaseContainerRequest{ - ContainerID: ctr.id, - }) - return err -} - -type containerProcess struct { - execMsgs *messageForwarder - id string - eg *errgroup.Group -} - -func (ctrProc *containerProcess) Wait() error { - defer ctrProc.execMsgs.Deregister(ctrProc.id) - return ctrProc.eg.Wait() -} - -func (ctrProc *containerProcess) Resize(_ context.Context, size client.WinSize) error { - return ctrProc.execMsgs.Send(&pb.ExecMessage{ - ProcessID: ctrProc.id, - Input: &pb.ExecMessage_Resize{ - Resize: &pb.ResizeMessage{ - Cols: size.Cols, - Rows: size.Rows, - }, - }, - }) -} - -type reference struct { - c *grpcClient - id string - def *opspb.Definition -} - -func newReference(c *grpcClient, ref *pb.Ref) (*reference, error) { - return &reference{c: c, id: ref.Id, def: ref.Def}, nil -} - -func (r *reference) ToState() (st llb.State, err 
error) { - err = r.c.caps.Supports(pb.CapReferenceOutput) - if err != nil { - return st, err - } - - if r.def == nil { - return st, errors.Errorf("gateway did not return reference with definition") - } - - defop, err := llb.NewDefinitionOp(r.def) - if err != nil { - return st, err - } - - return llb.NewState(defop), nil -} - -func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) { - rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id} - if r := req.Range; r != nil { - rfr.Range = &pb.FileRange{ - Offset: int64(r.Offset), - Length: int64(r.Length), - } - } - resp, err := r.c.client.ReadFile(ctx, rfr) - if err != nil { - return nil, err - } - return resp.Data, nil -} - -func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) { - if err := r.c.caps.Supports(pb.CapReadDir); err != nil { - return nil, err - } - rdr := &pb.ReadDirRequest{ - DirPath: req.Path, - IncludePattern: req.IncludePattern, - Ref: r.id, - } - resp, err := r.c.client.ReadDir(ctx, rdr) - if err != nil { - return nil, err - } - return resp.Entries, nil -} - -func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) { - if err := r.c.caps.Supports(pb.CapStatFile); err != nil { - return nil, err - } - rdr := &pb.StatFileRequest{ - Path: req.Path, - Ref: r.id, - } - resp, err := r.c.client.StatFile(ctx, rdr) - if err != nil { - return nil, err - } - return resp.Stat, nil -} - -func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) { - dialOpt := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { - return stdioConn(), nil - }) - - cc, err := grpc.DialContext(ctx, "localhost", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor)) - if err != nil { - return nil, nil, errors.Wrap(err, "failed to create grpc client") - } - - ctx, cancel := context.WithCancel(ctx) - _ = cancel - // go monitorHealth(ctx, cc, cancel) - - return ctx, cc, nil -} - -func stdioConn() net.Conn { - return &conn{os.Stdin, os.Stdout, os.Stdout} -} - -type conn struct { - io.Reader - io.Writer - io.Closer -} - -func (s *conn) LocalAddr() net.Addr { - return dummyAddr{} -} -func (s *conn) RemoteAddr() net.Addr { - return dummyAddr{} -} -func (s *conn) SetDeadline(t time.Time) error { - return nil -} -func (s *conn) SetReadDeadline(t time.Time) error { - return nil -} -func (s *conn) SetWriteDeadline(t time.Time) error { - return nil -} - -type dummyAddr struct { -} - -func (d dummyAddr) Network() string { - return "pipe" -} - -func (d dummyAddr) String() string { - return "localhost" -} - -func opts() map[string]string { - opts := map[string]string{} - for _, env := range os.Environ() { - parts := strings.SplitN(env, "=", 2) - k := parts[0] - v := "" - if len(parts) == 2 { - v = parts[1] - } - if !strings.HasPrefix(k, frontendPrefix) { - continue - } - parts = strings.SplitN(v, "=", 2) - v = "" - if len(parts) == 2 { - v = parts[1] - } - opts[parts[0]] = v - } - return opts -} - -func sessionID() string { - return os.Getenv("BUILDKIT_SESSION_ID") -} - -func workers() []client.WorkerInfo { - var c []client.WorkerInfo - if err := json.Unmarshal([]byte(os.Getenv("BUILDKIT_WORKERS")), &c); err != nil { - return nil - } - return c -} - -func product() string { - return os.Getenv("BUILDKIT_EXPORTEDPRODUCT") -} - -func convertGogoAny(in []*gogotypes.Any) []*any.Any { - out := 
make([]*any.Any, len(in)) - for i := range in { - out[i] = &any.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value} - } - return out -} - -func convertToGogoAny(in []*any.Any) []*gogotypes.Any { - out := make([]*gogotypes.Any, len(in)) - for i := range in { - out[i] = &gogotypes.Any{TypeUrl: in[i].TypeUrl, Value: in[i].Value} - } - return out -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go deleted file mode 100644 index efddd746d362..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go +++ /dev/null @@ -1,172 +0,0 @@ -package moby_buildkit_v1_frontend //nolint:golint - -import "github.com/moby/buildkit/util/apicaps" - -var Caps apicaps.CapList - -// Every backwards or forwards non-compatible change needs to add a new capability row. -// By default new capabilities should be experimental. After merge a capability is -// considered immutable. After a capability is marked stable it should not be disabled. - -const ( - CapSolveBase apicaps.CapID = "solve.base" - CapSolveInlineReturn apicaps.CapID = "solve.inlinereturn" - CapResolveImage apicaps.CapID = "resolveimage" - CapResolveImageResolveMode apicaps.CapID = "resolveimage.resolvemode" - CapReadFile apicaps.CapID = "readfile" - CapReturnResult apicaps.CapID = "return" - CapReturnMap apicaps.CapID = "returnmap" - CapReadDir apicaps.CapID = "readdir" - CapStatFile apicaps.CapID = "statfile" - CapImportCaches apicaps.CapID = "importcaches" - - // CapProtoRefArray is a capability to return arrays of refs instead of single - // refs. This capability is only for the wire format change and shouldn't be - // used in frontends for feature detection. - CapProtoRefArray apicaps.CapID = "proto.refarray" - - // CapReferenceOutput is a capability to use a reference of a solved result as - // an llb.Output. - CapReferenceOutput apicaps.CapID = "reference.output" - - // CapFrontendInputs is a capability to request frontend inputs from the - // LLBBridge GRPC server. - CapFrontendInputs apicaps.CapID = "frontend.inputs" - - // CapGatewaySolveMetadata can be used to check if solve calls from gateway reliably return metadata - CapGatewaySolveMetadata apicaps.CapID = "gateway.solve.metadata" - - // CapGatewayExec is the capability to create and interact with new - // containers directly through the gateway - CapGatewayExec apicaps.CapID = "gateway.exec" - - // CapFrontendCaps can be used to check that frontends define support for certain capabilities - CapFrontendCaps apicaps.CapID = "frontend.caps" - - // CapGatewayEvaluateSolve is a capability to immediately unlazy solve - // results. This is generally used by the client to return and handle solve - // errors. 
- CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate" -) - -func init() { - - Caps.Init(apicaps.Cap{ - ID: CapSolveBase, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSolveInlineReturn, - Name: "inline return from solve", - Enabled: true, - Deprecated: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapResolveImage, - Name: "resolve remote image config", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapResolveImageResolveMode, - Name: "resolve remote image config with custom resolvemode", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReadFile, - Name: "read static file", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReturnResult, - Name: "return solve result", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReturnMap, - Name: "return reference map", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReadDir, - Name: "read static directory", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapStatFile, - Name: "stat a file", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapImportCaches, - Name: "import caches", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapProtoRefArray, - Name: "wire format ref arrays", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapReferenceOutput, - Name: "reference output", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapFrontendInputs, - Name: "frontend inputs", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapGatewaySolveMetadata, - Name: "gateway metadata", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapGatewayExec, - Name: "gateway exec", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapFrontendCaps, - Name: "frontend capabilities", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapGatewayEvaluateSolve, - Name: "gateway evaluate solve", - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go deleted file mode 100644 index 06fcb98ea4d8..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ /dev/null @@ -1,10066 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: gateway.proto - -package moby_buildkit_v1_frontend - -import ( - context "context" - fmt "fmt" - rpc "github.com/gogo/googleapis/google/rpc" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - types1 "github.com/moby/buildkit/api/types" - pb "github.com/moby/buildkit/solver/pb" - pb1 "github.com/moby/buildkit/util/apicaps/pb" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - types "github.com/tonistiigi/fsutil/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Result struct { - // Types that are valid to be assigned to Result: - // *Result_RefDeprecated - // *Result_RefsDeprecated - // *Result_Ref - // *Result_Refs - Result isResult_Result `protobuf_oneof:"result"` - Metadata map[string][]byte `protobuf:"bytes,10,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Result) Reset() { *m = Result{} } -func (m *Result) String() string { return proto.CompactTextString(m) } -func (*Result) ProtoMessage() {} -func (*Result) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{0} -} -func (m *Result) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Result) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Result.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Result) XXX_Merge(src proto.Message) { - xxx_messageInfo_Result.Merge(m, src) -} -func (m *Result) XXX_Size() int { - return m.Size() -} -func (m *Result) XXX_DiscardUnknown() { - xxx_messageInfo_Result.DiscardUnknown(m) -} - -var xxx_messageInfo_Result proto.InternalMessageInfo - -type isResult_Result interface { - isResult_Result() - MarshalTo([]byte) (int, error) - Size() int -} - -type Result_RefDeprecated struct { - RefDeprecated string `protobuf:"bytes,1,opt,name=refDeprecated,proto3,oneof" json:"refDeprecated,omitempty"` -} -type Result_RefsDeprecated struct { - RefsDeprecated *RefMapDeprecated `protobuf:"bytes,2,opt,name=refsDeprecated,proto3,oneof" json:"refsDeprecated,omitempty"` -} -type Result_Ref struct { - Ref *Ref `protobuf:"bytes,3,opt,name=ref,proto3,oneof" json:"ref,omitempty"` -} -type Result_Refs struct { - Refs *RefMap `protobuf:"bytes,4,opt,name=refs,proto3,oneof" json:"refs,omitempty"` -} - -func (*Result_RefDeprecated) isResult_Result() {} -func (*Result_RefsDeprecated) isResult_Result() {} -func (*Result_Ref) isResult_Result() {} -func (*Result_Refs) isResult_Result() {} - -func (m *Result) GetResult() isResult_Result { - if m != nil { - return m.Result - } - return nil -} - -func (m *Result) GetRefDeprecated() string { - if x, ok 
:= m.GetResult().(*Result_RefDeprecated); ok { - return x.RefDeprecated - } - return "" -} - -func (m *Result) GetRefsDeprecated() *RefMapDeprecated { - if x, ok := m.GetResult().(*Result_RefsDeprecated); ok { - return x.RefsDeprecated - } - return nil -} - -func (m *Result) GetRef() *Ref { - if x, ok := m.GetResult().(*Result_Ref); ok { - return x.Ref - } - return nil -} - -func (m *Result) GetRefs() *RefMap { - if x, ok := m.GetResult().(*Result_Refs); ok { - return x.Refs - } - return nil -} - -func (m *Result) GetMetadata() map[string][]byte { - if m != nil { - return m.Metadata - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*Result) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Result_RefDeprecated)(nil), - (*Result_RefsDeprecated)(nil), - (*Result_Ref)(nil), - (*Result_Refs)(nil), - } -} - -type RefMapDeprecated struct { - Refs map[string]string `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RefMapDeprecated) Reset() { *m = RefMapDeprecated{} } -func (m *RefMapDeprecated) String() string { return proto.CompactTextString(m) } -func (*RefMapDeprecated) ProtoMessage() {} -func (*RefMapDeprecated) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{1} -} -func (m *RefMapDeprecated) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RefMapDeprecated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RefMapDeprecated.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RefMapDeprecated) XXX_Merge(src proto.Message) { - xxx_messageInfo_RefMapDeprecated.Merge(m, src) -} -func (m *RefMapDeprecated) XXX_Size() int { - return m.Size() -} -func (m *RefMapDeprecated) XXX_DiscardUnknown() { - xxx_messageInfo_RefMapDeprecated.DiscardUnknown(m) -} - -var xxx_messageInfo_RefMapDeprecated proto.InternalMessageInfo - -func (m *RefMapDeprecated) GetRefs() map[string]string { - if m != nil { - return m.Refs - } - return nil -} - -type Ref struct { - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Def *pb.Definition `protobuf:"bytes,2,opt,name=def,proto3" json:"def,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Ref) Reset() { *m = Ref{} } -func (m *Ref) String() string { return proto.CompactTextString(m) } -func (*Ref) ProtoMessage() {} -func (*Ref) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{2} -} -func (m *Ref) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Ref) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Ref.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Ref) XXX_Merge(src proto.Message) { - xxx_messageInfo_Ref.Merge(m, src) -} -func (m *Ref) XXX_Size() int { - return m.Size() -} -func (m *Ref) XXX_DiscardUnknown() { - xxx_messageInfo_Ref.DiscardUnknown(m) -} - -var xxx_messageInfo_Ref proto.InternalMessageInfo - -func (m *Ref) GetId() 
string { - if m != nil { - return m.Id - } - return "" -} - -func (m *Ref) GetDef() *pb.Definition { - if m != nil { - return m.Def - } - return nil -} - -type RefMap struct { - Refs map[string]*Ref `protobuf:"bytes,1,rep,name=refs,proto3" json:"refs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *RefMap) Reset() { *m = RefMap{} } -func (m *RefMap) String() string { return proto.CompactTextString(m) } -func (*RefMap) ProtoMessage() {} -func (*RefMap) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{3} -} -func (m *RefMap) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RefMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RefMap.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RefMap) XXX_Merge(src proto.Message) { - xxx_messageInfo_RefMap.Merge(m, src) -} -func (m *RefMap) XXX_Size() int { - return m.Size() -} -func (m *RefMap) XXX_DiscardUnknown() { - xxx_messageInfo_RefMap.DiscardUnknown(m) -} - -var xxx_messageInfo_RefMap proto.InternalMessageInfo - -func (m *RefMap) GetRefs() map[string]*Ref { - if m != nil { - return m.Refs - } - return nil -} - -type ReturnRequest struct { - Result *Result `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` - Error *rpc.Status `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReturnRequest) Reset() { *m = ReturnRequest{} } -func (m *ReturnRequest) String() string { return proto.CompactTextString(m) } -func (*ReturnRequest) ProtoMessage() {} -func (*ReturnRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{4} -} -func (m *ReturnRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReturnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReturnRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReturnRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReturnRequest.Merge(m, src) -} -func (m *ReturnRequest) XXX_Size() int { - return m.Size() -} -func (m *ReturnRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReturnRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReturnRequest proto.InternalMessageInfo - -func (m *ReturnRequest) GetResult() *Result { - if m != nil { - return m.Result - } - return nil -} - -func (m *ReturnRequest) GetError() *rpc.Status { - if m != nil { - return m.Error - } - return nil -} - -type ReturnResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReturnResponse) Reset() { *m = ReturnResponse{} } -func (m *ReturnResponse) String() string { return proto.CompactTextString(m) } -func (*ReturnResponse) ProtoMessage() {} -func (*ReturnResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{5} -} -func (m *ReturnResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*ReturnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReturnResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReturnResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReturnResponse.Merge(m, src) -} -func (m *ReturnResponse) XXX_Size() int { - return m.Size() -} -func (m *ReturnResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReturnResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReturnResponse proto.InternalMessageInfo - -type InputsRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InputsRequest) Reset() { *m = InputsRequest{} } -func (m *InputsRequest) String() string { return proto.CompactTextString(m) } -func (*InputsRequest) ProtoMessage() {} -func (*InputsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{6} -} -func (m *InputsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InputsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InputsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InputsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_InputsRequest.Merge(m, src) -} -func (m *InputsRequest) XXX_Size() int { - return m.Size() -} -func (m *InputsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_InputsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_InputsRequest proto.InternalMessageInfo - -type InputsResponse struct { - Definitions map[string]*pb.Definition `protobuf:"bytes,1,rep,name=Definitions,proto3" json:"Definitions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InputsResponse) Reset() { *m = InputsResponse{} } -func (m *InputsResponse) String() string { return proto.CompactTextString(m) } -func (*InputsResponse) ProtoMessage() {} -func (*InputsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{7} -} -func (m *InputsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InputsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InputsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InputsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_InputsResponse.Merge(m, src) -} -func (m *InputsResponse) XXX_Size() int { - return m.Size() -} -func (m *InputsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_InputsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_InputsResponse proto.InternalMessageInfo - -func (m *InputsResponse) GetDefinitions() map[string]*pb.Definition { - if m != nil { - return m.Definitions - } - return nil -} - -type ResolveImageConfigRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Platform *pb.Platform `protobuf:"bytes,2,opt,name=Platform,proto3" json:"Platform,omitempty"` - ResolveMode string 
`protobuf:"bytes,3,opt,name=ResolveMode,proto3" json:"ResolveMode,omitempty"` - LogName string `protobuf:"bytes,4,opt,name=LogName,proto3" json:"LogName,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveImageConfigRequest) Reset() { *m = ResolveImageConfigRequest{} } -func (m *ResolveImageConfigRequest) String() string { return proto.CompactTextString(m) } -func (*ResolveImageConfigRequest) ProtoMessage() {} -func (*ResolveImageConfigRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{8} -} -func (m *ResolveImageConfigRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResolveImageConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResolveImageConfigRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResolveImageConfigRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveImageConfigRequest.Merge(m, src) -} -func (m *ResolveImageConfigRequest) XXX_Size() int { - return m.Size() -} -func (m *ResolveImageConfigRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveImageConfigRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveImageConfigRequest proto.InternalMessageInfo - -func (m *ResolveImageConfigRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *ResolveImageConfigRequest) GetPlatform() *pb.Platform { - if m != nil { - return m.Platform - } - return nil -} - -func (m *ResolveImageConfigRequest) GetResolveMode() string { - if m != nil { - return m.ResolveMode - } - return "" -} - -func (m *ResolveImageConfigRequest) GetLogName() string { - if m != nil { - return m.LogName - } - return "" -} - -type ResolveImageConfigResponse struct { - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=Digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"Digest"` - Config []byte `protobuf:"bytes,2,opt,name=Config,proto3" json:"Config,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResolveImageConfigResponse) Reset() { *m = ResolveImageConfigResponse{} } -func (m *ResolveImageConfigResponse) String() string { return proto.CompactTextString(m) } -func (*ResolveImageConfigResponse) ProtoMessage() {} -func (*ResolveImageConfigResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{9} -} -func (m *ResolveImageConfigResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResolveImageConfigResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResolveImageConfigResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResolveImageConfigResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResolveImageConfigResponse.Merge(m, src) -} -func (m *ResolveImageConfigResponse) XXX_Size() int { - return m.Size() -} -func (m *ResolveImageConfigResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ResolveImageConfigResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ResolveImageConfigResponse proto.InternalMessageInfo - -func (m 
*ResolveImageConfigResponse) GetConfig() []byte { - if m != nil { - return m.Config - } - return nil -} - -type SolveRequest struct { - Definition *pb.Definition `protobuf:"bytes,1,opt,name=Definition,proto3" json:"Definition,omitempty"` - Frontend string `protobuf:"bytes,2,opt,name=Frontend,proto3" json:"Frontend,omitempty"` - FrontendOpt map[string]string `protobuf:"bytes,3,rep,name=FrontendOpt,proto3" json:"FrontendOpt,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // ImportCacheRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0. - // When ImportCacheRefsDeprecated is set, the solver appends - // {.Type = "registry", .Attrs = {"ref": importCacheRef}} - // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed) - ImportCacheRefsDeprecated []string `protobuf:"bytes,4,rep,name=ImportCacheRefsDeprecated,proto3" json:"ImportCacheRefsDeprecated,omitempty"` - AllowResultReturn bool `protobuf:"varint,5,opt,name=allowResultReturn,proto3" json:"allowResultReturn,omitempty"` - AllowResultArrayRef bool `protobuf:"varint,6,opt,name=allowResultArrayRef,proto3" json:"allowResultArrayRef,omitempty"` - // apicaps.CapSolveInlineReturn deprecated - Final bool `protobuf:"varint,10,opt,name=Final,proto3" json:"Final,omitempty"` - ExporterAttr []byte `protobuf:"bytes,11,opt,name=ExporterAttr,proto3" json:"ExporterAttr,omitempty"` - // CacheImports was added in BuildKit v0.4.0. - // apicaps:CapImportCaches - CacheImports []*CacheOptionsEntry `protobuf:"bytes,12,rep,name=CacheImports,proto3" json:"CacheImports,omitempty"` - // apicaps:CapFrontendInputs - FrontendInputs map[string]*pb.Definition `protobuf:"bytes,13,rep,name=FrontendInputs,proto3" json:"FrontendInputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Evaluate bool `protobuf:"varint,14,opt,name=Evaluate,proto3" json:"Evaluate,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SolveRequest) Reset() { *m = SolveRequest{} } -func (m *SolveRequest) String() string { return proto.CompactTextString(m) } -func (*SolveRequest) ProtoMessage() {} -func (*SolveRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{10} -} -func (m *SolveRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SolveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SolveRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SolveRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_SolveRequest.Merge(m, src) -} -func (m *SolveRequest) XXX_Size() int { - return m.Size() -} -func (m *SolveRequest) XXX_DiscardUnknown() { - xxx_messageInfo_SolveRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_SolveRequest proto.InternalMessageInfo - -func (m *SolveRequest) GetDefinition() *pb.Definition { - if m != nil { - return m.Definition - } - return nil -} - -func (m *SolveRequest) GetFrontend() string { - if m != nil { - return m.Frontend - } - return "" -} - -func (m *SolveRequest) GetFrontendOpt() map[string]string { - if m != nil { - return m.FrontendOpt - } - return nil -} - -func (m *SolveRequest) GetImportCacheRefsDeprecated() []string { - if m != nil { - return 
m.ImportCacheRefsDeprecated - } - return nil -} - -func (m *SolveRequest) GetAllowResultReturn() bool { - if m != nil { - return m.AllowResultReturn - } - return false -} - -func (m *SolveRequest) GetAllowResultArrayRef() bool { - if m != nil { - return m.AllowResultArrayRef - } - return false -} - -func (m *SolveRequest) GetFinal() bool { - if m != nil { - return m.Final - } - return false -} - -func (m *SolveRequest) GetExporterAttr() []byte { - if m != nil { - return m.ExporterAttr - } - return nil -} - -func (m *SolveRequest) GetCacheImports() []*CacheOptionsEntry { - if m != nil { - return m.CacheImports - } - return nil -} - -func (m *SolveRequest) GetFrontendInputs() map[string]*pb.Definition { - if m != nil { - return m.FrontendInputs - } - return nil -} - -func (m *SolveRequest) GetEvaluate() bool { - if m != nil { - return m.Evaluate - } - return false -} - -// CacheOptionsEntry corresponds to the control.CacheOptionsEntry -type CacheOptionsEntry struct { - Type string `protobuf:"bytes,1,opt,name=Type,proto3" json:"Type,omitempty"` - Attrs map[string]string `protobuf:"bytes,2,rep,name=Attrs,proto3" json:"Attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *CacheOptionsEntry) Reset() { *m = CacheOptionsEntry{} } -func (m *CacheOptionsEntry) String() string { return proto.CompactTextString(m) } -func (*CacheOptionsEntry) ProtoMessage() {} -func (*CacheOptionsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{11} -} -func (m *CacheOptionsEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CacheOptionsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CacheOptionsEntry.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CacheOptionsEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_CacheOptionsEntry.Merge(m, src) -} -func (m *CacheOptionsEntry) XXX_Size() int { - return m.Size() -} -func (m *CacheOptionsEntry) XXX_DiscardUnknown() { - xxx_messageInfo_CacheOptionsEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_CacheOptionsEntry proto.InternalMessageInfo - -func (m *CacheOptionsEntry) GetType() string { - if m != nil { - return m.Type - } - return "" -} - -func (m *CacheOptionsEntry) GetAttrs() map[string]string { - if m != nil { - return m.Attrs - } - return nil -} - -type SolveResponse struct { - // deprecated - Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"` - // these fields are returned when allowMapReturn was set - Result *Result `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *SolveResponse) Reset() { *m = SolveResponse{} } -func (m *SolveResponse) String() string { return proto.CompactTextString(m) } -func (*SolveResponse) ProtoMessage() {} -func (*SolveResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{12} -} -func (m *SolveResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SolveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SolveResponse.Marshal(b, m, 
deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SolveResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_SolveResponse.Merge(m, src) -} -func (m *SolveResponse) XXX_Size() int { - return m.Size() -} -func (m *SolveResponse) XXX_DiscardUnknown() { - xxx_messageInfo_SolveResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_SolveResponse proto.InternalMessageInfo - -func (m *SolveResponse) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *SolveResponse) GetResult() *Result { - if m != nil { - return m.Result - } - return nil -} - -type ReadFileRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - FilePath string `protobuf:"bytes,2,opt,name=FilePath,proto3" json:"FilePath,omitempty"` - Range *FileRange `protobuf:"bytes,3,opt,name=Range,proto3" json:"Range,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadFileRequest) Reset() { *m = ReadFileRequest{} } -func (m *ReadFileRequest) String() string { return proto.CompactTextString(m) } -func (*ReadFileRequest) ProtoMessage() {} -func (*ReadFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{13} -} -func (m *ReadFileRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadFileRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadFileRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadFileRequest.Merge(m, src) -} -func (m *ReadFileRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadFileRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadFileRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadFileRequest proto.InternalMessageInfo - -func (m *ReadFileRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *ReadFileRequest) GetFilePath() string { - if m != nil { - return m.FilePath - } - return "" -} - -func (m *ReadFileRequest) GetRange() *FileRange { - if m != nil { - return m.Range - } - return nil -} - -type FileRange struct { - Offset int64 `protobuf:"varint,1,opt,name=Offset,proto3" json:"Offset,omitempty"` - Length int64 `protobuf:"varint,2,opt,name=Length,proto3" json:"Length,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FileRange) Reset() { *m = FileRange{} } -func (m *FileRange) String() string { return proto.CompactTextString(m) } -func (*FileRange) ProtoMessage() {} -func (*FileRange) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{14} -} -func (m *FileRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FileRange.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FileRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileRange.Merge(m, src) -} -func (m *FileRange) XXX_Size() int { - return m.Size() -} -func (m 
*FileRange) XXX_DiscardUnknown() { - xxx_messageInfo_FileRange.DiscardUnknown(m) -} - -var xxx_messageInfo_FileRange proto.InternalMessageInfo - -func (m *FileRange) GetOffset() int64 { - if m != nil { - return m.Offset - } - return 0 -} - -func (m *FileRange) GetLength() int64 { - if m != nil { - return m.Length - } - return 0 -} - -type ReadFileResponse struct { - Data []byte `protobuf:"bytes,1,opt,name=Data,proto3" json:"Data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadFileResponse) Reset() { *m = ReadFileResponse{} } -func (m *ReadFileResponse) String() string { return proto.CompactTextString(m) } -func (*ReadFileResponse) ProtoMessage() {} -func (*ReadFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{15} -} -func (m *ReadFileResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadFileResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadFileResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadFileResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadFileResponse.Merge(m, src) -} -func (m *ReadFileResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadFileResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadFileResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadFileResponse proto.InternalMessageInfo - -func (m *ReadFileResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type ReadDirRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - DirPath string `protobuf:"bytes,2,opt,name=DirPath,proto3" json:"DirPath,omitempty"` - IncludePattern string `protobuf:"bytes,3,opt,name=IncludePattern,proto3" json:"IncludePattern,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadDirRequest) Reset() { *m = ReadDirRequest{} } -func (m *ReadDirRequest) String() string { return proto.CompactTextString(m) } -func (*ReadDirRequest) ProtoMessage() {} -func (*ReadDirRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{16} -} -func (m *ReadDirRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadDirRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadDirRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadDirRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDirRequest.Merge(m, src) -} -func (m *ReadDirRequest) XXX_Size() int { - return m.Size() -} -func (m *ReadDirRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReadDirRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadDirRequest proto.InternalMessageInfo - -func (m *ReadDirRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *ReadDirRequest) GetDirPath() string { - if m != nil { - return m.DirPath - } - return "" -} - -func (m *ReadDirRequest) GetIncludePattern() string { - if m != nil { - return m.IncludePattern - } - return "" -} - -type ReadDirResponse struct { - Entries []*types.Stat 
`protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReadDirResponse) Reset() { *m = ReadDirResponse{} } -func (m *ReadDirResponse) String() string { return proto.CompactTextString(m) } -func (*ReadDirResponse) ProtoMessage() {} -func (*ReadDirResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{17} -} -func (m *ReadDirResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReadDirResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReadDirResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReadDirResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReadDirResponse.Merge(m, src) -} -func (m *ReadDirResponse) XXX_Size() int { - return m.Size() -} -func (m *ReadDirResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReadDirResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReadDirResponse proto.InternalMessageInfo - -func (m *ReadDirResponse) GetEntries() []*types.Stat { - if m != nil { - return m.Entries - } - return nil -} - -type StatFileRequest struct { - Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` - Path string `protobuf:"bytes,2,opt,name=Path,proto3" json:"Path,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatFileRequest) Reset() { *m = StatFileRequest{} } -func (m *StatFileRequest) String() string { return proto.CompactTextString(m) } -func (*StatFileRequest) ProtoMessage() {} -func (*StatFileRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{18} -} -func (m *StatFileRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatFileRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatFileRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatFileRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatFileRequest.Merge(m, src) -} -func (m *StatFileRequest) XXX_Size() int { - return m.Size() -} -func (m *StatFileRequest) XXX_DiscardUnknown() { - xxx_messageInfo_StatFileRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_StatFileRequest proto.InternalMessageInfo - -func (m *StatFileRequest) GetRef() string { - if m != nil { - return m.Ref - } - return "" -} - -func (m *StatFileRequest) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -type StatFileResponse struct { - Stat *types.Stat `protobuf:"bytes,1,opt,name=stat,proto3" json:"stat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StatFileResponse) Reset() { *m = StatFileResponse{} } -func (m *StatFileResponse) String() string { return proto.CompactTextString(m) } -func (*StatFileResponse) ProtoMessage() {} -func (*StatFileResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{19} -} -func (m *StatFileResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatFileResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StatFileResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StatFileResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatFileResponse.Merge(m, src) -} -func (m *StatFileResponse) XXX_Size() int { - return m.Size() -} -func (m *StatFileResponse) XXX_DiscardUnknown() { - xxx_messageInfo_StatFileResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_StatFileResponse proto.InternalMessageInfo - -func (m *StatFileResponse) GetStat() *types.Stat { - if m != nil { - return m.Stat - } - return nil -} - -type PingRequest struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PingRequest) Reset() { *m = PingRequest{} } -func (m *PingRequest) String() string { return proto.CompactTextString(m) } -func (*PingRequest) ProtoMessage() {} -func (*PingRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{20} -} -func (m *PingRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PingRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PingRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_PingRequest.Merge(m, src) -} -func (m *PingRequest) XXX_Size() int { - return m.Size() -} -func (m *PingRequest) XXX_DiscardUnknown() { - xxx_messageInfo_PingRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_PingRequest proto.InternalMessageInfo - -type PongResponse struct { - FrontendAPICaps []pb1.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps,proto3" json:"FrontendAPICaps"` - LLBCaps []pb1.APICap `protobuf:"bytes,2,rep,name=LLBCaps,proto3" json:"LLBCaps"` - Workers []*types1.WorkerRecord `protobuf:"bytes,3,rep,name=Workers,proto3" json:"Workers,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *PongResponse) Reset() { *m = PongResponse{} } -func (m *PongResponse) String() string { return proto.CompactTextString(m) } -func (*PongResponse) ProtoMessage() {} -func (*PongResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{21} -} -func (m *PongResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PongResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PongResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PongResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_PongResponse.Merge(m, src) -} -func (m *PongResponse) XXX_Size() int { - return m.Size() -} -func (m *PongResponse) XXX_DiscardUnknown() { - xxx_messageInfo_PongResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_PongResponse proto.InternalMessageInfo - -func (m *PongResponse) GetFrontendAPICaps() []pb1.APICap { - if m != nil { - return m.FrontendAPICaps - } - return nil -} - -func (m *PongResponse) GetLLBCaps() []pb1.APICap { - if m != nil { - return m.LLBCaps - } - return nil -} - -func (m *PongResponse) 
GetWorkers() []*types1.WorkerRecord { - if m != nil { - return m.Workers - } - return nil -} - -type NewContainerRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` - // For mount input values we can use random identifiers passed with ref - Mounts []*pb.Mount `protobuf:"bytes,2,rep,name=Mounts,proto3" json:"Mounts,omitempty"` - Network pb.NetMode `protobuf:"varint,3,opt,name=Network,proto3,enum=pb.NetMode" json:"Network,omitempty"` - Platform *pb.Platform `protobuf:"bytes,4,opt,name=platform,proto3" json:"platform,omitempty"` - Constraints *pb.WorkerConstraints `protobuf:"bytes,5,opt,name=constraints,proto3" json:"constraints,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NewContainerRequest) Reset() { *m = NewContainerRequest{} } -func (m *NewContainerRequest) String() string { return proto.CompactTextString(m) } -func (*NewContainerRequest) ProtoMessage() {} -func (*NewContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{22} -} -func (m *NewContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NewContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NewContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NewContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewContainerRequest.Merge(m, src) -} -func (m *NewContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *NewContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_NewContainerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_NewContainerRequest proto.InternalMessageInfo - -func (m *NewContainerRequest) GetContainerID() string { - if m != nil { - return m.ContainerID - } - return "" -} - -func (m *NewContainerRequest) GetMounts() []*pb.Mount { - if m != nil { - return m.Mounts - } - return nil -} - -func (m *NewContainerRequest) GetNetwork() pb.NetMode { - if m != nil { - return m.Network - } - return pb.NetMode_UNSET -} - -func (m *NewContainerRequest) GetPlatform() *pb.Platform { - if m != nil { - return m.Platform - } - return nil -} - -func (m *NewContainerRequest) GetConstraints() *pb.WorkerConstraints { - if m != nil { - return m.Constraints - } - return nil -} - -type NewContainerResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *NewContainerResponse) Reset() { *m = NewContainerResponse{} } -func (m *NewContainerResponse) String() string { return proto.CompactTextString(m) } -func (*NewContainerResponse) ProtoMessage() {} -func (*NewContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{23} -} -func (m *NewContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NewContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_NewContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *NewContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_NewContainerResponse.Merge(m, src) -} -func (m 
*NewContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *NewContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_NewContainerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_NewContainerResponse proto.InternalMessageInfo - -type ReleaseContainerRequest struct { - ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReleaseContainerRequest) Reset() { *m = ReleaseContainerRequest{} } -func (m *ReleaseContainerRequest) String() string { return proto.CompactTextString(m) } -func (*ReleaseContainerRequest) ProtoMessage() {} -func (*ReleaseContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{24} -} -func (m *ReleaseContainerRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReleaseContainerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReleaseContainerRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReleaseContainerRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReleaseContainerRequest.Merge(m, src) -} -func (m *ReleaseContainerRequest) XXX_Size() int { - return m.Size() -} -func (m *ReleaseContainerRequest) XXX_DiscardUnknown() { - xxx_messageInfo_ReleaseContainerRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_ReleaseContainerRequest proto.InternalMessageInfo - -func (m *ReleaseContainerRequest) GetContainerID() string { - if m != nil { - return m.ContainerID - } - return "" -} - -type ReleaseContainerResponse struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ReleaseContainerResponse) Reset() { *m = ReleaseContainerResponse{} } -func (m *ReleaseContainerResponse) String() string { return proto.CompactTextString(m) } -func (*ReleaseContainerResponse) ProtoMessage() {} -func (*ReleaseContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{25} -} -func (m *ReleaseContainerResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReleaseContainerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ReleaseContainerResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ReleaseContainerResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReleaseContainerResponse.Merge(m, src) -} -func (m *ReleaseContainerResponse) XXX_Size() int { - return m.Size() -} -func (m *ReleaseContainerResponse) XXX_DiscardUnknown() { - xxx_messageInfo_ReleaseContainerResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_ReleaseContainerResponse proto.InternalMessageInfo - -type ExecMessage struct { - ProcessID string `protobuf:"bytes,1,opt,name=ProcessID,proto3" json:"ProcessID,omitempty"` - // Types that are valid to be assigned to Input: - // *ExecMessage_Init - // *ExecMessage_File - // *ExecMessage_Resize - // *ExecMessage_Started - // *ExecMessage_Exit - // *ExecMessage_Done - Input isExecMessage_Input `protobuf_oneof:"Input"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - 
XXX_sizecache int32 `json:"-"` -} - -func (m *ExecMessage) Reset() { *m = ExecMessage{} } -func (m *ExecMessage) String() string { return proto.CompactTextString(m) } -func (*ExecMessage) ProtoMessage() {} -func (*ExecMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{26} -} -func (m *ExecMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExecMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExecMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecMessage.Merge(m, src) -} -func (m *ExecMessage) XXX_Size() int { - return m.Size() -} -func (m *ExecMessage) XXX_DiscardUnknown() { - xxx_messageInfo_ExecMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecMessage proto.InternalMessageInfo - -type isExecMessage_Input interface { - isExecMessage_Input() - MarshalTo([]byte) (int, error) - Size() int -} - -type ExecMessage_Init struct { - Init *InitMessage `protobuf:"bytes,2,opt,name=Init,proto3,oneof" json:"Init,omitempty"` -} -type ExecMessage_File struct { - File *FdMessage `protobuf:"bytes,3,opt,name=File,proto3,oneof" json:"File,omitempty"` -} -type ExecMessage_Resize struct { - Resize *ResizeMessage `protobuf:"bytes,4,opt,name=Resize,proto3,oneof" json:"Resize,omitempty"` -} -type ExecMessage_Started struct { - Started *StartedMessage `protobuf:"bytes,5,opt,name=Started,proto3,oneof" json:"Started,omitempty"` -} -type ExecMessage_Exit struct { - Exit *ExitMessage `protobuf:"bytes,6,opt,name=Exit,proto3,oneof" json:"Exit,omitempty"` -} -type ExecMessage_Done struct { - Done *DoneMessage `protobuf:"bytes,7,opt,name=Done,proto3,oneof" json:"Done,omitempty"` -} - -func (*ExecMessage_Init) isExecMessage_Input() {} -func (*ExecMessage_File) isExecMessage_Input() {} -func (*ExecMessage_Resize) isExecMessage_Input() {} -func (*ExecMessage_Started) isExecMessage_Input() {} -func (*ExecMessage_Exit) isExecMessage_Input() {} -func (*ExecMessage_Done) isExecMessage_Input() {} - -func (m *ExecMessage) GetInput() isExecMessage_Input { - if m != nil { - return m.Input - } - return nil -} - -func (m *ExecMessage) GetProcessID() string { - if m != nil { - return m.ProcessID - } - return "" -} - -func (m *ExecMessage) GetInit() *InitMessage { - if x, ok := m.GetInput().(*ExecMessage_Init); ok { - return x.Init - } - return nil -} - -func (m *ExecMessage) GetFile() *FdMessage { - if x, ok := m.GetInput().(*ExecMessage_File); ok { - return x.File - } - return nil -} - -func (m *ExecMessage) GetResize() *ResizeMessage { - if x, ok := m.GetInput().(*ExecMessage_Resize); ok { - return x.Resize - } - return nil -} - -func (m *ExecMessage) GetStarted() *StartedMessage { - if x, ok := m.GetInput().(*ExecMessage_Started); ok { - return x.Started - } - return nil -} - -func (m *ExecMessage) GetExit() *ExitMessage { - if x, ok := m.GetInput().(*ExecMessage_Exit); ok { - return x.Exit - } - return nil -} - -func (m *ExecMessage) GetDone() *DoneMessage { - if x, ok := m.GetInput().(*ExecMessage_Done); ok { - return x.Done - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*ExecMessage) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*ExecMessage_Init)(nil), - (*ExecMessage_File)(nil), - (*ExecMessage_Resize)(nil), - (*ExecMessage_Started)(nil), - (*ExecMessage_Exit)(nil), - (*ExecMessage_Done)(nil), - } -} - -type InitMessage struct { - ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` - Meta *pb.Meta `protobuf:"bytes,2,opt,name=Meta,proto3" json:"Meta,omitempty"` - Fds []uint32 `protobuf:"varint,3,rep,packed,name=Fds,proto3" json:"Fds,omitempty"` - Tty bool `protobuf:"varint,4,opt,name=Tty,proto3" json:"Tty,omitempty"` - Security pb.SecurityMode `protobuf:"varint,5,opt,name=Security,proto3,enum=pb.SecurityMode" json:"Security,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *InitMessage) Reset() { *m = InitMessage{} } -func (m *InitMessage) String() string { return proto.CompactTextString(m) } -func (*InitMessage) ProtoMessage() {} -func (*InitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{27} -} -func (m *InitMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *InitMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_InitMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *InitMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_InitMessage.Merge(m, src) -} -func (m *InitMessage) XXX_Size() int { - return m.Size() -} -func (m *InitMessage) XXX_DiscardUnknown() { - xxx_messageInfo_InitMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_InitMessage proto.InternalMessageInfo - -func (m *InitMessage) GetContainerID() string { - if m != nil { - return m.ContainerID - } - return "" -} - -func (m *InitMessage) GetMeta() *pb.Meta { - if m != nil { - return m.Meta - } - return nil -} - -func (m *InitMessage) GetFds() []uint32 { - if m != nil { - return m.Fds - } - return nil -} - -func (m *InitMessage) GetTty() bool { - if m != nil { - return m.Tty - } - return false -} - -func (m *InitMessage) GetSecurity() pb.SecurityMode { - if m != nil { - return m.Security - } - return pb.SecurityMode_SANDBOX -} - -type ExitMessage struct { - Code uint32 `protobuf:"varint,1,opt,name=Code,proto3" json:"Code,omitempty"` - Error *rpc.Status `protobuf:"bytes,2,opt,name=Error,proto3" json:"Error,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ExitMessage) Reset() { *m = ExitMessage{} } -func (m *ExitMessage) String() string { return proto.CompactTextString(m) } -func (*ExitMessage) ProtoMessage() {} -func (*ExitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{28} -} -func (m *ExitMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExitMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ExitMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ExitMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExitMessage.Merge(m, src) -} -func (m *ExitMessage) XXX_Size() int { - return m.Size() -} -func (m *ExitMessage) 
XXX_DiscardUnknown() { - xxx_messageInfo_ExitMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ExitMessage proto.InternalMessageInfo - -func (m *ExitMessage) GetCode() uint32 { - if m != nil { - return m.Code - } - return 0 -} - -func (m *ExitMessage) GetError() *rpc.Status { - if m != nil { - return m.Error - } - return nil -} - -type StartedMessage struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *StartedMessage) Reset() { *m = StartedMessage{} } -func (m *StartedMessage) String() string { return proto.CompactTextString(m) } -func (*StartedMessage) ProtoMessage() {} -func (*StartedMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{29} -} -func (m *StartedMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StartedMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StartedMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StartedMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_StartedMessage.Merge(m, src) -} -func (m *StartedMessage) XXX_Size() int { - return m.Size() -} -func (m *StartedMessage) XXX_DiscardUnknown() { - xxx_messageInfo_StartedMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_StartedMessage proto.InternalMessageInfo - -type DoneMessage struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *DoneMessage) Reset() { *m = DoneMessage{} } -func (m *DoneMessage) String() string { return proto.CompactTextString(m) } -func (*DoneMessage) ProtoMessage() {} -func (*DoneMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{30} -} -func (m *DoneMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DoneMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DoneMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DoneMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_DoneMessage.Merge(m, src) -} -func (m *DoneMessage) XXX_Size() int { - return m.Size() -} -func (m *DoneMessage) XXX_DiscardUnknown() { - xxx_messageInfo_DoneMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_DoneMessage proto.InternalMessageInfo - -type FdMessage struct { - Fd uint32 `protobuf:"varint,1,opt,name=Fd,proto3" json:"Fd,omitempty"` - EOF bool `protobuf:"varint,2,opt,name=EOF,proto3" json:"EOF,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=Data,proto3" json:"Data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *FdMessage) Reset() { *m = FdMessage{} } -func (m *FdMessage) String() string { return proto.CompactTextString(m) } -func (*FdMessage) ProtoMessage() {} -func (*FdMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{31} -} -func (m *FdMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FdMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FdMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err 
:= m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FdMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_FdMessage.Merge(m, src) -} -func (m *FdMessage) XXX_Size() int { - return m.Size() -} -func (m *FdMessage) XXX_DiscardUnknown() { - xxx_messageInfo_FdMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_FdMessage proto.InternalMessageInfo - -func (m *FdMessage) GetFd() uint32 { - if m != nil { - return m.Fd - } - return 0 -} - -func (m *FdMessage) GetEOF() bool { - if m != nil { - return m.EOF - } - return false -} - -func (m *FdMessage) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type ResizeMessage struct { - Rows uint32 `protobuf:"varint,1,opt,name=Rows,proto3" json:"Rows,omitempty"` - Cols uint32 `protobuf:"varint,2,opt,name=Cols,proto3" json:"Cols,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *ResizeMessage) Reset() { *m = ResizeMessage{} } -func (m *ResizeMessage) String() string { return proto.CompactTextString(m) } -func (*ResizeMessage) ProtoMessage() {} -func (*ResizeMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{32} -} -func (m *ResizeMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResizeMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResizeMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResizeMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResizeMessage.Merge(m, src) -} -func (m *ResizeMessage) XXX_Size() int { - return m.Size() -} -func (m *ResizeMessage) XXX_DiscardUnknown() { - xxx_messageInfo_ResizeMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_ResizeMessage proto.InternalMessageInfo - -func (m *ResizeMessage) GetRows() uint32 { - if m != nil { - return m.Rows - } - return 0 -} - -func (m *ResizeMessage) GetCols() uint32 { - if m != nil { - return m.Cols - } - return 0 -} - -func init() { - proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") - proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") - proto.RegisterType((*RefMapDeprecated)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.RefMapDeprecated.RefsEntry") - proto.RegisterType((*Ref)(nil), "moby.buildkit.v1.frontend.Ref") - proto.RegisterType((*RefMap)(nil), "moby.buildkit.v1.frontend.RefMap") - proto.RegisterMapType((map[string]*Ref)(nil), "moby.buildkit.v1.frontend.RefMap.RefsEntry") - proto.RegisterType((*ReturnRequest)(nil), "moby.buildkit.v1.frontend.ReturnRequest") - proto.RegisterType((*ReturnResponse)(nil), "moby.buildkit.v1.frontend.ReturnResponse") - proto.RegisterType((*InputsRequest)(nil), "moby.buildkit.v1.frontend.InputsRequest") - proto.RegisterType((*InputsResponse)(nil), "moby.buildkit.v1.frontend.InputsResponse") - proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.InputsResponse.DefinitionsEntry") - proto.RegisterType((*ResolveImageConfigRequest)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigRequest") - proto.RegisterType((*ResolveImageConfigResponse)(nil), "moby.buildkit.v1.frontend.ResolveImageConfigResponse") - proto.RegisterType((*SolveRequest)(nil), 
"moby.buildkit.v1.frontend.SolveRequest") - proto.RegisterMapType((map[string]*pb.Definition)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendInputsEntry") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.SolveRequest.FrontendOptEntry") - proto.RegisterType((*CacheOptionsEntry)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.v1.frontend.CacheOptionsEntry.AttrsEntry") - proto.RegisterType((*SolveResponse)(nil), "moby.buildkit.v1.frontend.SolveResponse") - proto.RegisterType((*ReadFileRequest)(nil), "moby.buildkit.v1.frontend.ReadFileRequest") - proto.RegisterType((*FileRange)(nil), "moby.buildkit.v1.frontend.FileRange") - proto.RegisterType((*ReadFileResponse)(nil), "moby.buildkit.v1.frontend.ReadFileResponse") - proto.RegisterType((*ReadDirRequest)(nil), "moby.buildkit.v1.frontend.ReadDirRequest") - proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse") - proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest") - proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse") - proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") - proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") - proto.RegisterType((*NewContainerRequest)(nil), "moby.buildkit.v1.frontend.NewContainerRequest") - proto.RegisterType((*NewContainerResponse)(nil), "moby.buildkit.v1.frontend.NewContainerResponse") - proto.RegisterType((*ReleaseContainerRequest)(nil), "moby.buildkit.v1.frontend.ReleaseContainerRequest") - proto.RegisterType((*ReleaseContainerResponse)(nil), "moby.buildkit.v1.frontend.ReleaseContainerResponse") - proto.RegisterType((*ExecMessage)(nil), "moby.buildkit.v1.frontend.ExecMessage") - proto.RegisterType((*InitMessage)(nil), "moby.buildkit.v1.frontend.InitMessage") - proto.RegisterType((*ExitMessage)(nil), "moby.buildkit.v1.frontend.ExitMessage") - proto.RegisterType((*StartedMessage)(nil), "moby.buildkit.v1.frontend.StartedMessage") - proto.RegisterType((*DoneMessage)(nil), "moby.buildkit.v1.frontend.DoneMessage") - proto.RegisterType((*FdMessage)(nil), "moby.buildkit.v1.frontend.FdMessage") - proto.RegisterType((*ResizeMessage)(nil), "moby.buildkit.v1.frontend.ResizeMessage") -} - -func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } - -var fileDescriptor_f1a937782ebbded5 = []byte{ - // 1909 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x8a, 0xa4, 0x48, 0x3e, 0xfe, 0x31, 0x33, 0x4e, 0x53, 0x7a, 0x11, 0x38, 0xcc, 0x22, - 0x55, 0x69, 0x47, 0x59, 0xa6, 0x74, 0x02, 0xb9, 0x72, 0x90, 0xd4, 0x94, 0x28, 0x58, 0x8d, 0x24, - 0xab, 0xe3, 0x14, 0x06, 0x82, 0x14, 0xe8, 0x8a, 0x3b, 0xa4, 0x17, 0xa6, 0x76, 0xb7, 0xb3, 0x43, - 0xcb, 0x4c, 0x2e, 0xed, 0xad, 0xf7, 0x02, 0xbd, 0x16, 0xe8, 0x27, 0xe8, 0xa5, 0xd7, 0x9e, 0x73, - 0xec, 0xb9, 0x07, 0xa3, 0x10, 0xfa, 0x11, 0x7a, 0x6f, 0xf1, 0x66, 0x67, 0xc8, 0x25, 0x45, 0x2d, - 0x49, 0xe4, 0xc4, 0x99, 0xb7, 0xef, 0xf7, 0xe6, 0xfd, 0x9b, 0xf7, 0xde, 0x10, 0x2a, 0x03, 0x47, - 0xb0, 0x4b, 0x67, 0x6c, 0x87, 0x3c, 0x10, 0x01, 0xb9, 0x73, 0x11, 0x9c, 0x8f, 0xed, 0xf3, 0x91, - 0x37, 0x74, 0x5f, 0x7a, 0xc2, 0x7e, 0xf5, 0x33, 0xbb, 0xcf, 0x03, 0x5f, 0x30, 0xdf, 0x35, 0x3f, - 0x1a, 0x78, 0xe2, 0xc5, 0xe8, 0xdc, 0xee, 0x05, 0x17, 0xad, 0x41, 0x30, 0x08, 0x5a, 0x12, 0x71, - 0x3e, 0xea, 0xcb, 0x9d, 
0xdc, 0xc8, 0x55, 0x2c, 0xc9, 0x6c, 0xcf, 0xb3, 0x0f, 0x82, 0x60, 0x30, - 0x64, 0x4e, 0xe8, 0x45, 0x6a, 0xd9, 0xe2, 0x61, 0xaf, 0x15, 0x09, 0x47, 0x8c, 0x22, 0x85, 0xd9, - 0x49, 0x60, 0x50, 0x91, 0x96, 0x56, 0xa4, 0x15, 0x05, 0xc3, 0x57, 0x8c, 0xb7, 0xc2, 0xf3, 0x56, - 0x10, 0x6a, 0xee, 0xd6, 0x8d, 0xdc, 0x4e, 0xe8, 0xb5, 0xc4, 0x38, 0x64, 0x51, 0xeb, 0x32, 0xe0, - 0x2f, 0x19, 0x57, 0x80, 0x07, 0x37, 0x02, 0x46, 0xc2, 0x1b, 0x22, 0xaa, 0xe7, 0x84, 0x11, 0x1e, - 0x82, 0xbf, 0x0a, 0x94, 0x34, 0x5b, 0x04, 0xbe, 0x17, 0x09, 0xcf, 0x1b, 0x78, 0xad, 0x7e, 0x24, - 0x31, 0xf1, 0x29, 0x68, 0x44, 0xcc, 0x6e, 0xfd, 0x31, 0x03, 0x5b, 0x94, 0x45, 0xa3, 0xa1, 0x20, - 0xdb, 0x50, 0xe1, 0xac, 0x7f, 0xc0, 0x42, 0xce, 0x7a, 0x8e, 0x60, 0x6e, 0xdd, 0x68, 0x18, 0xcd, - 0xe2, 0x93, 0x0d, 0x3a, 0x4b, 0x26, 0xbf, 0x86, 0x2a, 0x67, 0xfd, 0x28, 0xc1, 0xb8, 0xd9, 0x30, - 0x9a, 0xa5, 0xf6, 0x87, 0xf6, 0x8d, 0xc1, 0xb0, 0x29, 0xeb, 0x9f, 0x38, 0xe1, 0x14, 0xf2, 0x64, - 0x83, 0xce, 0x09, 0x21, 0x6d, 0xc8, 0x70, 0xd6, 0xaf, 0x67, 0xa4, 0xac, 0xbb, 0xe9, 0xb2, 0x9e, - 0x6c, 0x50, 0x64, 0x26, 0xbb, 0x90, 0x45, 0x29, 0xf5, 0xac, 0x04, 0xbd, 0xbf, 0x54, 0x81, 0x27, - 0x1b, 0x54, 0x02, 0xc8, 0x97, 0x50, 0xb8, 0x60, 0xc2, 0x71, 0x1d, 0xe1, 0xd4, 0xa1, 0x91, 0x69, - 0x96, 0xda, 0xad, 0x54, 0x30, 0x3a, 0xc8, 0x3e, 0x51, 0x88, 0xae, 0x2f, 0xf8, 0x98, 0x4e, 0x04, - 0x98, 0x8f, 0xa0, 0x32, 0xf3, 0x89, 0xd4, 0x20, 0xf3, 0x92, 0x8d, 0x63, 0xff, 0x51, 0x5c, 0x92, - 0xb7, 0x21, 0xf7, 0xca, 0x19, 0x8e, 0x98, 0x74, 0x55, 0x99, 0xc6, 0x9b, 0xbd, 0xcd, 0x87, 0x46, - 0xa7, 0x00, 0x5b, 0x5c, 0x8a, 0xb7, 0xfe, 0x6c, 0x40, 0x6d, 0xde, 0x4f, 0xe4, 0x48, 0x59, 0x68, - 0x48, 0x25, 0x3f, 0x5d, 0xc3, 0xc5, 0x48, 0x88, 0x62, 0x55, 0xa5, 0x08, 0x73, 0x17, 0x8a, 0x13, - 0xd2, 0x32, 0x15, 0x8b, 0x09, 0x15, 0xad, 0x5d, 0xc8, 0x50, 0xd6, 0x27, 0x55, 0xd8, 0xf4, 0x54, - 0x52, 0xd0, 0x4d, 0xcf, 0x25, 0x0d, 0xc8, 0xb8, 0xac, 0xaf, 0x82, 0x5f, 0xb5, 0xc3, 0x73, 0xfb, - 0x80, 0xf5, 0x3d, 0xdf, 0x13, 0x5e, 0xe0, 0x53, 0xfc, 0x64, 0xfd, 0xd5, 0xc0, 0xe4, 0x42, 0xb5, - 0xc8, 0x17, 0x33, 0x76, 0x2c, 0x4f, 0x95, 0x6b, 0xda, 0x3f, 0x4f, 0xd7, 0xfe, 0x93, 0xa4, 0xf6, - 0x4b, 0xf3, 0x27, 0x69, 0x9d, 0x80, 0x0a, 0x65, 0x62, 0xc4, 0x7d, 0xca, 0x7e, 0x37, 0x62, 0x91, - 0x20, 0x3f, 0xd7, 0x11, 0x91, 0xf2, 0x97, 0xa5, 0x15, 0x32, 0x52, 0x05, 0x20, 0x4d, 0xc8, 0x31, - 0xce, 0x03, 0xae, 0xb4, 0x20, 0x76, 0x5c, 0x39, 0x6c, 0x1e, 0xf6, 0xec, 0x67, 0xb2, 0x72, 0xd0, - 0x98, 0xc1, 0xaa, 0x41, 0x55, 0x9f, 0x1a, 0x85, 0x81, 0x1f, 0x31, 0xeb, 0x16, 0x54, 0x8e, 0xfc, - 0x70, 0x24, 0x22, 0xa5, 0x87, 0xf5, 0x0f, 0x03, 0xaa, 0x9a, 0x12, 0xf3, 0x90, 0x6f, 0xa0, 0x34, - 0xf5, 0xb1, 0x76, 0xe6, 0x5e, 0x8a, 0x7e, 0xb3, 0xf8, 0x44, 0x80, 0x94, 0x6f, 0x93, 0xe2, 0xcc, - 0x53, 0xa8, 0xcd, 0x33, 0x2c, 0xf0, 0xf4, 0x07, 0xb3, 0x9e, 0x9e, 0x0f, 0x7c, 0xc2, 0xb3, 0x7f, - 0x32, 0xe0, 0x0e, 0x65, 0xb2, 0x14, 0x1e, 0x5d, 0x38, 0x03, 0xb6, 0x1f, 0xf8, 0x7d, 0x6f, 0xa0, - 0xdd, 0x5c, 0x93, 0x59, 0xa5, 0x25, 0x63, 0x82, 0x35, 0xa1, 0x70, 0x36, 0x74, 0x44, 0x3f, 0xe0, - 0x17, 0x4a, 0x78, 0x19, 0x85, 0x6b, 0x1a, 0x9d, 0x7c, 0x25, 0x0d, 0x28, 0x29, 0xc1, 0x27, 0x81, - 0xcb, 0x64, 0xcd, 0x28, 0xd2, 0x24, 0x89, 0xd4, 0x21, 0x7f, 0x1c, 0x0c, 0x4e, 0x9d, 0x0b, 0x26, - 0x8b, 0x43, 0x91, 0xea, 0xad, 0xf5, 0x7b, 0x03, 0xcc, 0x45, 0x5a, 0x29, 0x17, 0xff, 0x12, 0xb6, - 0x0e, 0xbc, 0x01, 0x8b, 0xe2, 0xe8, 0x17, 0x3b, 0xed, 0xef, 0xdf, 0xbc, 0xb7, 0xf1, 0xaf, 0x37, - 0xef, 0xdd, 0x4f, 0xd4, 0xd5, 0x20, 0x64, 0x7e, 0x2f, 0xf0, 0x85, 0xe3, 0xf9, 0x8c, 0x63, 0x7b, - 0xf8, 0xc8, 0x95, 0x10, 0x3b, 0x46, 0x52, 0x25, 
0x81, 0xbc, 0x03, 0x5b, 0xb1, 0x74, 0x75, 0xed, - 0xd5, 0xce, 0xfa, 0x6f, 0x0e, 0xca, 0xcf, 0x50, 0x01, 0xed, 0x0b, 0x1b, 0x60, 0xea, 0x42, 0x95, - 0x76, 0xf3, 0x8e, 0x4d, 0x70, 0x10, 0x13, 0x0a, 0x87, 0x2a, 0xc4, 0xea, 0xba, 0x4e, 0xf6, 0xe4, - 0x6b, 0x28, 0xe9, 0xf5, 0xd3, 0x50, 0xd4, 0x33, 0x32, 0x47, 0x1e, 0xa6, 0xe4, 0x48, 0x52, 0x13, - 0x3b, 0x01, 0x55, 0x19, 0x92, 0xa0, 0x90, 0xcf, 0xe0, 0xce, 0xd1, 0x45, 0x18, 0x70, 0xb1, 0xef, - 0xf4, 0x5e, 0x30, 0x3a, 0xdb, 0x05, 0xb2, 0x8d, 0x4c, 0xb3, 0x48, 0x6f, 0x66, 0x20, 0x3b, 0xf0, - 0x96, 0x33, 0x1c, 0x06, 0x97, 0xea, 0xd2, 0xc8, 0xf4, 0xaf, 0xe7, 0x1a, 0x46, 0xb3, 0x40, 0xaf, - 0x7f, 0x20, 0x1f, 0xc3, 0xed, 0x04, 0xf1, 0x31, 0xe7, 0xce, 0x18, 0xf3, 0x65, 0x4b, 0xf2, 0x2f, - 0xfa, 0x84, 0x15, 0xec, 0xd0, 0xf3, 0x9d, 0x61, 0x1d, 0x24, 0x4f, 0xbc, 0x21, 0x16, 0x94, 0xbb, - 0xaf, 0x51, 0x25, 0xc6, 0x1f, 0x0b, 0xc1, 0xeb, 0x25, 0x19, 0x8a, 0x19, 0x1a, 0x39, 0x83, 0xb2, - 0x54, 0x38, 0xd6, 0x3d, 0xaa, 0x97, 0xa5, 0xd3, 0x76, 0x52, 0x9c, 0x26, 0xd9, 0x9f, 0x86, 0x89, - 0xab, 0x34, 0x23, 0x81, 0xf4, 0xa0, 0xaa, 0x1d, 0x17, 0xdf, 0xc1, 0x7a, 0x45, 0xca, 0x7c, 0xb4, - 0x6e, 0x20, 0x62, 0x74, 0x7c, 0xc4, 0x9c, 0x48, 0x4c, 0x83, 0x2e, 0x5e, 0x37, 0x47, 0xb0, 0x7a, - 0x55, 0xda, 0x3c, 0xd9, 0x9b, 0x9f, 0x43, 0x6d, 0x3e, 0x96, 0xeb, 0x14, 0x7d, 0xf3, 0x57, 0x70, - 0x7b, 0x81, 0x0a, 0x3f, 0xa8, 0x1e, 0xfc, 0xcd, 0x80, 0xb7, 0xae, 0xf9, 0x8d, 0x10, 0xc8, 0x7e, - 0x35, 0x0e, 0x99, 0x12, 0x29, 0xd7, 0xe4, 0x04, 0x72, 0x18, 0x97, 0xa8, 0xbe, 0x29, 0x9d, 0xb6, - 0xbb, 0x4e, 0x20, 0x6c, 0x89, 0x8c, 0x1d, 0x16, 0x4b, 0x31, 0x1f, 0x02, 0x4c, 0x89, 0x6b, 0xb5, - 0xbe, 0x6f, 0xa0, 0xa2, 0xa2, 0xa2, 0xca, 0x43, 0x2d, 0x9e, 0x52, 0x14, 0x18, 0x67, 0x90, 0x69, - 0xbb, 0xc8, 0xac, 0xd9, 0x2e, 0xac, 0xef, 0xe0, 0x16, 0x65, 0x8e, 0x7b, 0xe8, 0x0d, 0xd9, 0xcd, - 0x55, 0x11, 0xef, 0xba, 0x37, 0x64, 0x67, 0x8e, 0x78, 0x31, 0xb9, 0xeb, 0x6a, 0x4f, 0xf6, 0x20, - 0x47, 0x1d, 0x7f, 0xc0, 0xd4, 0xd1, 0x1f, 0xa4, 0x1c, 0x2d, 0x0f, 0x41, 0x5e, 0x1a, 0x43, 0xac, - 0x47, 0x50, 0x9c, 0xd0, 0xb0, 0x52, 0x3d, 0xed, 0xf7, 0x23, 0x16, 0x57, 0xbd, 0x0c, 0x55, 0x3b, - 0xa4, 0x1f, 0x33, 0x7f, 0xa0, 0x8e, 0xce, 0x50, 0xb5, 0xb3, 0xb6, 0x71, 0x54, 0xd1, 0x9a, 0x2b, - 0xd7, 0x10, 0xc8, 0x1e, 0xe0, 0x3c, 0x65, 0xc8, 0x0b, 0x26, 0xd7, 0x96, 0x8b, 0x6d, 0xce, 0x71, - 0x0f, 0x3c, 0x7e, 0xb3, 0x81, 0x75, 0xc8, 0x1f, 0x78, 0x3c, 0x61, 0x9f, 0xde, 0x92, 0x6d, 0x6c, - 0x80, 0xbd, 0xe1, 0xc8, 0x45, 0x6b, 0x05, 0xe3, 0xbe, 0xaa, 0xf4, 0x73, 0x54, 0xeb, 0x8b, 0xd8, - 0x8f, 0xf2, 0x14, 0xa5, 0xcc, 0x0e, 0xe4, 0x99, 0x2f, 0xb8, 0xc7, 0x74, 0x97, 0x24, 0x76, 0x3c, - 0x02, 0xdb, 0x72, 0x04, 0x96, 0xdd, 0x98, 0x6a, 0x16, 0x6b, 0x17, 0x6e, 0x21, 0x21, 0x3d, 0x10, - 0x04, 0xb2, 0x09, 0x25, 0xe5, 0xda, 0xda, 0x83, 0xda, 0x14, 0xa8, 0x8e, 0xde, 0x86, 0x2c, 0x0e, - 0xd8, 0xaa, 0x8c, 0x2f, 0x3a, 0x57, 0x7e, 0xb7, 0x2a, 0x50, 0x3a, 0xf3, 0x7c, 0xdd, 0x0f, 0xad, - 0x2b, 0x03, 0xca, 0x67, 0x81, 0x3f, 0xed, 0x44, 0x67, 0x70, 0x4b, 0xdf, 0xc0, 0xc7, 0x67, 0x47, - 0xfb, 0x4e, 0xa8, 0x4d, 0x69, 0x5c, 0x0f, 0xb3, 0x7a, 0x0b, 0xd8, 0x31, 0x63, 0x27, 0x8b, 0x4d, - 0x8b, 0xce, 0xc3, 0xc9, 0x2f, 0x20, 0x7f, 0x7c, 0xdc, 0x91, 0x92, 0x36, 0xd7, 0x92, 0xa4, 0x61, - 0xe4, 0x73, 0xc8, 0x3f, 0x97, 0x4f, 0x94, 0x48, 0x35, 0x96, 0x05, 0x29, 0x17, 0x1b, 0x1a, 0xb3, - 0x51, 0xd6, 0x0b, 0xb8, 0x4b, 0x35, 0xc8, 0xfa, 0x8f, 0x01, 0xb7, 0x4f, 0xd9, 0xe5, 0xbe, 0x6e, - 0x9e, 0xda, 0xdb, 0x0d, 0x28, 0x4d, 0x68, 0x47, 0x07, 0xca, 0xeb, 0x49, 0x12, 0x79, 0x1f, 0xb6, - 0x4e, 0x82, 0x91, 0x2f, 0xb4, 0xea, 0x45, 0xac, 0x33, 0x92, 0x42, 0xd5, 
0x07, 0xf2, 0x13, 0xc8, - 0x9f, 0x32, 0x81, 0x4f, 0x28, 0x99, 0x27, 0xd5, 0x76, 0x09, 0x79, 0x4e, 0x99, 0xc0, 0x89, 0x80, - 0xea, 0x6f, 0x38, 0x66, 0x84, 0x7a, 0xcc, 0xc8, 0x2e, 0x1a, 0x33, 0xf4, 0x57, 0xb2, 0x0b, 0xa5, - 0x5e, 0xe0, 0x47, 0x82, 0x3b, 0x1e, 0x1e, 0x9c, 0x93, 0xcc, 0x3f, 0x42, 0xe6, 0xd8, 0x9e, 0xfd, - 0xe9, 0x47, 0x9a, 0xe4, 0xb4, 0xde, 0x81, 0xb7, 0x67, 0xad, 0x54, 0x33, 0xde, 0x23, 0xf8, 0x31, - 0x65, 0x43, 0xe6, 0x44, 0x6c, 0x7d, 0x0f, 0x58, 0x26, 0xd4, 0xaf, 0x83, 0x95, 0xe0, 0xbf, 0x67, - 0xa0, 0xd4, 0x7d, 0xcd, 0x7a, 0x27, 0x2c, 0x8a, 0x9c, 0x01, 0x23, 0xef, 0x42, 0xf1, 0x8c, 0x07, - 0x3d, 0x16, 0x45, 0x13, 0x59, 0x53, 0x02, 0xf9, 0x0c, 0xb2, 0x47, 0xbe, 0x27, 0x54, 0xc5, 0xde, - 0x4e, 0x9d, 0x1f, 0x3d, 0xa1, 0x64, 0xe2, 0xdb, 0x09, 0xb7, 0x64, 0x0f, 0xb2, 0x98, 0xef, 0xab, - 0xd4, 0x1c, 0x37, 0x81, 0x45, 0x0c, 0xe9, 0xc8, 0xd7, 0xa6, 0xf7, 0x2d, 0x53, 0x9e, 0x6f, 0xa6, - 0x17, 0x4b, 0xef, 0x5b, 0x36, 0x95, 0xa0, 0x90, 0xa4, 0x0b, 0xf9, 0x67, 0xc2, 0xe1, 0x38, 0x72, - 0xc4, 0x11, 0xb9, 0x97, 0xd6, 0x53, 0x63, 0xce, 0xa9, 0x14, 0x8d, 0x45, 0x27, 0x74, 0x5f, 0x7b, - 0x42, 0x0e, 0x14, 0xe9, 0x4e, 0x40, 0xb6, 0x84, 0x21, 0xb8, 0x45, 0xf4, 0x41, 0xe0, 0xb3, 0x7a, - 0x7e, 0x29, 0x1a, 0xd9, 0x12, 0x68, 0xdc, 0x76, 0xf2, 0x90, 0x93, 0x4d, 0xd5, 0xfa, 0x8b, 0x01, - 0xa5, 0x84, 0x8f, 0x57, 0xb8, 0x07, 0xef, 0x42, 0x16, 0x1f, 0x9b, 0x2a, 0x76, 0x05, 0x79, 0x0b, - 0x98, 0x70, 0xa8, 0xa4, 0x62, 0xd5, 0x3a, 0x74, 0xe3, 0xbb, 0x59, 0xa1, 0xb8, 0x44, 0xca, 0x57, - 0x62, 0x2c, 0xdd, 0x5d, 0xa0, 0xb8, 0x24, 0x3b, 0x50, 0x78, 0xc6, 0x7a, 0x23, 0xee, 0x89, 0xb1, - 0x74, 0x60, 0xb5, 0x5d, 0x43, 0x29, 0x9a, 0x26, 0x2f, 0xcb, 0x84, 0xc3, 0xfa, 0x12, 0x13, 0x6b, - 0xaa, 0x20, 0x81, 0xec, 0x3e, 0x8e, 0xdc, 0xa8, 0x59, 0x85, 0xca, 0x35, 0xbe, 0x7a, 0xba, 0xcb, - 0x5e, 0x3d, 0x5d, 0xfd, 0xea, 0x99, 0x0d, 0x08, 0x16, 0xc1, 0x84, 0x83, 0xac, 0xc7, 0x50, 0x9c, - 0x24, 0x0d, 0x3e, 0x38, 0x0f, 0x5d, 0x75, 0xd2, 0xe6, 0xa1, 0x8b, 0xa6, 0x74, 0x9f, 0x1e, 0xca, - 0x53, 0x0a, 0x14, 0x97, 0x93, 0x96, 0x93, 0x49, 0xb4, 0x9c, 0x5d, 0x7c, 0xcf, 0x25, 0x32, 0x07, - 0x99, 0x68, 0x70, 0x19, 0x69, 0x95, 0x71, 0x1d, 0x9b, 0x31, 0x8c, 0xa4, 0x2c, 0x69, 0xc6, 0x30, - 0x6a, 0xff, 0xaf, 0x00, 0xc5, 0xe3, 0xe3, 0x4e, 0x87, 0x7b, 0xee, 0x80, 0x91, 0x3f, 0x18, 0x40, - 0xae, 0x3f, 0x13, 0xc8, 0x27, 0xe9, 0x09, 0xbb, 0xf8, 0xad, 0x63, 0x7e, 0xba, 0x26, 0x4a, 0x75, - 0x80, 0xaf, 0x21, 0x27, 0xa7, 0x0f, 0xf2, 0xd3, 0x15, 0xa7, 0x46, 0xb3, 0xb9, 0x9c, 0x51, 0xc9, - 0xee, 0x41, 0x41, 0x77, 0x70, 0x72, 0x3f, 0x55, 0xbd, 0x99, 0x01, 0xc5, 0xfc, 0x70, 0x25, 0x5e, - 0x75, 0xc8, 0x6f, 0x21, 0xaf, 0x1a, 0x33, 0xb9, 0xb7, 0x04, 0x37, 0x1d, 0x11, 0xcc, 0xfb, 0xab, - 0xb0, 0x4e, 0xcd, 0xd0, 0x0d, 0x38, 0xd5, 0x8c, 0xb9, 0xf6, 0x9e, 0x6a, 0xc6, 0xb5, 0x8e, 0xfe, - 0x1c, 0xb2, 0xd8, 0xa9, 0x49, 0xda, 0x35, 0x4f, 0xb4, 0x72, 0x33, 0x2d, 0x5c, 0x33, 0x2d, 0xfe, - 0x37, 0x58, 0x0e, 0xe5, 0x6b, 0x27, 0xbd, 0x10, 0x26, 0xfe, 0x9e, 0x30, 0xef, 0xad, 0xc0, 0x39, - 0x15, 0xaf, 0x5e, 0x0a, 0xcd, 0x15, 0xfe, 0x23, 0x58, 0x2e, 0x7e, 0xee, 0xdf, 0x88, 0x00, 0xca, - 0xc9, 0x2e, 0x47, 0xec, 0x14, 0xe8, 0x82, 0xa6, 0x6f, 0xb6, 0x56, 0xe6, 0x57, 0x07, 0x7e, 0x87, - 0x53, 0xe7, 0x6c, 0x07, 0x24, 0xed, 0x54, 0x77, 0x2c, 0xec, 0xb5, 0xe6, 0x83, 0xb5, 0x30, 0xea, - 0x70, 0x27, 0xee, 0xb0, 0xaa, 0x8b, 0x92, 0xf4, 0x86, 0x31, 0xe9, 0xc4, 0xe6, 0x8a, 0x7c, 0x4d, - 0xe3, 0x63, 0xa3, 0x53, 0xfe, 0xfe, 0xea, 0xae, 0xf1, 0xcf, 0xab, 0xbb, 0xc6, 0xbf, 0xaf, 0xee, - 0x1a, 0xe7, 0x5b, 0xf2, 0x1f, 0xda, 0x07, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x9d, 0x1a, - 
0x7c, 0xf3, 0x16, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// LLBBridgeClient is the client API for LLBBridge service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type LLBBridgeClient interface { - // apicaps:CapResolveImage - ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) - // apicaps:CapSolveBase - Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) - // apicaps:CapReadFile - ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) - // apicaps:CapReadDir - ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) - // apicaps:CapStatFile - StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) - Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) - Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) - // apicaps:CapFrontendInputs - Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) - NewContainer(ctx context.Context, in *NewContainerRequest, opts ...grpc.CallOption) (*NewContainerResponse, error) - ReleaseContainer(ctx context.Context, in *ReleaseContainerRequest, opts ...grpc.CallOption) (*ReleaseContainerResponse, error) - ExecProcess(ctx context.Context, opts ...grpc.CallOption) (LLBBridge_ExecProcessClient, error) -} - -type lLBBridgeClient struct { - cc *grpc.ClientConn -} - -func NewLLBBridgeClient(cc *grpc.ClientConn) LLBBridgeClient { - return &lLBBridgeClient{cc} -} - -func (c *lLBBridgeClient) ResolveImageConfig(ctx context.Context, in *ResolveImageConfigRequest, opts ...grpc.CallOption) (*ResolveImageConfigResponse, error) { - out := new(ResolveImageConfigResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error) { - out := new(SolveResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Solve", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error) { - out := new(ReadFileResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) { - out := new(ReadDirResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) { - out := new(StatFileResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/StatFile", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) { - out := new(PongResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error) { - out := new(ReturnResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Return", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) Inputs(ctx context.Context, in *InputsRequest, opts ...grpc.CallOption) (*InputsResponse, error) { - out := new(InputsResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Inputs", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) NewContainer(ctx context.Context, in *NewContainerRequest, opts ...grpc.CallOption) (*NewContainerResponse, error) { - out := new(NewContainerResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/NewContainer", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) ReleaseContainer(ctx context.Context, in *ReleaseContainerRequest, opts ...grpc.CallOption) (*ReleaseContainerResponse, error) { - out := new(ReleaseContainerResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReleaseContainer", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *lLBBridgeClient) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (LLBBridge_ExecProcessClient, error) { - stream, err := c.cc.NewStream(ctx, &_LLBBridge_serviceDesc.Streams[0], "/moby.buildkit.v1.frontend.LLBBridge/ExecProcess", opts...) - if err != nil { - return nil, err - } - x := &lLBBridgeExecProcessClient{stream} - return x, nil -} - -type LLBBridge_ExecProcessClient interface { - Send(*ExecMessage) error - Recv() (*ExecMessage, error) - grpc.ClientStream -} - -type lLBBridgeExecProcessClient struct { - grpc.ClientStream -} - -func (x *lLBBridgeExecProcessClient) Send(m *ExecMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *lLBBridgeExecProcessClient) Recv() (*ExecMessage, error) { - m := new(ExecMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// LLBBridgeServer is the server API for LLBBridge service. 
-type LLBBridgeServer interface { - // apicaps:CapResolveImage - ResolveImageConfig(context.Context, *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) - // apicaps:CapSolveBase - Solve(context.Context, *SolveRequest) (*SolveResponse, error) - // apicaps:CapReadFile - ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error) - // apicaps:CapReadDir - ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error) - // apicaps:CapStatFile - StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error) - Ping(context.Context, *PingRequest) (*PongResponse, error) - Return(context.Context, *ReturnRequest) (*ReturnResponse, error) - // apicaps:CapFrontendInputs - Inputs(context.Context, *InputsRequest) (*InputsResponse, error) - NewContainer(context.Context, *NewContainerRequest) (*NewContainerResponse, error) - ReleaseContainer(context.Context, *ReleaseContainerRequest) (*ReleaseContainerResponse, error) - ExecProcess(LLBBridge_ExecProcessServer) error -} - -// UnimplementedLLBBridgeServer can be embedded to have forward compatible implementations. -type UnimplementedLLBBridgeServer struct { -} - -func (*UnimplementedLLBBridgeServer) ResolveImageConfig(ctx context.Context, req *ResolveImageConfigRequest) (*ResolveImageConfigResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ResolveImageConfig not implemented") -} -func (*UnimplementedLLBBridgeServer) Solve(ctx context.Context, req *SolveRequest) (*SolveResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Solve not implemented") -} -func (*UnimplementedLLBBridgeServer) ReadFile(ctx context.Context, req *ReadFileRequest) (*ReadFileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadFile not implemented") -} -func (*UnimplementedLLBBridgeServer) ReadDir(ctx context.Context, req *ReadDirRequest) (*ReadDirResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReadDir not implemented") -} -func (*UnimplementedLLBBridgeServer) StatFile(ctx context.Context, req *StatFileRequest) (*StatFileResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StatFile not implemented") -} -func (*UnimplementedLLBBridgeServer) Ping(ctx context.Context, req *PingRequest) (*PongResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") -} -func (*UnimplementedLLBBridgeServer) Return(ctx context.Context, req *ReturnRequest) (*ReturnResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Return not implemented") -} -func (*UnimplementedLLBBridgeServer) Inputs(ctx context.Context, req *InputsRequest) (*InputsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Inputs not implemented") -} -func (*UnimplementedLLBBridgeServer) NewContainer(ctx context.Context, req *NewContainerRequest) (*NewContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method NewContainer not implemented") -} -func (*UnimplementedLLBBridgeServer) ReleaseContainer(ctx context.Context, req *ReleaseContainerRequest) (*ReleaseContainerResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ReleaseContainer not implemented") -} -func (*UnimplementedLLBBridgeServer) ExecProcess(srv LLBBridge_ExecProcessServer) error { - return status.Errorf(codes.Unimplemented, "method ExecProcess not implemented") -} - -func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) { - s.RegisterService(&_LLBBridge_serviceDesc, srv) -} - 
-func _LLBBridge_ResolveImageConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ResolveImageConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ResolveImageConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ResolveImageConfig", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ResolveImageConfig(ctx, req.(*ResolveImageConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_Solve_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SolveRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Solve(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Solve", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Solve(ctx, req.(*SolveRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_ReadFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadFileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ReadFile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadFile", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ReadFile(ctx, req.(*ReadFileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReadDirRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ReadDir(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ReadDir(ctx, req.(*ReadDirRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatFileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).StatFile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/StatFile", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).StatFile(ctx, req.(*StatFileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PingRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(LLBBridgeServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Ping(ctx, req.(*PingRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_Return_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReturnRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Return(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Return", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Return(ctx, req.(*ReturnRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_Inputs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(InputsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).Inputs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Inputs", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).Inputs(ctx, req.(*InputsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_NewContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(NewContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).NewContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/NewContainer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).NewContainer(ctx, req.(*NewContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_ReleaseContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ReleaseContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(LLBBridgeServer).ReleaseContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReleaseContainer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(LLBBridgeServer).ReleaseContainer(ctx, req.(*ReleaseContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _LLBBridge_ExecProcess_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(LLBBridgeServer).ExecProcess(&lLBBridgeExecProcessServer{stream}) -} - -type LLBBridge_ExecProcessServer interface { - Send(*ExecMessage) error - Recv() (*ExecMessage, error) - grpc.ServerStream -} - -type lLBBridgeExecProcessServer struct { - grpc.ServerStream -} - -func (x *lLBBridgeExecProcessServer) Send(m *ExecMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *lLBBridgeExecProcessServer) Recv() (*ExecMessage, error) { - m := new(ExecMessage) - if err 
:= x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _LLBBridge_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.buildkit.v1.frontend.LLBBridge", - HandlerType: (*LLBBridgeServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "ResolveImageConfig", - Handler: _LLBBridge_ResolveImageConfig_Handler, - }, - { - MethodName: "Solve", - Handler: _LLBBridge_Solve_Handler, - }, - { - MethodName: "ReadFile", - Handler: _LLBBridge_ReadFile_Handler, - }, - { - MethodName: "ReadDir", - Handler: _LLBBridge_ReadDir_Handler, - }, - { - MethodName: "StatFile", - Handler: _LLBBridge_StatFile_Handler, - }, - { - MethodName: "Ping", - Handler: _LLBBridge_Ping_Handler, - }, - { - MethodName: "Return", - Handler: _LLBBridge_Return_Handler, - }, - { - MethodName: "Inputs", - Handler: _LLBBridge_Inputs_Handler, - }, - { - MethodName: "NewContainer", - Handler: _LLBBridge_NewContainer_Handler, - }, - { - MethodName: "ReleaseContainer", - Handler: _LLBBridge_ReleaseContainer_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ExecProcess", - Handler: _LLBBridge_ExecProcess_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "gateway.proto", -} - -func (m *Result) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Result) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Result) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Metadata) > 0 { - for k := range m.Metadata { - v := m.Metadata[k] - baseI := i - if len(v) > 0 { - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - if m.Result != nil { - { - size := m.Result.Size() - i -= size - if _, err := m.Result.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *Result_RefDeprecated) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Result_RefDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - i -= len(m.RefDeprecated) - copy(dAtA[i:], m.RefDeprecated) - i = encodeVarintGateway(dAtA, i, uint64(len(m.RefDeprecated))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} -func (m *Result_RefsDeprecated) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Result_RefsDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.RefsDeprecated != nil { - { - size, err := m.RefsDeprecated.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *Result_Ref) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Result_Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Ref != nil { - { - size, err := 
m.Ref.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *Result_Refs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Result_Refs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Refs != nil { - { - size, err := m.Refs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *RefMapDeprecated) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RefMapDeprecated) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RefMapDeprecated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Refs) > 0 { - for k := range m.Refs { - v := m.Refs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Ref) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ref) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ref) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Def != nil { - { - size, err := m.Def.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RefMap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RefMap) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RefMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Refs) > 0 { - for k := range m.Refs { - v := m.Refs[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = 
encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ReturnRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReturnRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReturnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Error != nil { - { - size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReturnResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReturnResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReturnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *InputsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InputsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InputsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *InputsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InputsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InputsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Definitions) > 0 { - for k := range m.Definitions { - v := m.Definitions[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ResolveImageConfigRequest) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResolveImageConfigRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResolveImageConfigRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.LogName) > 0 { - i -= len(m.LogName) - copy(dAtA[i:], m.LogName) - i = encodeVarintGateway(dAtA, i, uint64(len(m.LogName))) - i-- - dAtA[i] = 0x22 - } - if len(m.ResolveMode) > 0 { - i -= len(m.ResolveMode) - copy(dAtA[i:], m.ResolveMode) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ResolveMode))) - i-- - dAtA[i] = 0x1a - } - if m.Platform != nil { - { - size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResolveImageConfigResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResolveImageConfigResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResolveImageConfigResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Config) > 0 { - i -= len(m.Config) - copy(dAtA[i:], m.Config) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Config))) - i-- - dAtA[i] = 0x12 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SolveRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SolveRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SolveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Evaluate { - i-- - if m.Evaluate { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x70 - } - if len(m.FrontendInputs) > 0 { - for k := range m.FrontendInputs { - v := m.FrontendInputs[k] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x6a - } - } - if len(m.CacheImports) > 0 { - for iNdEx := len(m.CacheImports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.CacheImports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - } - if len(m.ExporterAttr) > 0 { - i -= len(m.ExporterAttr) - copy(dAtA[i:], m.ExporterAttr) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ExporterAttr))) - i-- - dAtA[i] = 0x5a - } - if m.Final { - i-- - if m.Final { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - if m.AllowResultArrayRef { - i-- - if m.AllowResultArrayRef { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.AllowResultReturn { - i-- - if m.AllowResultReturn { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.ImportCacheRefsDeprecated) > 0 { - for iNdEx := len(m.ImportCacheRefsDeprecated) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ImportCacheRefsDeprecated[iNdEx]) - copy(dAtA[i:], m.ImportCacheRefsDeprecated[iNdEx]) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ImportCacheRefsDeprecated[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.FrontendOpt) > 0 { - for k := range m.FrontendOpt { - v := m.FrontendOpt[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Frontend) > 0 { - i -= len(m.Frontend) - copy(dAtA[i:], m.Frontend) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Frontend))) - i-- - dAtA[i] = 0x12 - } - if m.Definition != nil { - { - size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CacheOptionsEntry) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheOptionsEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CacheOptionsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Attrs) > 0 { - for k := range m.Attrs { - v := m.Attrs[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGateway(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintGateway(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintGateway(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Type) > 0 { - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SolveResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SolveResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SolveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - 
i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Result != nil { - { - size, err := m.Result.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadFileRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadFileRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Range != nil { - { - size, err := m.Range.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.FilePath) > 0 { - i -= len(m.FilePath) - copy(dAtA[i:], m.FilePath) - i = encodeVarintGateway(dAtA, i, uint64(len(m.FilePath))) - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FileRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Length != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Length)) - i-- - dAtA[i] = 0x10 - } - if m.Offset != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Offset)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ReadFileResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*ReadDirRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.IncludePattern) > 0 { - i -= len(m.IncludePattern) - copy(dAtA[i:], m.IncludePattern) - i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern))) - i-- - dAtA[i] = 0x1a - } - if len(m.DirPath) > 0 { - i -= len(m.DirPath) - copy(dAtA[i:], m.DirPath) - i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath))) - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReadDirResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Entries) > 0 { - for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *StatFileRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatFileRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - } - if len(m.Ref) > 0 { - i -= len(m.Ref) - copy(dAtA[i:], m.Ref) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *StatFileResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatFileResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatFileResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Stat != nil { - { - size, err := m.Stat.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *PingRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if 
err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PingRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PingRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *PongResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PongResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PongResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Workers) > 0 { - for iNdEx := len(m.Workers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Workers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.LLBCaps) > 0 { - for iNdEx := len(m.LLBCaps) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.LLBCaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.FrontendAPICaps) > 0 { - for iNdEx := len(m.FrontendAPICaps) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.FrontendAPICaps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *NewContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NewContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NewContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Constraints != nil { - { - size, err := m.Constraints.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Platform != nil { - { - size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Network != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Network)) - i-- - dAtA[i] = 0x18 - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - 
i, nil -} - -func (m *NewContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NewContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NewContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ReleaseContainerRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReleaseContainerRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReleaseContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ReleaseContainerResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReleaseContainerResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReleaseContainerResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *ExecMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Input != nil { - { - size := m.Input.Size() - i -= size - if _, err := m.Input.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.ProcessID) > 0 { - i -= len(m.ProcessID) - copy(dAtA[i:], m.ProcessID) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ProcessID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecMessage_Init) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecMessage_Init) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Init != nil { - { - size, err := m.Init.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *ExecMessage_File) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecMessage_File) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.File != nil { - { - size, err := m.File.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *ExecMessage_Resize) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecMessage_Resize) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Resize != nil { - { - size, err := m.Resize.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *ExecMessage_Started) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecMessage_Started) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Started != nil { - { - size, err := m.Started.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *ExecMessage_Exit) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecMessage_Exit) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Exit != nil { - { - size, err := m.Exit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *ExecMessage_Done) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecMessage_Done) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Done != nil { - { - size, err := m.Done.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} -func (m *InitMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InitMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Security != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Security)) - i-- - dAtA[i] = 0x28 - } - if m.Tty { - i-- - if m.Tty { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.Fds) > 0 { - dAtA24 := make([]byte, len(m.Fds)*10) - var j23 int - for _, num := range m.Fds { - for num >= 1<<7 { - dAtA24[j23] = uint8(uint64(num)&0x7f | 0x80) - num >>= 7 - j23++ - } - dAtA24[j23] = uint8(num) - j23++ - } - i -= j23 - copy(dAtA[i:], dAtA24[:j23]) - i = encodeVarintGateway(dAtA, i, uint64(j23)) - i-- - dAtA[i] = 0x1a - } - if m.Meta != nil { - { - size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.ContainerID) > 0 { - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintGateway(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExitMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExitMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Error != nil { - { - size, err := m.Error.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGateway(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Code != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *StartedMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StartedMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StartedMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *DoneMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DoneMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DoneMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - return len(dAtA) - i, nil -} - -func (m *FdMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FdMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FdMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintGateway(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x1a - } - if m.EOF { - i-- - if m.EOF { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.Fd != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Fd)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ResizeMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResizeMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResizeMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if m.Cols != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Cols)) - i-- - dAtA[i] = 0x10 - } - if m.Rows != 0 { - i = encodeVarintGateway(dAtA, i, uint64(m.Rows)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { - offset -= sovGateway(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Result) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Result != nil { - n += m.Result.Size() - } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - l = 0 - if len(v) > 0 { - l = 1 + len(v) + sovGateway(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Result_RefDeprecated) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.RefDeprecated) - n += 1 + l + sovGateway(uint64(l)) - return n -} -func (m *Result_RefsDeprecated) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.RefsDeprecated != nil { - l = m.RefsDeprecated.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} -func (m *Result_Ref) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Ref != nil { - l = m.Ref.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} -func (m *Result_Refs) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Refs != nil { - l = m.Refs.Size() - n += 1 + l + sovGateway(uint64(l)) - } - return n -} -func (m *RefMapDeprecated) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Refs) > 0 { - for k, v := range m.Refs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Ref) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Def != nil { - l = m.Def.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *RefMap) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Refs) > 0 { - for k, v := range m.Refs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovGateway(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReturnRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.Error 
!= nil { - l = m.Error.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReturnResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InputsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *InputsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Definitions) > 0 { - for k, v := range m.Definitions { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovGateway(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResolveImageConfigRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Platform != nil { - l = m.Platform.Size() - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.ResolveMode) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.LogName) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ResolveImageConfigResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Config) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SolveRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Definition != nil { - l = m.Definition.Size() - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Frontend) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if len(m.FrontendOpt) > 0 { - for k, v := range m.FrontendOpt { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if len(m.ImportCacheRefsDeprecated) > 0 { - for _, s := range m.ImportCacheRefsDeprecated { - l = len(s) - n += 1 + l + sovGateway(uint64(l)) - } - } - if m.AllowResultReturn { - n += 2 - } - if m.AllowResultArrayRef { - n += 2 - } - if m.Final { - n += 2 - } - l = len(m.ExporterAttr) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if len(m.CacheImports) > 0 { - for _, e := range m.CacheImports { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - if len(m.FrontendInputs) > 0 { - for k, v := range m.FrontendInputs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovGateway(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if m.Evaluate { - n += 2 - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *CacheOptionsEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGateway(uint64(len(k))) + 1 + len(v) + sovGateway(uint64(len(v))) - n += 
mapEntrySize + 1 + sovGateway(uint64(mapEntrySize)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SolveResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Result != nil { - l = m.Result.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadFileRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.FilePath) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.Range != nil { - l = m.Range.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *FileRange) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Offset != 0 { - n += 1 + sovGateway(uint64(m.Offset)) - } - if m.Length != 0 { - n += 1 + sovGateway(uint64(m.Length)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadFileResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadDirRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.DirPath) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.IncludePattern) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ReadDirResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Entries) > 0 { - for _, e := range m.Entries { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatFileRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Ref) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - l = len(m.Path) - if l > 0 { - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *StatFileResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Stat != nil { - l = m.Stat.Size() - n += 1 + l + sovGateway(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PingRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PongResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.FrontendAPICaps) > 0 { - for _, e := range m.FrontendAPICaps { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - if len(m.LLBCaps) > 0 { - for _, e := range m.LLBCaps { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - if len(m.Workers) > 0 { - for _, e := range m.Workers { - l = e.Size() - n += 1 + l + sovGateway(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NewContainerRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l 
[generated protobuf marshaling code: the remainder of this hunk consists of deleted (`-`) vendored lines of gogo-generated Go, removing the Size() and Unmarshal() methods for the gateway API messages (Result, RefMapDeprecated, Ref, RefMap, ReturnRequest/Response, InputsRequest/Response, ResolveImageConfigRequest/Response, SolveRequest/Response, CacheOptionsEntry, ReadFileRequest/Response, FileRange, ReadDirRequest/Response, StatFileRequest, ExecMessage and its oneof wrappers, InitMessage, ExitMessage, StartedMessage, DoneMessage, FdMessage, ResizeMessage, NewContainerRequest/Response, ReleaseContainerRequest/Response) together with the sovGateway/sozGateway varint size helpers]
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatFileResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatFileResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatFileResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stat == nil { - m.Stat = &types.Stat{} - } - if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PingRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PingRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PingRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PongResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PongResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PongResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FrontendAPICaps", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FrontendAPICaps = append(m.FrontendAPICaps, pb1.APICap{}) - if err := m.FrontendAPICaps[len(m.FrontendAPICaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LLBCaps", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LLBCaps = append(m.LLBCaps, pb1.APICap{}) - if err := m.LLBCaps[len(m.LLBCaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Workers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Workers = append(m.Workers, &types1.WorkerRecord{}) - if err := m.Workers[len(m.Workers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NewContainerRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NewContainerRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NewContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &pb.Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - m.Network = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Network |= pb.NetMode(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Platform == nil { - m.Platform = &pb.Platform{} - } - if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Constraints == nil { - m.Constraints = &pb.WorkerConstraints{} - } - if err := m.Constraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NewContainerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NewContainerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NewContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReleaseContainerRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReleaseContainerRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReleaseContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReleaseContainerResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReleaseContainerResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReleaseContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProcessID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProcessID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Init", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &InitMessage{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Input = &ExecMessage_Init{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &FdMessage{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Input = &ExecMessage_File{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resize", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ResizeMessage{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
m.Input = &ExecMessage_Resize{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &StartedMessage{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Input = &ExecMessage_Started{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ExitMessage{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Input = &ExecMessage_Exit{v} - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Done", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &DoneMessage{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Input = &ExecMessage_Done{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InitMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InitMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InitMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Meta == nil { - m.Meta = &pb.Meta{} - } - if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType == 0 { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Fds = append(m.Fds, v) - } else if wireType == 2 { - var packedLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - packedLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if packedLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Fds) == 0 { - m.Fds = make([]uint32, 0, elementCount) - } - for iNdEx < postIndex { - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Fds = append(m.Fds, v) - } - } else { - return 
fmt.Errorf("proto: wrong wireType = %d for field Fds", wireType) - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Tty", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Tty = bool(v != 0) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Security", wireType) - } - m.Security = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Security |= pb.SecurityMode(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExitMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExitMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExitMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - m.Code = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Code |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Error == nil { - m.Error = &rpc.Status{} - } - if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StartedMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StartedMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StartedMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DoneMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DoneMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DoneMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FdMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FdMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FdMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Fd", wireType) - } - m.Fd = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Fd |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EOF", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.EOF = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGateway - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGateway - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResizeMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResizeMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResizeMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Rows", wireType) - } - m.Rows = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Rows |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Cols", wireType) - } - m.Cols = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGateway - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Cols |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGateway(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGateway - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGateway(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGateway - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGateway - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGateway - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGateway - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGateway - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGateway - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGateway = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGateway = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGateway = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto deleted file mode 100644 index 5fc8a021f7e7..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto +++ /dev/null @@ -1,238 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.v1.frontend; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; -import "github.com/gogo/googleapis/google/rpc/status.proto"; -import "github.com/moby/buildkit/solver/pb/ops.proto"; -import "github.com/moby/buildkit/api/types/worker.proto"; -import "github.com/moby/buildkit/util/apicaps/pb/caps.proto"; -import "github.com/tonistiigi/fsutil/types/stat.proto"; - -option (gogoproto.sizer_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -service LLBBridge { - // apicaps:CapResolveImage - rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse); - // apicaps:CapSolveBase - rpc Solve(SolveRequest) returns (SolveResponse); - // apicaps:CapReadFile - rpc ReadFile(ReadFileRequest) returns (ReadFileResponse); - // apicaps:CapReadDir - rpc ReadDir(ReadDirRequest) returns (ReadDirResponse); - // apicaps:CapStatFile - rpc StatFile(StatFileRequest) returns (StatFileResponse); - rpc Ping(PingRequest) returns (PongResponse); - rpc Return(ReturnRequest) returns (ReturnResponse); - // apicaps:CapFrontendInputs - rpc Inputs(InputsRequest) returns (InputsResponse); - - rpc NewContainer(NewContainerRequest) returns (NewContainerResponse); - rpc ReleaseContainer(ReleaseContainerRequest) returns (ReleaseContainerResponse); - rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage); -} - -message Result { - oneof result { - // Deprecated non-array refs. 
- string refDeprecated = 1; - RefMapDeprecated refsDeprecated = 2; - - Ref ref = 3; - RefMap refs = 4; - } - map metadata = 10; -} - -message RefMapDeprecated { - map refs = 1; -} - -message Ref { - string id = 1; - pb.Definition def = 2; -} - -message RefMap { - map refs = 1; -} - -message ReturnRequest { - Result result = 1; - google.rpc.Status error = 2; -} - -message ReturnResponse { -} - -message InputsRequest { -} - -message InputsResponse { - map Definitions = 1; -} - -message ResolveImageConfigRequest { - string Ref = 1; - pb.Platform Platform = 2; - string ResolveMode = 3; - string LogName = 4; -} - -message ResolveImageConfigResponse { - string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; - bytes Config = 2; -} - -message SolveRequest { - pb.Definition Definition = 1; - string Frontend = 2; - map FrontendOpt = 3; - // ImportCacheRefsDeprecated is deprecated in favor or the new Imports since BuildKit v0.4.0. - // When ImportCacheRefsDeprecated is set, the solver appends - // {.Type = "registry", .Attrs = {"ref": importCacheRef}} - // for each of the ImportCacheRefs entry to CacheImports for compatibility. (planned to be removed) - repeated string ImportCacheRefsDeprecated = 4; - bool allowResultReturn = 5; - bool allowResultArrayRef = 6; - - // apicaps.CapSolveInlineReturn deprecated - bool Final = 10; - bytes ExporterAttr = 11; - // CacheImports was added in BuildKit v0.4.0. - // apicaps:CapImportCaches - repeated CacheOptionsEntry CacheImports = 12; - - // apicaps:CapFrontendInputs - map FrontendInputs = 13; - - bool Evaluate = 14; -} - -// CacheOptionsEntry corresponds to the control.CacheOptionsEntry -message CacheOptionsEntry { - string Type = 1; - map Attrs = 2; -} - -message SolveResponse { - // deprecated - string ref = 1; // can be used by readfile request - // deprecated -/* bytes ExporterAttr = 2;*/ - - // these fields are returned when allowMapReturn was set - Result result = 3; -} - -message ReadFileRequest { - string Ref = 1; - string FilePath = 2; - FileRange Range = 3; -} - -message FileRange { - int64 Offset = 1; - int64 Length = 2; -} - -message ReadFileResponse { - bytes Data = 1; -} - -message ReadDirRequest { - string Ref = 1; - string DirPath = 2; - string IncludePattern = 3; -} - -message ReadDirResponse { - repeated fsutil.types.Stat entries = 1; -} - -message StatFileRequest { - string Ref = 1; - string Path = 2; -} - -message StatFileResponse { - fsutil.types.Stat stat = 1; -} - -message PingRequest{ -} -message PongResponse{ - repeated moby.buildkit.v1.apicaps.APICap FrontendAPICaps = 1 [(gogoproto.nullable) = false]; - repeated moby.buildkit.v1.apicaps.APICap LLBCaps = 2 [(gogoproto.nullable) = false]; - repeated moby.buildkit.v1.types.WorkerRecord Workers = 3; -} - -message NewContainerRequest { - string ContainerID = 1; - // For mount input values we can use random identifiers passed with ref - repeated pb.Mount Mounts = 2; - pb.NetMode Network = 3; - pb.Platform platform = 4; - pb.WorkerConstraints constraints = 5; -} - -message NewContainerResponse{} - -message ReleaseContainerRequest { - string ContainerID = 1; -} - -message ReleaseContainerResponse{} - -message ExecMessage { - string ProcessID = 1; - oneof Input { - // InitMessage sent from client to server will start a new process in a - // container - InitMessage Init = 2; - // FdMessage used from client to server for input (stdin) and - // from server to client for output (stdout, stderr) - FdMessage File = 3; - // 
ResizeMessage used from client to server for terminal resize events - ResizeMessage Resize = 4; - // StartedMessage sent from server to client after InitMessage to - // indicate the process has started. - StartedMessage Started = 5; - // ExitMessage sent from server to client will contain the exit code - // when the process ends. - ExitMessage Exit = 6; - // DoneMessage from server to client will be the last message for any - // process. Note that FdMessage might be sent after ExitMessage. - DoneMessage Done = 7; - } -} - -message InitMessage{ - string ContainerID = 1; - pb.Meta Meta = 2; - repeated uint32 Fds = 3; - bool Tty = 4; - pb.SecurityMode Security = 5; -} - -message ExitMessage { - uint32 Code = 1; - google.rpc.Status Error = 2; -} - -message StartedMessage{} - -message DoneMessage{} - -message FdMessage{ - uint32 Fd = 1; // what fd the data was from - bool EOF = 2; // true if eof was reached - bytes Data = 3; -} - -message ResizeMessage{ - uint32 Rows = 1; - uint32 Cols = 2; -} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go deleted file mode 100644 index e17b9daf6b1a..000000000000 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package moby_buildkit_v1_frontend //nolint:golint - -//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto diff --git a/vendor/github.com/moby/buildkit/identity/randomid.go b/vendor/github.com/moby/buildkit/identity/randomid.go deleted file mode 100644 index 0eb13527aac5..000000000000 --- a/vendor/github.com/moby/buildkit/identity/randomid.go +++ /dev/null @@ -1,53 +0,0 @@ -package identity - -import ( - cryptorand "crypto/rand" - "fmt" - "io" - "math/big" -) - -var ( - // idReader is used for random id generation. This declaration allows us to - // replace it for testing. - idReader = cryptorand.Reader -) - -// parameters for random identifier generation. We can tweak this when there is -// time for further analysis. -const ( - randomIDEntropyBytes = 17 - randomIDBase = 36 - - // To ensure that all identifiers are fixed length, we make sure they - // get padded out or truncated to 25 characters. - // - // For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value - // was calculated from floor(log(2^128-1, 36)) + 1. - // - // While 128 bits is the largest whole-byte size that fits into 25 - // base-36 characters, we generate an extra byte of entropy to fill - // in the high bits, which would otherwise be 0. This gives us a more - // even distribution of the first character. - // - // See http://mathworld.wolfram.com/NumberLength.html for more information. - maxRandomIDLength = 25 -) - -// NewID generates a new identifier for use where random identifiers with low -// collision probability are required. -// -// With the parameters in this package, the generated identifier will provide -// ~129 bits of entropy encoded with base36. Leading padding is added if the -// string is less 25 bytes. We do not intend to maintain this interface, so -// identifiers should be treated opaquely. 
-func NewID() string { - var p [randomIDEntropyBytes]byte - - if _, err := io.ReadFull(idReader, p[:]); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) - } - - p[0] |= 0x80 // set high bit to avoid the need for padding - return (&big.Int{}).SetBytes(p[:]).Text(randomIDBase)[1 : maxRandomIDLength+1] -} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.go b/vendor/github.com/moby/buildkit/session/auth/auth.go deleted file mode 100644 index 85e6f68053ad..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/auth.go +++ /dev/null @@ -1,130 +0,0 @@ -package auth - -import ( - "context" - "crypto/subtle" - "math/rand" - "sync" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/grpcerrors" - "github.com/pkg/errors" - "golang.org/x/crypto/nacl/sign" - "google.golang.org/grpc/codes" -) - -var salt []byte -var saltOnce sync.Once - -// getSalt returns unique component per daemon restart to avoid persistent keys -func getSalt() []byte { - saltOnce.Do(func() { - salt = make([]byte, 32) - rand.Read(salt) - }) - return salt -} - -func CredentialsFunc(sm *session.Manager, g session.Group) func(string) (session, username, secret string, err error) { - return func(host string) (string, string, string, error) { - var sessionID, user, secret string - err := sm.Any(context.TODO(), g, func(ctx context.Context, id string, c session.Caller) error { - client := NewAuthClient(c.Conn()) - - resp, err := client.Credentials(ctx, &CredentialsRequest{ - Host: host, - }) - if err != nil { - if grpcerrors.Code(err) == codes.Unimplemented { - return nil - } - return err - } - sessionID = id - user = resp.Username - secret = resp.Secret - return nil - }) - if err != nil { - return "", "", "", err - } - return sessionID, user, secret, nil - } -} - -func FetchToken(ctx context.Context, req *FetchTokenRequest, sm *session.Manager, g session.Group) (resp *FetchTokenResponse, err error) { - err = sm.Any(ctx, g, func(ctx context.Context, id string, c session.Caller) error { - client := NewAuthClient(c.Conn()) - - resp, err = client.FetchToken(ctx, req) - if err != nil { - return err - } - return nil - }) - if err != nil { - return nil, err - } - return resp, nil -} - -func VerifyTokenAuthority(ctx context.Context, host string, pubKey *[32]byte, sm *session.Manager, g session.Group) (sessionID string, ok bool, err error) { - var verified bool - err = sm.Any(ctx, g, func(ctx context.Context, id string, c session.Caller) error { - client := NewAuthClient(c.Conn()) - - payload := make([]byte, 32) - rand.Read(payload) - resp, err := client.VerifyTokenAuthority(ctx, &VerifyTokenAuthorityRequest{ - Host: host, - Salt: getSalt(), - Payload: payload, - }) - if err != nil { - if grpcerrors.Code(err) == codes.Unimplemented { - return nil - } - return err - } - var dt []byte - dt, ok = sign.Open(nil, resp.Signed, pubKey) - if ok && subtle.ConstantTimeCompare(dt, payload) == 1 { - verified = true - } - sessionID = id - return nil - }) - if err != nil { - return "", false, err - } - return sessionID, verified, nil -} - -func GetTokenAuthority(ctx context.Context, host string, sm *session.Manager, g session.Group) (sessionID string, pubKey *[32]byte, err error) { - err = sm.Any(ctx, g, func(ctx context.Context, id string, c session.Caller) error { - client := NewAuthClient(c.Conn()) - - resp, err := client.GetTokenAuthority(ctx, &GetTokenAuthorityRequest{ - Host: host, - Salt: getSalt(), - }) - if err != nil { - if grpcerrors.Code(err) == codes.Unimplemented || 
grpcerrors.Code(err) == codes.Unavailable { - return nil - } - return err - } - if len(resp.PublicKey) != 32 { - return errors.Errorf("invalid pubkey length %d", len(pubKey)) - } - - sessionID = id - pubKey = new([32]byte) - copy((*pubKey)[:], resp.PublicKey) - return nil - }) - if err != nil { - return "", nil, err - } - return sessionID, pubKey, nil -} diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go b/vendor/github.com/moby/buildkit/session/auth/auth.pb.go deleted file mode 100644 index e23a07fc8a2a..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/auth.pb.go +++ /dev/null @@ -1,2630 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: auth.proto - -package auth - -import ( - bytes "bytes" - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type CredentialsRequest struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` -} - -func (m *CredentialsRequest) Reset() { *m = CredentialsRequest{} } -func (*CredentialsRequest) ProtoMessage() {} -func (*CredentialsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{0} -} -func (m *CredentialsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CredentialsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CredentialsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CredentialsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CredentialsRequest.Merge(m, src) -} -func (m *CredentialsRequest) XXX_Size() int { - return m.Size() -} -func (m *CredentialsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CredentialsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CredentialsRequest proto.InternalMessageInfo - -func (m *CredentialsRequest) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -type CredentialsResponse struct { - Username string `protobuf:"bytes,1,opt,name=Username,proto3" json:"Username,omitempty"` - Secret string `protobuf:"bytes,2,opt,name=Secret,proto3" json:"Secret,omitempty"` -} - -func (m *CredentialsResponse) Reset() { *m = CredentialsResponse{} } -func (*CredentialsResponse) ProtoMessage() {} -func (*CredentialsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{1} -} -func (m *CredentialsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CredentialsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CredentialsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return 
b[:n], nil - } -} -func (m *CredentialsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CredentialsResponse.Merge(m, src) -} -func (m *CredentialsResponse) XXX_Size() int { - return m.Size() -} -func (m *CredentialsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CredentialsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CredentialsResponse proto.InternalMessageInfo - -func (m *CredentialsResponse) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *CredentialsResponse) GetSecret() string { - if m != nil { - return m.Secret - } - return "" -} - -type FetchTokenRequest struct { - ClientID string `protobuf:"bytes,1,opt,name=ClientID,proto3" json:"ClientID,omitempty"` - Host string `protobuf:"bytes,2,opt,name=Host,proto3" json:"Host,omitempty"` - Realm string `protobuf:"bytes,3,opt,name=Realm,proto3" json:"Realm,omitempty"` - Service string `protobuf:"bytes,4,opt,name=Service,proto3" json:"Service,omitempty"` - Scopes []string `protobuf:"bytes,5,rep,name=Scopes,proto3" json:"Scopes,omitempty"` -} - -func (m *FetchTokenRequest) Reset() { *m = FetchTokenRequest{} } -func (*FetchTokenRequest) ProtoMessage() {} -func (*FetchTokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{2} -} -func (m *FetchTokenRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FetchTokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FetchTokenRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FetchTokenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_FetchTokenRequest.Merge(m, src) -} -func (m *FetchTokenRequest) XXX_Size() int { - return m.Size() -} -func (m *FetchTokenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_FetchTokenRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_FetchTokenRequest proto.InternalMessageInfo - -func (m *FetchTokenRequest) GetClientID() string { - if m != nil { - return m.ClientID - } - return "" -} - -func (m *FetchTokenRequest) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *FetchTokenRequest) GetRealm() string { - if m != nil { - return m.Realm - } - return "" -} - -func (m *FetchTokenRequest) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -func (m *FetchTokenRequest) GetScopes() []string { - if m != nil { - return m.Scopes - } - return nil -} - -type FetchTokenResponse struct { - Token string `protobuf:"bytes,1,opt,name=Token,proto3" json:"Token,omitempty"` - ExpiresIn int64 `protobuf:"varint,2,opt,name=ExpiresIn,proto3" json:"ExpiresIn,omitempty"` - IssuedAt int64 `protobuf:"varint,3,opt,name=IssuedAt,proto3" json:"IssuedAt,omitempty"` -} - -func (m *FetchTokenResponse) Reset() { *m = FetchTokenResponse{} } -func (*FetchTokenResponse) ProtoMessage() {} -func (*FetchTokenResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{3} -} -func (m *FetchTokenResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FetchTokenResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FetchTokenResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FetchTokenResponse) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_FetchTokenResponse.Merge(m, src) -} -func (m *FetchTokenResponse) XXX_Size() int { - return m.Size() -} -func (m *FetchTokenResponse) XXX_DiscardUnknown() { - xxx_messageInfo_FetchTokenResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_FetchTokenResponse proto.InternalMessageInfo - -func (m *FetchTokenResponse) GetToken() string { - if m != nil { - return m.Token - } - return "" -} - -func (m *FetchTokenResponse) GetExpiresIn() int64 { - if m != nil { - return m.ExpiresIn - } - return 0 -} - -func (m *FetchTokenResponse) GetIssuedAt() int64 { - if m != nil { - return m.IssuedAt - } - return 0 -} - -type GetTokenAuthorityRequest struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - Salt []byte `protobuf:"bytes,2,opt,name=Salt,proto3" json:"Salt,omitempty"` -} - -func (m *GetTokenAuthorityRequest) Reset() { *m = GetTokenAuthorityRequest{} } -func (*GetTokenAuthorityRequest) ProtoMessage() {} -func (*GetTokenAuthorityRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{4} -} -func (m *GetTokenAuthorityRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetTokenAuthorityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetTokenAuthorityRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetTokenAuthorityRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTokenAuthorityRequest.Merge(m, src) -} -func (m *GetTokenAuthorityRequest) XXX_Size() int { - return m.Size() -} -func (m *GetTokenAuthorityRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetTokenAuthorityRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetTokenAuthorityRequest proto.InternalMessageInfo - -func (m *GetTokenAuthorityRequest) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *GetTokenAuthorityRequest) GetSalt() []byte { - if m != nil { - return m.Salt - } - return nil -} - -type GetTokenAuthorityResponse struct { - PublicKey []byte `protobuf:"bytes,1,opt,name=PublicKey,proto3" json:"PublicKey,omitempty"` -} - -func (m *GetTokenAuthorityResponse) Reset() { *m = GetTokenAuthorityResponse{} } -func (*GetTokenAuthorityResponse) ProtoMessage() {} -func (*GetTokenAuthorityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{5} -} -func (m *GetTokenAuthorityResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetTokenAuthorityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetTokenAuthorityResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetTokenAuthorityResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetTokenAuthorityResponse.Merge(m, src) -} -func (m *GetTokenAuthorityResponse) XXX_Size() int { - return m.Size() -} -func (m *GetTokenAuthorityResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetTokenAuthorityResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetTokenAuthorityResponse proto.InternalMessageInfo - -func (m *GetTokenAuthorityResponse) GetPublicKey() []byte { - if m != nil { - return m.PublicKey - } - return nil -} - -type VerifyTokenAuthorityRequest struct { - Host string 
`protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - Payload []byte `protobuf:"bytes,2,opt,name=Payload,proto3" json:"Payload,omitempty"` - Salt []byte `protobuf:"bytes,3,opt,name=Salt,proto3" json:"Salt,omitempty"` -} - -func (m *VerifyTokenAuthorityRequest) Reset() { *m = VerifyTokenAuthorityRequest{} } -func (*VerifyTokenAuthorityRequest) ProtoMessage() {} -func (*VerifyTokenAuthorityRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{6} -} -func (m *VerifyTokenAuthorityRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VerifyTokenAuthorityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VerifyTokenAuthorityRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VerifyTokenAuthorityRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyTokenAuthorityRequest.Merge(m, src) -} -func (m *VerifyTokenAuthorityRequest) XXX_Size() int { - return m.Size() -} -func (m *VerifyTokenAuthorityRequest) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyTokenAuthorityRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyTokenAuthorityRequest proto.InternalMessageInfo - -func (m *VerifyTokenAuthorityRequest) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *VerifyTokenAuthorityRequest) GetPayload() []byte { - if m != nil { - return m.Payload - } - return nil -} - -func (m *VerifyTokenAuthorityRequest) GetSalt() []byte { - if m != nil { - return m.Salt - } - return nil -} - -type VerifyTokenAuthorityResponse struct { - Signed []byte `protobuf:"bytes,1,opt,name=Signed,proto3" json:"Signed,omitempty"` -} - -func (m *VerifyTokenAuthorityResponse) Reset() { *m = VerifyTokenAuthorityResponse{} } -func (*VerifyTokenAuthorityResponse) ProtoMessage() {} -func (*VerifyTokenAuthorityResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_8bbd6f3875b0e874, []int{7} -} -func (m *VerifyTokenAuthorityResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VerifyTokenAuthorityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_VerifyTokenAuthorityResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *VerifyTokenAuthorityResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_VerifyTokenAuthorityResponse.Merge(m, src) -} -func (m *VerifyTokenAuthorityResponse) XXX_Size() int { - return m.Size() -} -func (m *VerifyTokenAuthorityResponse) XXX_DiscardUnknown() { - xxx_messageInfo_VerifyTokenAuthorityResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_VerifyTokenAuthorityResponse proto.InternalMessageInfo - -func (m *VerifyTokenAuthorityResponse) GetSigned() []byte { - if m != nil { - return m.Signed - } - return nil -} - -func init() { - proto.RegisterType((*CredentialsRequest)(nil), "moby.filesync.v1.CredentialsRequest") - proto.RegisterType((*CredentialsResponse)(nil), "moby.filesync.v1.CredentialsResponse") - proto.RegisterType((*FetchTokenRequest)(nil), "moby.filesync.v1.FetchTokenRequest") - proto.RegisterType((*FetchTokenResponse)(nil), "moby.filesync.v1.FetchTokenResponse") - proto.RegisterType((*GetTokenAuthorityRequest)(nil), "moby.filesync.v1.GetTokenAuthorityRequest") - 
proto.RegisterType((*GetTokenAuthorityResponse)(nil), "moby.filesync.v1.GetTokenAuthorityResponse") - proto.RegisterType((*VerifyTokenAuthorityRequest)(nil), "moby.filesync.v1.VerifyTokenAuthorityRequest") - proto.RegisterType((*VerifyTokenAuthorityResponse)(nil), "moby.filesync.v1.VerifyTokenAuthorityResponse") -} - -func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) } - -var fileDescriptor_8bbd6f3875b0e874 = []byte{ - // 513 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xbd, 0x75, 0xd2, 0x36, 0x43, 0x0f, 0x74, 0x89, 0x90, 0x31, 0xd1, 0xaa, 0x32, 0x45, - 0xaa, 0x40, 0x58, 0x02, 0x24, 0x24, 0xb8, 0xb5, 0xe5, 0x2b, 0xe2, 0x52, 0x39, 0x7c, 0x48, 0xbd, - 0x20, 0xc7, 0x9e, 0x12, 0x0b, 0xc7, 0x0e, 0xde, 0x75, 0x85, 0x6f, 0xdc, 0xb9, 0xf0, 0x08, 0x1c, - 0x79, 0x14, 0x8e, 0x39, 0xf6, 0x48, 0x9c, 0x0b, 0xc7, 0x3c, 0x02, 0xf2, 0x66, 0x9d, 0x04, 0x1c, - 0xd2, 0xdc, 0xfc, 0x1f, 0xff, 0x77, 0xe6, 0xb7, 0x33, 0xa3, 0x05, 0x70, 0x53, 0xd1, 0xb3, 0x07, - 0x49, 0x2c, 0x62, 0x7a, 0xb5, 0x1f, 0x77, 0x33, 0xfb, 0x2c, 0x08, 0x91, 0x67, 0x91, 0x67, 0x9f, - 0xdf, 0xb7, 0x0e, 0x80, 0x1e, 0x27, 0xe8, 0x63, 0x24, 0x02, 0x37, 0xe4, 0x0e, 0x7e, 0x4a, 0x91, - 0x0b, 0x4a, 0xa1, 0xf6, 0x32, 0xe6, 0xc2, 0x20, 0x7b, 0xe4, 0xa0, 0xe1, 0xc8, 0x6f, 0xab, 0x0d, - 0xd7, 0xfe, 0x72, 0xf2, 0x41, 0x1c, 0x71, 0xa4, 0x26, 0x6c, 0xbf, 0xe1, 0x98, 0x44, 0x6e, 0x1f, - 0x95, 0x7d, 0xa6, 0xe9, 0x75, 0xd8, 0xec, 0xa0, 0x97, 0xa0, 0x30, 0x36, 0xe4, 0x1f, 0xa5, 0xac, - 0xaf, 0x04, 0x76, 0x9f, 0xa3, 0xf0, 0x7a, 0xaf, 0xe3, 0x8f, 0x18, 0x95, 0x45, 0x4d, 0xd8, 0x3e, - 0x0e, 0x03, 0x8c, 0x44, 0xfb, 0x69, 0x99, 0xa9, 0xd4, 0x33, 0xa0, 0x8d, 0x39, 0x10, 0x6d, 0x42, - 0xdd, 0x41, 0x37, 0xec, 0x1b, 0xba, 0x0c, 0x4e, 0x05, 0x35, 0x60, 0xab, 0x83, 0xc9, 0x79, 0xe0, - 0xa1, 0x51, 0x93, 0xf1, 0x52, 0x4a, 0x1a, 0x2f, 0x1e, 0x20, 0x37, 0xea, 0x7b, 0xba, 0xa4, 0x91, - 0xca, 0xf2, 0x81, 0x2e, 0xc2, 0xa8, 0x7b, 0x35, 0xa1, 0x2e, 0x03, 0x0a, 0x65, 0x2a, 0x68, 0x0b, - 0x1a, 0xcf, 0x3e, 0x0f, 0x82, 0x04, 0x79, 0x3b, 0x92, 0x30, 0xba, 0x33, 0x0f, 0x14, 0x37, 0x68, - 0x73, 0x9e, 0xa2, 0x7f, 0x28, 0x24, 0x94, 0xee, 0xcc, 0xb4, 0x75, 0x04, 0xc6, 0x0b, 0x14, 0x32, - 0xcb, 0x61, 0x2a, 0x7a, 0x71, 0x12, 0x88, 0x6c, 0x45, 0xbb, 0x8b, 0x58, 0xc7, 0x0d, 0xa7, 0x37, - 0xde, 0x71, 0xe4, 0xb7, 0xf5, 0x18, 0x6e, 0x2c, 0xc9, 0xa1, 0x80, 0x5b, 0xd0, 0x38, 0x49, 0xbb, - 0x61, 0xe0, 0xbd, 0xc2, 0x4c, 0x66, 0xda, 0x71, 0xe6, 0x01, 0xeb, 0x3d, 0xdc, 0x7c, 0x8b, 0x49, - 0x70, 0x96, 0xad, 0x4f, 0x60, 0xc0, 0xd6, 0x89, 0x9b, 0x85, 0xb1, 0xeb, 0x2b, 0x88, 0x52, 0xce, - 0xd8, 0xf4, 0x05, 0xb6, 0x47, 0xd0, 0x5a, 0x5e, 0x40, 0xe1, 0x15, 0xdd, 0x0f, 0x3e, 0x44, 0xe8, - 0x2b, 0x36, 0xa5, 0x1e, 0x7c, 0xd7, 0xa1, 0x56, 0xb8, 0xe9, 0x29, 0x5c, 0x59, 0xd8, 0x2f, 0xba, - 0x6f, 0xff, 0xbb, 0xab, 0x76, 0x75, 0x51, 0xcd, 0xdb, 0x97, 0xb8, 0x54, 0xf1, 0x77, 0x00, 0xf3, - 0x11, 0xd3, 0x5b, 0xd5, 0x43, 0x95, 0x6d, 0x34, 0xf7, 0x57, 0x9b, 0x54, 0xe2, 0x10, 0x76, 0x2b, - 0x13, 0xa1, 0x77, 0xaa, 0x47, 0xff, 0x37, 0x7a, 0xf3, 0xee, 0x5a, 0x5e, 0x55, 0x2d, 0x85, 0xe6, - 0xb2, 0x1e, 0xd3, 0x7b, 0xd5, 0x24, 0x2b, 0x86, 0x6d, 0xda, 0xeb, 0xda, 0xa7, 0x65, 0x8f, 0x9e, - 0x0c, 0x47, 0x4c, 0xbb, 0x18, 0x31, 0x6d, 0x32, 0x62, 0xe4, 0x4b, 0xce, 0xc8, 0x8f, 0x9c, 0x91, - 0x9f, 0x39, 0x23, 0xc3, 0x9c, 0x91, 0x5f, 0x39, 0x23, 0xbf, 0x73, 0xa6, 0x4d, 0x72, 0x46, 0xbe, - 0x8d, 0x99, 0x36, 0x1c, 0x33, 0xed, 0x62, 0xcc, 0xb4, 0xd3, 0x5a, 0xf1, 0xee, 0x74, 0x37, 0xe5, - 0xc3, 0xf3, 
0xf0, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xba, 0xb3, 0x18, 0x70, 0x86, 0x04, 0x00, - 0x00, -} - -func (this *CredentialsRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CredentialsRequest) - if !ok { - that2, ok := that.(CredentialsRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Host != that1.Host { - return false - } - return true -} -func (this *CredentialsResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CredentialsResponse) - if !ok { - that2, ok := that.(CredentialsResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Username != that1.Username { - return false - } - if this.Secret != that1.Secret { - return false - } - return true -} -func (this *FetchTokenRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*FetchTokenRequest) - if !ok { - that2, ok := that.(FetchTokenRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ClientID != that1.ClientID { - return false - } - if this.Host != that1.Host { - return false - } - if this.Realm != that1.Realm { - return false - } - if this.Service != that1.Service { - return false - } - if len(this.Scopes) != len(that1.Scopes) { - return false - } - for i := range this.Scopes { - if this.Scopes[i] != that1.Scopes[i] { - return false - } - } - return true -} -func (this *FetchTokenResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*FetchTokenResponse) - if !ok { - that2, ok := that.(FetchTokenResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Token != that1.Token { - return false - } - if this.ExpiresIn != that1.ExpiresIn { - return false - } - if this.IssuedAt != that1.IssuedAt { - return false - } - return true -} -func (this *GetTokenAuthorityRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetTokenAuthorityRequest) - if !ok { - that2, ok := that.(GetTokenAuthorityRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Host != that1.Host { - return false - } - if !bytes.Equal(this.Salt, that1.Salt) { - return false - } - return true -} -func (this *GetTokenAuthorityResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetTokenAuthorityResponse) - if !ok { - that2, ok := that.(GetTokenAuthorityResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.PublicKey, that1.PublicKey) { - return false - } - return true -} -func (this *VerifyTokenAuthorityRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*VerifyTokenAuthorityRequest) - if !ok { - that2, ok := that.(VerifyTokenAuthorityRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - 
return this == nil - } else if this == nil { - return false - } - if this.Host != that1.Host { - return false - } - if !bytes.Equal(this.Payload, that1.Payload) { - return false - } - if !bytes.Equal(this.Salt, that1.Salt) { - return false - } - return true -} -func (this *VerifyTokenAuthorityResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*VerifyTokenAuthorityResponse) - if !ok { - that2, ok := that.(VerifyTokenAuthorityResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Signed, that1.Signed) { - return false - } - return true -} -func (this *CredentialsRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&auth.CredentialsRequest{") - s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CredentialsResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&auth.CredentialsResponse{") - s = append(s, "Username: "+fmt.Sprintf("%#v", this.Username)+",\n") - s = append(s, "Secret: "+fmt.Sprintf("%#v", this.Secret)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FetchTokenRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 9) - s = append(s, "&auth.FetchTokenRequest{") - s = append(s, "ClientID: "+fmt.Sprintf("%#v", this.ClientID)+",\n") - s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") - s = append(s, "Realm: "+fmt.Sprintf("%#v", this.Realm)+",\n") - s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") - s = append(s, "Scopes: "+fmt.Sprintf("%#v", this.Scopes)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *FetchTokenResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&auth.FetchTokenResponse{") - s = append(s, "Token: "+fmt.Sprintf("%#v", this.Token)+",\n") - s = append(s, "ExpiresIn: "+fmt.Sprintf("%#v", this.ExpiresIn)+",\n") - s = append(s, "IssuedAt: "+fmt.Sprintf("%#v", this.IssuedAt)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetTokenAuthorityRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&auth.GetTokenAuthorityRequest{") - s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") - s = append(s, "Salt: "+fmt.Sprintf("%#v", this.Salt)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetTokenAuthorityResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&auth.GetTokenAuthorityResponse{") - s = append(s, "PublicKey: "+fmt.Sprintf("%#v", this.PublicKey)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *VerifyTokenAuthorityRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 7) - s = append(s, "&auth.VerifyTokenAuthorityRequest{") - s = append(s, "Host: "+fmt.Sprintf("%#v", this.Host)+",\n") - s = append(s, "Payload: "+fmt.Sprintf("%#v", this.Payload)+",\n") - s = append(s, "Salt: "+fmt.Sprintf("%#v", this.Salt)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *VerifyTokenAuthorityResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, 
"&auth.VerifyTokenAuthorityResponse{") - s = append(s, "Signed: "+fmt.Sprintf("%#v", this.Signed)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringAuth(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// AuthClient is the client API for Auth service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type AuthClient interface { - Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) - FetchToken(ctx context.Context, in *FetchTokenRequest, opts ...grpc.CallOption) (*FetchTokenResponse, error) - GetTokenAuthority(ctx context.Context, in *GetTokenAuthorityRequest, opts ...grpc.CallOption) (*GetTokenAuthorityResponse, error) - VerifyTokenAuthority(ctx context.Context, in *VerifyTokenAuthorityRequest, opts ...grpc.CallOption) (*VerifyTokenAuthorityResponse, error) -} - -type authClient struct { - cc *grpc.ClientConn -} - -func NewAuthClient(cc *grpc.ClientConn) AuthClient { - return &authClient{cc} -} - -func (c *authClient) Credentials(ctx context.Context, in *CredentialsRequest, opts ...grpc.CallOption) (*CredentialsResponse, error) { - out := new(CredentialsResponse) - err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/Credentials", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) FetchToken(ctx context.Context, in *FetchTokenRequest, opts ...grpc.CallOption) (*FetchTokenResponse, error) { - out := new(FetchTokenResponse) - err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/FetchToken", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) GetTokenAuthority(ctx context.Context, in *GetTokenAuthorityRequest, opts ...grpc.CallOption) (*GetTokenAuthorityResponse, error) { - out := new(GetTokenAuthorityResponse) - err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/GetTokenAuthority", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *authClient) VerifyTokenAuthority(ctx context.Context, in *VerifyTokenAuthorityRequest, opts ...grpc.CallOption) (*VerifyTokenAuthorityResponse, error) { - out := new(VerifyTokenAuthorityResponse) - err := c.cc.Invoke(ctx, "/moby.filesync.v1.Auth/VerifyTokenAuthority", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// AuthServer is the server API for Auth service. -type AuthServer interface { - Credentials(context.Context, *CredentialsRequest) (*CredentialsResponse, error) - FetchToken(context.Context, *FetchTokenRequest) (*FetchTokenResponse, error) - GetTokenAuthority(context.Context, *GetTokenAuthorityRequest) (*GetTokenAuthorityResponse, error) - VerifyTokenAuthority(context.Context, *VerifyTokenAuthorityRequest) (*VerifyTokenAuthorityResponse, error) -} - -// UnimplementedAuthServer can be embedded to have forward compatible implementations. 
-type UnimplementedAuthServer struct { -} - -func (*UnimplementedAuthServer) Credentials(ctx context.Context, req *CredentialsRequest) (*CredentialsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Credentials not implemented") -} -func (*UnimplementedAuthServer) FetchToken(ctx context.Context, req *FetchTokenRequest) (*FetchTokenResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method FetchToken not implemented") -} -func (*UnimplementedAuthServer) GetTokenAuthority(ctx context.Context, req *GetTokenAuthorityRequest) (*GetTokenAuthorityResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetTokenAuthority not implemented") -} -func (*UnimplementedAuthServer) VerifyTokenAuthority(ctx context.Context, req *VerifyTokenAuthorityRequest) (*VerifyTokenAuthorityResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method VerifyTokenAuthority not implemented") -} - -func RegisterAuthServer(s *grpc.Server, srv AuthServer) { - s.RegisterService(&_Auth_serviceDesc, srv) -} - -func _Auth_Credentials_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CredentialsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).Credentials(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.filesync.v1.Auth/Credentials", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).Credentials(ctx, req.(*CredentialsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_FetchToken_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(FetchTokenRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).FetchToken(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.filesync.v1.Auth/FetchToken", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).FetchToken(ctx, req.(*FetchTokenRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_GetTokenAuthority_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetTokenAuthorityRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).GetTokenAuthority(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.filesync.v1.Auth/GetTokenAuthority", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(AuthServer).GetTokenAuthority(ctx, req.(*GetTokenAuthorityRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Auth_VerifyTokenAuthority_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(VerifyTokenAuthorityRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(AuthServer).VerifyTokenAuthority(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.filesync.v1.Auth/VerifyTokenAuthority", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(AuthServer).VerifyTokenAuthority(ctx, req.(*VerifyTokenAuthorityRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Auth_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.filesync.v1.Auth", - HandlerType: (*AuthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Credentials", - Handler: _Auth_Credentials_Handler, - }, - { - MethodName: "FetchToken", - Handler: _Auth_FetchToken_Handler, - }, - { - MethodName: "GetTokenAuthority", - Handler: _Auth_GetTokenAuthority_Handler, - }, - { - MethodName: "VerifyTokenAuthority", - Handler: _Auth_VerifyTokenAuthority_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "auth.proto", -} - -func (m *CredentialsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CredentialsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CredentialsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CredentialsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CredentialsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CredentialsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Secret) > 0 { - i -= len(m.Secret) - copy(dAtA[i:], m.Secret) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Secret))) - i-- - dAtA[i] = 0x12 - } - if len(m.Username) > 0 { - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Username))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FetchTokenRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FetchTokenRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FetchTokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Scopes) > 0 { - for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Scopes[iNdEx]) - copy(dAtA[i:], m.Scopes[iNdEx]) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Scopes[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Service) > 0 { - i -= len(m.Service) - copy(dAtA[i:], m.Service) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Service))) - i-- - dAtA[i] = 0x22 - } - if len(m.Realm) > 0 { - i -= len(m.Realm) - copy(dAtA[i:], m.Realm) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Realm))) - i-- - dAtA[i] = 0x1a - } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0x12 - } - if len(m.ClientID) > 0 { - i -= len(m.ClientID) - copy(dAtA[i:], m.ClientID) - i = encodeVarintAuth(dAtA, i, uint64(len(m.ClientID))) - i-- - dAtA[i] = 0xa - } - return 
len(dAtA) - i, nil -} - -func (m *FetchTokenResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FetchTokenResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FetchTokenResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.IssuedAt != 0 { - i = encodeVarintAuth(dAtA, i, uint64(m.IssuedAt)) - i-- - dAtA[i] = 0x18 - } - if m.ExpiresIn != 0 { - i = encodeVarintAuth(dAtA, i, uint64(m.ExpiresIn)) - i-- - dAtA[i] = 0x10 - } - if len(m.Token) > 0 { - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Token))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetTokenAuthorityRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetTokenAuthorityRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetTokenAuthorityRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Salt) > 0 { - i -= len(m.Salt) - copy(dAtA[i:], m.Salt) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Salt))) - i-- - dAtA[i] = 0x12 - } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetTokenAuthorityResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetTokenAuthorityResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetTokenAuthorityResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PublicKey) > 0 { - i -= len(m.PublicKey) - copy(dAtA[i:], m.PublicKey) - i = encodeVarintAuth(dAtA, i, uint64(len(m.PublicKey))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *VerifyTokenAuthorityRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VerifyTokenAuthorityRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VerifyTokenAuthorityRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Salt) > 0 { - i -= len(m.Salt) - copy(dAtA[i:], m.Salt) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Salt))) - i-- - dAtA[i] = 0x1a - } - if len(m.Payload) > 0 { - i -= len(m.Payload) - copy(dAtA[i:], m.Payload) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Payload))) - i-- - dAtA[i] = 0x12 - } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *VerifyTokenAuthorityResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - 
dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VerifyTokenAuthorityResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VerifyTokenAuthorityResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Signed) > 0 { - i -= len(m.Signed) - copy(dAtA[i:], m.Signed) - i = encodeVarintAuth(dAtA, i, uint64(len(m.Signed))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintAuth(dAtA []byte, offset int, v uint64) int { - offset -= sovAuth(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CredentialsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *CredentialsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Username) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Secret) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *FetchTokenRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ClientID) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Host) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Realm) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovAuth(uint64(l)) - } - } - return n -} - -func (m *FetchTokenResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Token) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - if m.ExpiresIn != 0 { - n += 1 + sovAuth(uint64(m.ExpiresIn)) - } - if m.IssuedAt != 0 { - n += 1 + sovAuth(uint64(m.IssuedAt)) - } - return n -} - -func (m *GetTokenAuthorityRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Salt) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *GetTokenAuthorityResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PublicKey) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *VerifyTokenAuthorityRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Payload) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - l = len(m.Salt) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func (m *VerifyTokenAuthorityResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Signed) - if l > 0 { - n += 1 + l + sovAuth(uint64(l)) - } - return n -} - -func sovAuth(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAuth(x uint64) (n int) { - return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CredentialsRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CredentialsRequest{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *CredentialsResponse) String() string { - if this == nil { 
- return "nil" - } - s := strings.Join([]string{`&CredentialsResponse{`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, - `}`, - }, "") - return s -} -func (this *FetchTokenRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FetchTokenRequest{`, - `ClientID:` + fmt.Sprintf("%v", this.ClientID) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Realm:` + fmt.Sprintf("%v", this.Realm) + `,`, - `Service:` + fmt.Sprintf("%v", this.Service) + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `}`, - }, "") - return s -} -func (this *FetchTokenResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FetchTokenResponse{`, - `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, - `IssuedAt:` + fmt.Sprintf("%v", this.IssuedAt) + `,`, - `}`, - }, "") - return s -} -func (this *GetTokenAuthorityRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetTokenAuthorityRequest{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Salt:` + fmt.Sprintf("%v", this.Salt) + `,`, - `}`, - }, "") - return s -} -func (this *GetTokenAuthorityResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetTokenAuthorityResponse{`, - `PublicKey:` + fmt.Sprintf("%v", this.PublicKey) + `,`, - `}`, - }, "") - return s -} -func (this *VerifyTokenAuthorityRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VerifyTokenAuthorityRequest{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Payload:` + fmt.Sprintf("%v", this.Payload) + `,`, - `Salt:` + fmt.Sprintf("%v", this.Salt) + `,`, - `}`, - }, "") - return s -} -func (this *VerifyTokenAuthorityResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VerifyTokenAuthorityResponse{`, - `Signed:` + fmt.Sprintf("%v", this.Signed) + `,`, - `}`, - }, "") - return s -} -func valueToStringAuth(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CredentialsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CredentialsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CredentialsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CredentialsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CredentialsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CredentialsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Secret = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FetchTokenRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FetchTokenRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FetchTokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ClientID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClientID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Realm", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Realm = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + 
skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FetchTokenResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FetchTokenResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FetchTokenResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) - } - m.ExpiresIn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpiresIn |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IssuedAt", wireType) - } - m.IssuedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.IssuedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetTokenAuthorityRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetTokenAuthorityRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetTokenAuthorityRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Salt", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Salt = append(m.Salt[:0], dAtA[iNdEx:postIndex]...) - if m.Salt == nil { - m.Salt = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetTokenAuthorityResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetTokenAuthorityResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetTokenAuthorityResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PublicKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PublicKey = append(m.PublicKey[:0], dAtA[iNdEx:postIndex]...) 
- if m.PublicKey == nil { - m.PublicKey = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VerifyTokenAuthorityRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VerifyTokenAuthorityRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VerifyTokenAuthorityRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...) - if m.Payload == nil { - m.Payload = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Salt", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Salt = append(m.Salt[:0], dAtA[iNdEx:postIndex]...) 
- if m.Salt == nil { - m.Salt = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VerifyTokenAuthorityResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VerifyTokenAuthorityResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VerifyTokenAuthorityResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Signed", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuth - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthAuth - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthAuth - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Signed = append(m.Signed[:0], dAtA[iNdEx:postIndex]...) - if m.Signed == nil { - m.Signed = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuth(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuth - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuth(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuth - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAuth - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAuth - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAuth - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAuth = fmt.Errorf("proto: negative 
length found during unmarshaling") - ErrIntOverflowAuth = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAuth = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/session/auth/auth.proto b/vendor/github.com/moby/buildkit/session/auth/auth.proto deleted file mode 100644 index 139b0d0e3903..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/auth.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package moby.filesync.v1; - -option go_package = "auth"; - -service Auth{ - rpc Credentials(CredentialsRequest) returns (CredentialsResponse); - rpc FetchToken(FetchTokenRequest) returns (FetchTokenResponse); - rpc GetTokenAuthority(GetTokenAuthorityRequest) returns (GetTokenAuthorityResponse); - rpc VerifyTokenAuthority(VerifyTokenAuthorityRequest) returns (VerifyTokenAuthorityResponse); -} - -message CredentialsRequest { - string Host = 1; -} - -message CredentialsResponse { - string Username = 1; - string Secret = 2; -} - -message FetchTokenRequest { - string ClientID = 1; - string Host = 2; - string Realm = 3; - string Service = 4; - repeated string Scopes = 5; -} - -message FetchTokenResponse { - string Token = 1; - int64 ExpiresIn = 2; // seconds - int64 IssuedAt = 3; // timestamp -} - -message GetTokenAuthorityRequest { - string Host = 1; - bytes Salt = 2; -} - -message GetTokenAuthorityResponse { - bytes PublicKey = 1; -} - -message VerifyTokenAuthorityRequest { - string Host = 1; - bytes Payload = 2; - bytes Salt = 3; -} - -message VerifyTokenAuthorityResponse { - bytes Signed = 1; -} diff --git a/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go b/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go deleted file mode 100644 index 2c316cadc85b..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go +++ /dev/null @@ -1,220 +0,0 @@ -package authprovider - -import ( - "context" - "crypto/ed25519" - "crypto/hmac" - "crypto/sha256" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "strconv" - "strings" - "sync" - "time" - - authutil "github.com/containerd/containerd/remotes/docker/auth" - remoteserrors "github.com/containerd/containerd/remotes/errors" - "github.com/docker/cli/cli/config" - "github.com/docker/cli/cli/config/configfile" - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/auth" - "github.com/moby/buildkit/util/progress/progresswriter" - "github.com/pkg/errors" - "golang.org/x/crypto/nacl/sign" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -func NewDockerAuthProvider(stderr io.Writer) session.Attachable { - return &authProvider{ - config: config.LoadDefaultConfigFile(stderr), - seeds: &tokenSeeds{dir: config.Dir()}, - loggerCache: map[string]struct{}{}, - } -} - -type authProvider struct { - config *configfile.ConfigFile - seeds *tokenSeeds - logger progresswriter.Logger - loggerCache map[string]struct{} - - // The need for this mutex is not well understood. - // Without it, the docker cli on OS X hangs when - // reading credentials from docker-credential-osxkeychain. 
- // See issue https://github.com/docker/cli/issues/1862 - mu sync.Mutex -} - -func (ap *authProvider) SetLogger(l progresswriter.Logger) { - ap.mu.Lock() - ap.logger = l - ap.mu.Unlock() -} - -func (ap *authProvider) Register(server *grpc.Server) { - auth.RegisterAuthServer(server, ap) -} - -func (ap *authProvider) FetchToken(ctx context.Context, req *auth.FetchTokenRequest) (rr *auth.FetchTokenResponse, err error) { - creds, err := ap.credentials(req.Host) - if err != nil { - return nil, err - } - - to := authutil.TokenOptions{ - Realm: req.Realm, - Service: req.Service, - Scopes: req.Scopes, - Username: creds.Username, - Secret: creds.Secret, - } - - if creds.Secret != "" { - done := func(progresswriter.SubLogger) error { - return err - } - defer func() { - err = errors.Wrap(err, "failed to fetch oauth token") - }() - ap.mu.Lock() - name := fmt.Sprintf("[auth] %v token for %s", strings.Join(trimScopePrefix(req.Scopes), " "), req.Host) - if _, ok := ap.loggerCache[name]; !ok { - progresswriter.Wrap(name, ap.logger, done) - } - ap.mu.Unlock() - // try GET first because Docker Hub does not support POST - // switch once support has landed - resp, err := authutil.FetchToken(ctx, http.DefaultClient, nil, to) - if err != nil { - var errStatus remoteserrors.ErrUnexpectedStatus - if errors.As(err, &errStatus) { - // retry with POST request - // As of September 2017, GCR is known to return 404. - // As of February 2018, JFrog Artifactory is known to return 401. - if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { - resp, err := authutil.FetchTokenWithOAuth(ctx, http.DefaultClient, nil, "buildkit-client", to) - if err != nil { - return nil, err - } - - return toTokenResponse(resp.AccessToken, resp.IssuedAt, resp.ExpiresIn), nil - } - } - return nil, err - } - return toTokenResponse(resp.Token, resp.IssuedAt, resp.ExpiresIn), nil - } - // do request anonymously - resp, err := authutil.FetchToken(ctx, http.DefaultClient, nil, to) - if err != nil { - return nil, errors.Wrap(err, "failed to fetch anonymous token") - } - return toTokenResponse(resp.Token, resp.IssuedAt, resp.ExpiresIn), nil -} - -func (ap *authProvider) credentials(host string) (*auth.CredentialsResponse, error) { - ap.mu.Lock() - defer ap.mu.Unlock() - if host == "registry-1.docker.io" { - host = "https://index.docker.io/v1/" - } - ac, err := ap.config.GetAuthConfig(host) - if err != nil { - return nil, err - } - res := &auth.CredentialsResponse{} - if ac.IdentityToken != "" { - res.Secret = ac.IdentityToken - } else { - res.Username = ac.Username - res.Secret = ac.Password - } - return res, nil -} - -func (ap *authProvider) Credentials(ctx context.Context, req *auth.CredentialsRequest) (*auth.CredentialsResponse, error) { - resp, err := ap.credentials(req.Host) - if err != nil || resp.Secret != "" { - ap.mu.Lock() - defer ap.mu.Unlock() - _, ok := ap.loggerCache[req.Host] - ap.loggerCache[req.Host] = struct{}{} - if !ok { - return resp, progresswriter.Wrap(fmt.Sprintf("[auth] sharing credentials for %s", req.Host), ap.logger, func(progresswriter.SubLogger) error { - return err - }) - } - } - return resp, err -} - -func (ap *authProvider) GetTokenAuthority(ctx context.Context, req *auth.GetTokenAuthorityRequest) (*auth.GetTokenAuthorityResponse, error) { - key, err := ap.getAuthorityKey(req.Host, req.Salt) - if err != nil { - return nil, err - } - - return &auth.GetTokenAuthorityResponse{PublicKey: key[32:]}, nil -} - -func (ap *authProvider) VerifyTokenAuthority(ctx 
context.Context, req *auth.VerifyTokenAuthorityRequest) (*auth.VerifyTokenAuthorityResponse, error) { - key, err := ap.getAuthorityKey(req.Host, req.Salt) - if err != nil { - return nil, err - } - - priv := new([64]byte) - copy((*priv)[:], key) - - return &auth.VerifyTokenAuthorityResponse{Signed: sign.Sign(nil, req.Payload, priv)}, nil -} - -func (ap *authProvider) getAuthorityKey(host string, salt []byte) (ed25519.PrivateKey, error) { - if v, err := strconv.ParseBool(os.Getenv("BUILDKIT_NO_CLIENT_TOKEN")); err == nil && v { - return nil, status.Errorf(codes.Unavailable, "client side tokens disabled") - } - - creds, err := ap.credentials(host) - if err != nil { - return nil, err - } - seed, err := ap.seeds.getSeed(host) - if err != nil { - return nil, err - } - - mac := hmac.New(sha256.New, salt) - if creds.Secret != "" { - mac.Write(seed) - enc := json.NewEncoder(mac) - enc.Encode(creds) - } - - sum := mac.Sum(nil) - - return ed25519.NewKeyFromSeed(sum[:ed25519.SeedSize]), nil -} - -func toTokenResponse(token string, issuedAt time.Time, expires int) *auth.FetchTokenResponse { - resp := &auth.FetchTokenResponse{ - Token: token, - ExpiresIn: int64(expires), - } - if !issuedAt.IsZero() { - resp.IssuedAt = issuedAt.Unix() - } - return resp -} - -func trimScopePrefix(scopes []string) []string { - out := make([]string, len(scopes)) - for i, s := range scopes { - out[i] = strings.TrimPrefix(s, "repository:") - } - return out -} diff --git a/vendor/github.com/moby/buildkit/session/auth/authprovider/tokenseed.go b/vendor/github.com/moby/buildkit/session/auth/authprovider/tokenseed.go deleted file mode 100644 index 3186fbf1f0dc..000000000000 --- a/vendor/github.com/moby/buildkit/session/auth/authprovider/tokenseed.go +++ /dev/null @@ -1,83 +0,0 @@ -package authprovider - -import ( - "crypto/rand" - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "sync" - "syscall" - - "github.com/gofrs/flock" - "github.com/pkg/errors" -) - -type tokenSeeds struct { - mu sync.Mutex - dir string - m map[string]seed -} - -type seed struct { - Seed []byte -} - -func (ts *tokenSeeds) getSeed(host string) ([]byte, error) { - ts.mu.Lock() - defer ts.mu.Unlock() - - if err := os.MkdirAll(ts.dir, 0755); err != nil { - return nil, err - } - - if ts.m == nil { - ts.m = map[string]seed{} - } - - l := flock.New(filepath.Join(ts.dir, ".token_seed.lock")) - if err := l.Lock(); err != nil { - if !errors.Is(err, syscall.EROFS) && errors.Is(err, syscall.EPERM) { - return nil, err - } - } else { - defer l.Unlock() - } - - fp := filepath.Join(ts.dir, ".token_seed") - - // we include client side randomness to avoid chosen plaintext attack from the daemon side - dt, err := ioutil.ReadFile(fp) - if err != nil { - if !errors.Is(err, os.ErrNotExist) && !errors.Is(err, syscall.ENOTDIR) { - return nil, err - } - } else { - // ignore error in case of crash during previous marshal - _ = json.Unmarshal(dt, &ts.m) - } - v, ok := ts.m[host] - if !ok { - v = seed{Seed: newSeed()} - } - - ts.m[host] = v - - dt, err = json.MarshalIndent(ts.m, "", " ") - if err != nil { - return nil, err - } - - if err := ioutil.WriteFile(fp, dt, 0600); err != nil { - if !errors.Is(err, syscall.EROFS) && !errors.Is(err, syscall.EPERM) { - return nil, err - } - } - return v.Seed, nil -} - -func newSeed() []byte { - b := make([]byte, 16) - rand.Read(b) - return b -} diff --git a/vendor/github.com/moby/buildkit/session/auth/generate.go b/vendor/github.com/moby/buildkit/session/auth/generate.go deleted file mode 100644 index 687aa7cc0b5b..000000000000 --- 
a/vendor/github.com/moby/buildkit/session/auth/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package auth - -//go:generate protoc --gogoslick_out=plugins=grpc:. auth.proto diff --git a/vendor/github.com/moby/buildkit/session/content/attachable.go b/vendor/github.com/moby/buildkit/session/content/attachable.go deleted file mode 100644 index 253b37a23e2f..000000000000 --- a/vendor/github.com/moby/buildkit/session/content/attachable.go +++ /dev/null @@ -1,132 +0,0 @@ -package content - -import ( - "context" - - api "github.com/containerd/containerd/api/services/content/v1" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/services/content/contentserver" - "github.com/moby/buildkit/session" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// GRPCHeaderID is a gRPC header for store ID -const GRPCHeaderID = "buildkit-attachable-store-id" - -type attachableContentStore struct { - stores map[string]content.Store -} - -func (cs *attachableContentStore) choose(ctx context.Context) (content.Store, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return nil, errors.Wrap(errdefs.ErrInvalidArgument, "request lacks metadata") - } - - values := md[GRPCHeaderID] - if len(values) == 0 { - return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "request lacks metadata %q", GRPCHeaderID) - } - id := values[0] - store, ok := cs.stores[id] - if !ok { - return nil, errors.Wrapf(errdefs.ErrNotFound, "unknown store %s", id) - } - return store, nil -} - -func (cs *attachableContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - store, err := cs.choose(ctx) - if err != nil { - return content.Info{}, err - } - return store.Info(ctx, dgst) -} - -func (cs *attachableContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - store, err := cs.choose(ctx) - if err != nil { - return content.Info{}, err - } - return store.Update(ctx, info, fieldpaths...) -} - -func (cs *attachableContentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { - store, err := cs.choose(ctx) - if err != nil { - return err - } - return store.Walk(ctx, fn, fs...) -} - -func (cs *attachableContentStore) Delete(ctx context.Context, dgst digest.Digest) error { - store, err := cs.choose(ctx) - if err != nil { - return err - } - return store.Delete(ctx, dgst) -} - -func (cs *attachableContentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { - store, err := cs.choose(ctx) - if err != nil { - return nil, err - } - return store.ListStatuses(ctx, fs...) -} - -func (cs *attachableContentStore) Status(ctx context.Context, ref string) (content.Status, error) { - store, err := cs.choose(ctx) - if err != nil { - return content.Status{}, err - } - return store.Status(ctx, ref) -} - -func (cs *attachableContentStore) Abort(ctx context.Context, ref string) error { - store, err := cs.choose(ctx) - if err != nil { - return err - } - return store.Abort(ctx, ref) -} - -func (cs *attachableContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - store, err := cs.choose(ctx) - if err != nil { - return nil, err - } - return store.Writer(ctx, opts...) 
-} - -func (cs *attachableContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - store, err := cs.choose(ctx) - if err != nil { - return nil, err - } - return store.ReaderAt(ctx, desc) -} - -type attachable struct { - service api.ContentServer -} - -// NewAttachable creates session.Attachable from aggregated stores. -// A key of the store map is an ID string that is used for choosing underlying store. -func NewAttachable(stores map[string]content.Store) session.Attachable { - store := &attachableContentStore{stores: stores} - service := contentserver.New(store) - a := attachable{ - service: service, - } - return &a -} - -func (a *attachable) Register(server *grpc.Server) { - api.RegisterContentServer(server, a.service) -} diff --git a/vendor/github.com/moby/buildkit/session/content/caller.go b/vendor/github.com/moby/buildkit/session/content/caller.go deleted file mode 100644 index 70e82130d717..000000000000 --- a/vendor/github.com/moby/buildkit/session/content/caller.go +++ /dev/null @@ -1,91 +0,0 @@ -package content - -import ( - "context" - - api "github.com/containerd/containerd/api/services/content/v1" - "github.com/containerd/containerd/content" - "github.com/containerd/containerd/content/proxy" - "github.com/moby/buildkit/session" - digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/pkg/errors" - "google.golang.org/grpc/metadata" -) - -type callerContentStore struct { - store content.Store - storeID string -} - -func (cs *callerContentStore) choose(ctx context.Context) context.Context { - nsheader := metadata.Pairs(GRPCHeaderID, cs.storeID) - md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context. - if !ok { - md = nsheader - } else { - // order ensures the latest is first in this list. - md = metadata.Join(nsheader, md) - } - return metadata.NewOutgoingContext(ctx, md) -} - -func (cs *callerContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { - ctx = cs.choose(ctx) - info, err := cs.store.Info(ctx, dgst) - return info, errors.WithStack(err) -} - -func (cs *callerContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { - ctx = cs.choose(ctx) - info, err := cs.store.Update(ctx, info, fieldpaths...) - return info, errors.WithStack(err) -} - -func (cs *callerContentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { - ctx = cs.choose(ctx) - return errors.WithStack(cs.store.Walk(ctx, fn, fs...)) -} - -func (cs *callerContentStore) Delete(ctx context.Context, dgst digest.Digest) error { - ctx = cs.choose(ctx) - return errors.WithStack(cs.store.Delete(ctx, dgst)) -} - -func (cs *callerContentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { - ctx = cs.choose(ctx) - resp, err := cs.store.ListStatuses(ctx, fs...) - return resp, errors.WithStack(err) -} - -func (cs *callerContentStore) Status(ctx context.Context, ref string) (content.Status, error) { - ctx = cs.choose(ctx) - st, err := cs.store.Status(ctx, ref) - return st, errors.WithStack(err) -} - -func (cs *callerContentStore) Abort(ctx context.Context, ref string) error { - ctx = cs.choose(ctx) - return errors.WithStack(cs.store.Abort(ctx, ref)) -} - -func (cs *callerContentStore) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { - ctx = cs.choose(ctx) - w, err := cs.store.Writer(ctx, opts...) 
- return w, errors.WithStack(err) -} - -func (cs *callerContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { - ctx = cs.choose(ctx) - ra, err := cs.store.ReaderAt(ctx, desc) - return ra, errors.WithStack(err) -} - -// NewCallerStore creates content.Store from session.Caller with specified storeID -func NewCallerStore(c session.Caller, storeID string) content.Store { - client := api.NewContentClient(c.Conn()) - return &callerContentStore{ - store: proxy.NewContentStore(client), - storeID: storeID, - } -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go deleted file mode 100644 index a03326f6d575..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go +++ /dev/null @@ -1,130 +0,0 @@ -package filesync - -import ( - "bufio" - "context" - io "io" - "os" - "time" - - "github.com/pkg/errors" - "github.com/sirupsen/logrus" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" - "google.golang.org/grpc" -) - -type Stream interface { - Context() context.Context - SendMsg(m interface{}) error - RecvMsg(m interface{}) error -} - -func sendDiffCopy(stream Stream, fs fsutil.FS, progress progressCb) error { - return errors.WithStack(fsutil.Send(stream.Context(), stream, fs, progress)) -} - -func newStreamWriter(stream grpc.ClientStream) io.WriteCloser { - wc := &streamWriterCloser{ClientStream: stream} - return &bufferedWriteCloser{Writer: bufio.NewWriter(wc), Closer: wc} -} - -type bufferedWriteCloser struct { - *bufio.Writer - io.Closer -} - -func (bwc *bufferedWriteCloser) Close() error { - if err := bwc.Writer.Flush(); err != nil { - return errors.WithStack(err) - } - return bwc.Closer.Close() -} - -type streamWriterCloser struct { - grpc.ClientStream -} - -func (wc *streamWriterCloser) Write(dt []byte) (int, error) { - if err := wc.ClientStream.SendMsg(&BytesMessage{Data: dt}); err != nil { - // SendMsg return EOF on remote errors - if errors.Is(err, io.EOF) { - if err := errors.WithStack(wc.ClientStream.RecvMsg(struct{}{})); err != nil { - return 0, err - } - } - return 0, errors.WithStack(err) - } - return len(dt), nil -} - -func (wc *streamWriterCloser) Close() error { - if err := wc.ClientStream.CloseSend(); err != nil { - return errors.WithStack(err) - } - // block until receiver is done - var bm BytesMessage - if err := wc.ClientStream.RecvMsg(&bm); err != io.EOF { - return errors.WithStack(err) - } - return nil -} - -func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) (err error) { - st := time.Now() - defer func() { - logrus.Debugf("diffcopy took: %v", time.Since(st)) - }() - var cf fsutil.ChangeFunc - var ch fsutil.ContentHasher - if cu != nil { - cu.MarkSupported(true) - cf = cu.HandleChange - ch = cu.ContentHasher() - } - defer func() { - // tracing wrapper requires close trigger even on clean eof - if err == nil { - ds.CloseSend() - } - }() - return errors.WithStack(fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ - NotifyHashed: cf, - ContentHasher: ch, - ProgressCb: progress, - Filter: fsutil.FilterFunc(filter), - })) -} - -func syncTargetDiffCopy(ds grpc.ServerStream, dest string) error { - if err := os.MkdirAll(dest, 0700); err != nil { - return errors.Wrapf(err, "failed to create synctarget dest dir %s", dest) - } - return errors.WithStack(fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{ - Merge: true, - 
Filter: func() func(string, *fstypes.Stat) bool { - uid := os.Getuid() - gid := os.Getgid() - return func(p string, st *fstypes.Stat) bool { - st.Uid = uint32(uid) - st.Gid = uint32(gid) - return true - } - }(), - })) -} - -func writeTargetFile(ds grpc.ServerStream, wc io.WriteCloser) error { - for { - bm := BytesMessage{} - if err := ds.RecvMsg(&bm); err != nil { - if errors.Is(err, io.EOF) { - return nil - } - return errors.WithStack(err) - } - if _, err := wc.Write(bm.Data); err != nil { - return errors.WithStack(err) - } - } -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go deleted file mode 100644 index af62d1c2c6f0..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ /dev/null @@ -1,326 +0,0 @@ -package filesync - -import ( - "context" - "fmt" - io "io" - "os" - "strings" - - "github.com/moby/buildkit/session" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil" - fstypes "github.com/tonistiigi/fsutil/types" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -const ( - keyOverrideExcludes = "override-excludes" - keyIncludePatterns = "include-patterns" - keyExcludePatterns = "exclude-patterns" - keyFollowPaths = "followpaths" - keyDirName = "dir-name" - keyExporterMetaPrefix = "exporter-md-" -) - -type fsSyncProvider struct { - dirs map[string]SyncedDir - p progressCb - doneCh chan error -} - -type SyncedDir struct { - Name string - Dir string - Excludes []string - Map func(string, *fstypes.Stat) bool -} - -// NewFSSyncProvider creates a new provider for sending files from client -func NewFSSyncProvider(dirs []SyncedDir) session.Attachable { - p := &fsSyncProvider{ - dirs: map[string]SyncedDir{}, - } - for _, d := range dirs { - p.dirs[d.Name] = d - } - return p -} - -func (sp *fsSyncProvider) Register(server *grpc.Server) { - RegisterFileSyncServer(server, sp) -} - -func (sp *fsSyncProvider) DiffCopy(stream FileSync_DiffCopyServer) error { - return sp.handle("diffcopy", stream) -} -func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error { - return sp.handle("tarstream", stream) -} - -func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retErr error) { - var pr *protocol - for _, p := range supportedProtocols { - if method == p.name && isProtoSupported(p.name) { - pr = &p - break - } - } - if pr == nil { - return errors.New("failed to negotiate protocol") - } - - opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object - - dirName := "" - name, ok := opts[keyDirName] - if ok && len(name) > 0 { - dirName = name[0] - } - - dir, ok := sp.dirs[dirName] - if !ok { - return status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName) - } - - excludes := opts[keyExcludePatterns] - if len(dir.Excludes) != 0 && (len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true") { - excludes = dir.Excludes - } - includes := opts[keyIncludePatterns] - - followPaths := opts[keyFollowPaths] - - var progress progressCb - if sp.p != nil { - progress = sp.p - sp.p = nil - } - - var doneCh chan error - if sp.doneCh != nil { - doneCh = sp.doneCh - sp.doneCh = nil - } - err := pr.sendFn(stream, fsutil.NewFS(dir.Dir, &fsutil.WalkOpt{ - ExcludePatterns: excludes, - IncludePatterns: includes, - FollowPaths: followPaths, - Map: dir.Map, - }), progress) - if doneCh != nil { - if err != nil { - 
doneCh <- err - } - close(doneCh) - } - return err -} - -func (sp *fsSyncProvider) SetNextProgressCallback(f func(int, bool), doneCh chan error) { - sp.p = f - sp.doneCh = doneCh -} - -type progressCb func(int, bool) - -type protocol struct { - name string - sendFn func(stream Stream, fs fsutil.FS, progress progressCb) error - recvFn func(stream grpc.ClientStream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error -} - -func isProtoSupported(p string) bool { - // TODO: this should be removed after testing if stability is confirmed - if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" { - return strings.EqualFold(p, override) - } - return true -} - -var supportedProtocols = []protocol{ - { - name: "diffcopy", - sendFn: sendDiffCopy, - recvFn: recvDiffCopy, - }, -} - -// FSSendRequestOpt defines options for FSSend request -type FSSendRequestOpt struct { - Name string - IncludePatterns []string - ExcludePatterns []string - FollowPaths []string - OverrideExcludes bool // deprecated: this is used by docker/cli for automatically loading .dockerignore from the directory - DestDir string - CacheUpdater CacheUpdater - ProgressCb func(int, bool) - Filter func(string, *fstypes.Stat) bool -} - -// CacheUpdater is an object capable of sending notifications for the cache hash changes -type CacheUpdater interface { - MarkSupported(bool) - HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error - ContentHasher() fsutil.ContentHasher -} - -// FSSync initializes a transfer of files -func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { - var pr *protocol - for _, p := range supportedProtocols { - if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { - pr = &p - break - } - } - if pr == nil { - return errors.New("no local sources enabled") - } - - opts := make(map[string][]string) - if opt.OverrideExcludes { - opts[keyOverrideExcludes] = []string{"true"} - } - - if opt.IncludePatterns != nil { - opts[keyIncludePatterns] = opt.IncludePatterns - } - - if opt.ExcludePatterns != nil { - opts[keyExcludePatterns] = opt.ExcludePatterns - } - - if opt.FollowPaths != nil { - opts[keyFollowPaths] = opt.FollowPaths - } - - opts[keyDirName] = []string{opt.Name} - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - client := NewFileSyncClient(c.Conn()) - - var stream grpc.ClientStream - - ctx = metadata.NewOutgoingContext(ctx, opts) - - switch pr.name { - case "tarstream": - cc, err := client.TarStream(ctx) - if err != nil { - return err - } - stream = cc - case "diffcopy": - cc, err := client.DiffCopy(ctx) - if err != nil { - return err - } - stream = cc - default: - panic(fmt.Sprintf("invalid protocol: %q", pr.name)) - } - - return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Filter) -} - -// NewFSSyncTargetDir allows writing into a directory -func NewFSSyncTargetDir(outdir string) session.Attachable { - p := &fsSyncTarget{ - outdir: outdir, - } - return p -} - -// NewFSSyncTarget allows writing into an io.WriteCloser -func NewFSSyncTarget(f func(map[string]string) (io.WriteCloser, error)) session.Attachable { - p := &fsSyncTarget{ - f: f, - } - return p -} - -type fsSyncTarget struct { - outdir string - f func(map[string]string) (io.WriteCloser, error) -} - -func (sp *fsSyncTarget) Register(server *grpc.Server) { - RegisterFileSendServer(server, sp) -} - -func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) (err error) { 
- if sp.outdir != "" { - return syncTargetDiffCopy(stream, sp.outdir) - } - - if sp.f == nil { - return errors.New("empty outfile and outdir") - } - opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object - md := map[string]string{} - for k, v := range opts { - if strings.HasPrefix(k, keyExporterMetaPrefix) { - md[strings.TrimPrefix(k, keyExporterMetaPrefix)] = strings.Join(v, ",") - } - } - wc, err := sp.f(md) - if err != nil { - return err - } - if wc == nil { - return status.Errorf(codes.AlreadyExists, "target already exists") - } - defer func() { - err1 := wc.Close() - if err != nil { - err = err1 - } - }() - return writeTargetFile(stream, wc) -} - -func CopyToCaller(ctx context.Context, fs fsutil.FS, c session.Caller, progress func(int, bool)) error { - method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy") - if !c.Supports(method) { - return errors.Errorf("method %s not supported by the client", method) - } - - client := NewFileSendClient(c.Conn()) - - cc, err := client.DiffCopy(ctx) - if err != nil { - return errors.WithStack(err) - } - - return sendDiffCopy(cc, fs, progress) -} - -func CopyFileWriter(ctx context.Context, md map[string]string, c session.Caller) (io.WriteCloser, error) { - method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy") - if !c.Supports(method) { - return nil, errors.Errorf("method %s not supported by the client", method) - } - - client := NewFileSendClient(c.Conn()) - - opts := make(map[string][]string, len(md)) - for k, v := range md { - opts[keyExporterMetaPrefix+k] = []string{v} - } - - ctx = metadata.NewOutgoingContext(ctx, opts) - - cc, err := client.DiffCopy(ctx) - if err != nil { - return nil, errors.WithStack(err) - } - - return newStreamWriter(cc), nil -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go deleted file mode 100644 index 6110307abb0e..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go +++ /dev/null @@ -1,677 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: filesync.proto - -package filesync - -import ( - bytes "bytes" - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/tonistiigi/fsutil/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// BytesMessage contains a chunk of byte data -type BytesMessage struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *BytesMessage) Reset() { *m = BytesMessage{} } -func (*BytesMessage) ProtoMessage() {} -func (*BytesMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_d1042549f1f24495, []int{0} -} -func (m *BytesMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BytesMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesMessage.Merge(m, src) -} -func (m *BytesMessage) XXX_Size() int { - return m.Size() -} -func (m *BytesMessage) XXX_DiscardUnknown() { - xxx_messageInfo_BytesMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesMessage proto.InternalMessageInfo - -func (m *BytesMessage) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterType((*BytesMessage)(nil), "moby.filesync.v1.BytesMessage") -} - -func init() { proto.RegisterFile("filesync.proto", fileDescriptor_d1042549f1f24495) } - -var fileDescriptor_d1042549f1f24495 = []byte{ - // 281 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49, - 0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa, - 0xd4, 0x83, 0x0b, 0x96, 0x19, 0x4a, 0xe9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, - 0xe7, 0xea, 0x97, 0xe4, 0xe7, 0x65, 0x16, 0x97, 0x64, 0x66, 0xa6, 0x67, 0xea, 0xa7, 0x15, 0x97, - 0x96, 0x64, 0xe6, 0xe8, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x97, 0x67, 0x16, 0xa5, 0x42, 0x0c, - 0x50, 0x52, 0xe2, 0xe2, 0x71, 0xaa, 0x2c, 0x49, 0x2d, 0xf6, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, - 0x15, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, - 0xb3, 0x8d, 0x9a, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83, 0x2b, 0xf3, 0x92, 0x85, 0xac, - 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85, 0x44, 0xf4, 0x20, 0xc6, 0xea, - 0x81, 0x8d, 0xd5, 0x0b, 0x48, 0x4c, 0xce, 0x4e, 0x2d, 0x91, 0xc2, 0x2a, 0xaa, 0xc1, 0x68, 0xc0, - 0x28, 0x64, 0xcd, 0xc5, 0x19, 0x92, 0x58, 0x14, 0x5c, 0x52, 0x94, 0x9a, 0x98, 0x4b, 0xaa, 0x66, - 0xa3, 0x28, 0xa8, 0x23, 0x52, 0xf3, 0x52, 0x84, 0xfc, 0x90, 0x1c, 0x21, 0xa7, 0x87, 0x1e, 0x06, - 0x7a, 0xc8, 0x3e, 0x92, 0x22, 0x20, 0x0f, 0x32, 0xdb, 0xc9, 0xee, 0xc2, 0x43, 0x39, 0x86, 0x1b, - 0x0f, 0xe5, 0x18, 0x3e, 0x3c, 0x94, 0x63, 0x6c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, - 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, - 0xc3, 0x87, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, - 0x1c, 0x43, 0x14, 0x07, 0xcc, 0xcc, 0x24, 0x36, 0x70, 0x60, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, - 0xff, 0xe6, 0x17, 0x63, 0x59, 0x9f, 0x01, 0x00, 0x00, -} - -func (this *BytesMessage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BytesMessage) - if !ok { - that2, ok := that.(BytesMessage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } 
else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *BytesMessage) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&filesync.BytesMessage{") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringFilesync(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// FileSyncClient is the client API for FileSync service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type FileSyncClient interface { - DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) - TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) -} - -type fileSyncClient struct { - cc *grpc.ClientConn -} - -func NewFileSyncClient(cc *grpc.ClientConn) FileSyncClient { - return &fileSyncClient{cc} -} - -func (c *fileSyncClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSync_DiffCopyClient, error) { - stream, err := c.cc.NewStream(ctx, &_FileSync_serviceDesc.Streams[0], "/moby.filesync.v1.FileSync/DiffCopy", opts...) - if err != nil { - return nil, err - } - x := &fileSyncDiffCopyClient{stream} - return x, nil -} - -type FileSync_DiffCopyClient interface { - Send(*types.Packet) error - Recv() (*types.Packet, error) - grpc.ClientStream -} - -type fileSyncDiffCopyClient struct { - grpc.ClientStream -} - -func (x *fileSyncDiffCopyClient) Send(m *types.Packet) error { - return x.ClientStream.SendMsg(m) -} - -func (x *fileSyncDiffCopyClient) Recv() (*types.Packet, error) { - m := new(types.Packet) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *fileSyncClient) TarStream(ctx context.Context, opts ...grpc.CallOption) (FileSync_TarStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_FileSync_serviceDesc.Streams[1], "/moby.filesync.v1.FileSync/TarStream", opts...) - if err != nil { - return nil, err - } - x := &fileSyncTarStreamClient{stream} - return x, nil -} - -type FileSync_TarStreamClient interface { - Send(*types.Packet) error - Recv() (*types.Packet, error) - grpc.ClientStream -} - -type fileSyncTarStreamClient struct { - grpc.ClientStream -} - -func (x *fileSyncTarStreamClient) Send(m *types.Packet) error { - return x.ClientStream.SendMsg(m) -} - -func (x *fileSyncTarStreamClient) Recv() (*types.Packet, error) { - m := new(types.Packet) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// FileSyncServer is the server API for FileSync service. -type FileSyncServer interface { - DiffCopy(FileSync_DiffCopyServer) error - TarStream(FileSync_TarStreamServer) error -} - -// UnimplementedFileSyncServer can be embedded to have forward compatible implementations. 
-type UnimplementedFileSyncServer struct { -} - -func (*UnimplementedFileSyncServer) DiffCopy(srv FileSync_DiffCopyServer) error { - return status.Errorf(codes.Unimplemented, "method DiffCopy not implemented") -} -func (*UnimplementedFileSyncServer) TarStream(srv FileSync_TarStreamServer) error { - return status.Errorf(codes.Unimplemented, "method TarStream not implemented") -} - -func RegisterFileSyncServer(s *grpc.Server, srv FileSyncServer) { - s.RegisterService(&_FileSync_serviceDesc, srv) -} - -func _FileSync_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(FileSyncServer).DiffCopy(&fileSyncDiffCopyServer{stream}) -} - -type FileSync_DiffCopyServer interface { - Send(*types.Packet) error - Recv() (*types.Packet, error) - grpc.ServerStream -} - -type fileSyncDiffCopyServer struct { - grpc.ServerStream -} - -func (x *fileSyncDiffCopyServer) Send(m *types.Packet) error { - return x.ServerStream.SendMsg(m) -} - -func (x *fileSyncDiffCopyServer) Recv() (*types.Packet, error) { - m := new(types.Packet) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func _FileSync_TarStream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(FileSyncServer).TarStream(&fileSyncTarStreamServer{stream}) -} - -type FileSync_TarStreamServer interface { - Send(*types.Packet) error - Recv() (*types.Packet, error) - grpc.ServerStream -} - -type fileSyncTarStreamServer struct { - grpc.ServerStream -} - -func (x *fileSyncTarStreamServer) Send(m *types.Packet) error { - return x.ServerStream.SendMsg(m) -} - -func (x *fileSyncTarStreamServer) Recv() (*types.Packet, error) { - m := new(types.Packet) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _FileSync_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.filesync.v1.FileSync", - HandlerType: (*FileSyncServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "DiffCopy", - Handler: _FileSync_DiffCopy_Handler, - ServerStreams: true, - ClientStreams: true, - }, - { - StreamName: "TarStream", - Handler: _FileSync_TarStream_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "filesync.proto", -} - -// FileSendClient is the client API for FileSend service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type FileSendClient interface { - DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) -} - -type fileSendClient struct { - cc *grpc.ClientConn -} - -func NewFileSendClient(cc *grpc.ClientConn) FileSendClient { - return &fileSendClient{cc} -} - -func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) { - stream, err := c.cc.NewStream(ctx, &_FileSend_serviceDesc.Streams[0], "/moby.filesync.v1.FileSend/DiffCopy", opts...) 
- if err != nil { - return nil, err - } - x := &fileSendDiffCopyClient{stream} - return x, nil -} - -type FileSend_DiffCopyClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream -} - -type fileSendDiffCopyClient struct { - grpc.ClientStream -} - -func (x *fileSendDiffCopyClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// FileSendServer is the server API for FileSend service. -type FileSendServer interface { - DiffCopy(FileSend_DiffCopyServer) error -} - -// UnimplementedFileSendServer can be embedded to have forward compatible implementations. -type UnimplementedFileSendServer struct { -} - -func (*UnimplementedFileSendServer) DiffCopy(srv FileSend_DiffCopyServer) error { - return status.Errorf(codes.Unimplemented, "method DiffCopy not implemented") -} - -func RegisterFileSendServer(s *grpc.Server, srv FileSendServer) { - s.RegisterService(&_FileSend_serviceDesc, srv) -} - -func _FileSend_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(FileSendServer).DiffCopy(&fileSendDiffCopyServer{stream}) -} - -type FileSend_DiffCopyServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream -} - -type fileSendDiffCopyServer struct { - grpc.ServerStream -} - -func (x *fileSendDiffCopyServer) Send(m *BytesMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *fileSendDiffCopyServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _FileSend_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.filesync.v1.FileSend", - HandlerType: (*FileSendServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "DiffCopy", - Handler: _FileSend_DiffCopy_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "filesync.proto", -} - -func (m *BytesMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintFilesync(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintFilesync(dAtA []byte, offset int, v uint64) int { - offset -= sovFilesync(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *BytesMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovFilesync(uint64(l)) - } - return n -} - -func sovFilesync(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozFilesync(x uint64) (n int) { - return sovFilesync(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *BytesMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BytesMessage{`, - `Data:` + 
fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringFilesync(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *BytesMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFilesync - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowFilesync - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthFilesync - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthFilesync - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipFilesync(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthFilesync - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipFilesync(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFilesync - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFilesync - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowFilesync - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthFilesync - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupFilesync - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthFilesync - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthFilesync = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowFilesync = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupFilesync = 
fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto deleted file mode 100644 index 9e39179285ee..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package moby.filesync.v1; - -option go_package = "filesync"; - -import "github.com/tonistiigi/fsutil/types/wire.proto"; - -service FileSync{ - rpc DiffCopy(stream fsutil.types.Packet) returns (stream fsutil.types.Packet); - rpc TarStream(stream fsutil.types.Packet) returns (stream fsutil.types.Packet); -} - -service FileSend{ - rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage); -} - - -// BytesMessage contains a chunk of byte data -message BytesMessage{ - bytes data = 1; -} diff --git a/vendor/github.com/moby/buildkit/session/filesync/generate.go b/vendor/github.com/moby/buildkit/session/filesync/generate.go deleted file mode 100644 index fbd72742b856..000000000000 --- a/vendor/github.com/moby/buildkit/session/filesync/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package filesync - -//go:generate protoc -I=. -I=../../vendor/ -I=../../vendor/github.com/tonistiigi/fsutil/types/ --gogoslick_out=plugins=grpc:. filesync.proto diff --git a/vendor/github.com/moby/buildkit/session/group.go b/vendor/github.com/moby/buildkit/session/group.go deleted file mode 100644 index 4b9ba221f5fe..000000000000 --- a/vendor/github.com/moby/buildkit/session/group.go +++ /dev/null @@ -1,88 +0,0 @@ -package session - -import ( - "context" - "time" - - "github.com/pkg/errors" -) - -type Group interface { - SessionIterator() Iterator -} -type Iterator interface { - NextSession() string -} - -func NewGroup(ids ...string) Group { - return &group{ids: ids} -} - -type group struct { - ids []string -} - -func (g *group) SessionIterator() Iterator { - return &group{ids: g.ids} -} - -func (g *group) NextSession() string { - if len(g.ids) == 0 { - return "" - } - v := g.ids[0] - g.ids = g.ids[1:] - return v -} - -func AllSessionIDs(g Group) (out []string) { - if g == nil { - return nil - } - it := g.SessionIterator() - if it == nil { - return nil - } - for { - v := it.NextSession() - if v == "" { - return - } - out = append(out, v) - } -} - -func (sm *Manager) Any(ctx context.Context, g Group, f func(context.Context, string, Caller) error) error { - if g == nil { - return nil - } - - iter := g.SessionIterator() - if iter == nil { - return nil - } - - var lastErr error - for { - id := iter.NextSession() - if id == "" { - if lastErr != nil { - return lastErr - } - return errors.Errorf("no active sessions") - } - - timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - c, err := sm.Get(timeoutCtx, id, false) - if err != nil { - lastErr = err - continue - } - if err := f(ctx, id, c); err != nil { - lastErr = err - continue - } - return nil - } -} diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go deleted file mode 100644 index 728ea2e17c30..000000000000 --- a/vendor/github.com/moby/buildkit/session/grpc.go +++ /dev/null @@ -1,99 +0,0 @@ -package session - -import ( - "context" - "net" - "sync/atomic" - "time" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/moby/buildkit/util/grpcerrors" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - 
"github.com/sirupsen/logrus" - "golang.org/x/net/http2" - "google.golang.org/grpc" - "google.golang.org/grpc/health/grpc_health_v1" -) - -func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { - go func() { - <-ctx.Done() - conn.Close() - }() - logrus.Debugf("serving grpc connection") - (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) -} - -func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc.ClientConn, error) { - var unary []grpc.UnaryClientInterceptor - var stream []grpc.StreamClientInterceptor - - var dialCount int64 - dialer := grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { - if c := atomic.AddInt64(&dialCount, 1); c > 1 { - return nil, errors.Errorf("only one connection allowed") - } - return conn, nil - }) - - dialOpts := []grpc.DialOption{ - dialer, - grpc.WithInsecure(), - } - - if span := opentracing.SpanFromContext(ctx); span != nil { - tracer := span.Tracer() - unary = append(unary, otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())) - stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())) - } - - unary = append(unary, grpcerrors.UnaryClientInterceptor) - stream = append(stream, grpcerrors.StreamClientInterceptor) - - if len(unary) == 1 { - dialOpts = append(dialOpts, grpc.WithUnaryInterceptor(unary[0])) - } else if len(unary) > 1 { - dialOpts = append(dialOpts, grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unary...))) - } - - if len(stream) == 1 { - dialOpts = append(dialOpts, grpc.WithStreamInterceptor(stream[0])) - } else if len(stream) > 1 { - dialOpts = append(dialOpts, grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(stream...))) - } - - cc, err := grpc.DialContext(ctx, "localhost", dialOpts...) 
- if err != nil { - return nil, nil, errors.Wrap(err, "failed to create grpc client") - } - - ctx, cancel := context.WithCancel(ctx) - go monitorHealth(ctx, cc, cancel) - - return ctx, cc, nil -} - -func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func()) { - defer cancelConn() - defer cc.Close() - - ticker := time.NewTicker(1 * time.Second) - defer ticker.Stop() - healthClient := grpc_health_v1.NewHealthClient(cc) - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - _, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{}) - cancel() - if err != nil { - return - } - } - } -} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go deleted file mode 100644 index 5f0cf3d77a39..000000000000 --- a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go +++ /dev/null @@ -1,167 +0,0 @@ -package grpchijack - -import ( - "context" - "io" - "net" - "strings" - "sync" - "time" - - controlapi "github.com/moby/buildkit/api/services/control" - "github.com/moby/buildkit/session" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -func Dialer(api controlapi.ControlClient) session.Dialer { - return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { - - meta = lowerHeaders(meta) - - md := metadata.MD(meta) - - ctx = metadata.NewOutgoingContext(ctx, md) - - stream, err := api.Session(ctx) - if err != nil { - return nil, err - } - - c, _ := streamToConn(stream) - return c, nil - } -} - -type stream interface { - Context() context.Context - SendMsg(m interface{}) error - RecvMsg(m interface{}) error -} - -func streamToConn(stream stream) (net.Conn, <-chan struct{}) { - closeCh := make(chan struct{}) - c := &conn{stream: stream, buf: make([]byte, 32*1<<10), closeCh: closeCh} - return c, closeCh -} - -type conn struct { - stream stream - buf []byte - lastBuf []byte - - closedOnce sync.Once - readMu sync.Mutex - writeMu sync.Mutex - closeCh chan struct{} -} - -func (c *conn) Read(b []byte) (n int, err error) { - c.readMu.Lock() - defer c.readMu.Unlock() - - if c.lastBuf != nil { - n := copy(b, c.lastBuf) - c.lastBuf = c.lastBuf[n:] - if len(c.lastBuf) == 0 { - c.lastBuf = nil - } - return n, nil - } - m := new(controlapi.BytesMessage) - m.Data = c.buf - - if err := c.stream.RecvMsg(m); err != nil { - return 0, err - } - c.buf = m.Data[:cap(m.Data)] - - n = copy(b, m.Data) - if n < len(m.Data) { - c.lastBuf = m.Data[n:] - } - - return n, nil -} - -func (c *conn) Write(b []byte) (int, error) { - c.writeMu.Lock() - defer c.writeMu.Unlock() - m := &controlapi.BytesMessage{Data: b} - if err := c.stream.SendMsg(m); err != nil { - return 0, err - } - return len(b), nil -} - -func (c *conn) Close() (err error) { - c.closedOnce.Do(func() { - defer func() { - close(c.closeCh) - }() - - if cs, ok := c.stream.(grpc.ClientStream); ok { - c.writeMu.Lock() - err = cs.CloseSend() - c.writeMu.Unlock() - if err != nil { - return - } - } - - c.readMu.Lock() - for { - m := new(controlapi.BytesMessage) - m.Data = c.buf - err = c.stream.RecvMsg(m) - if err != nil { - if err != io.EOF { - c.readMu.Unlock() - return - } - err = nil - break - } - c.buf = m.Data[:cap(m.Data)] - c.lastBuf = append(c.lastBuf, c.buf...) 
- } - c.readMu.Unlock() - - }) - return nil -} - -func (c *conn) LocalAddr() net.Addr { - return dummyAddr{} -} -func (c *conn) RemoteAddr() net.Addr { - return dummyAddr{} -} -func (c *conn) SetDeadline(t time.Time) error { - return nil -} -func (c *conn) SetReadDeadline(t time.Time) error { - return nil -} -func (c *conn) SetWriteDeadline(t time.Time) error { - return nil -} - -type dummyAddr struct { -} - -func (d dummyAddr) Network() string { - return "tcp" -} - -func (d dummyAddr) String() string { - return "localhost" -} - -func lowerHeaders(in map[string][]string) map[string][]string { - out := map[string][]string{} - for k := range in { - out[strings.ToLower(k)] = in[k] - } - return out -} diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go b/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go deleted file mode 100644 index 096a9e806f2f..000000000000 --- a/vendor/github.com/moby/buildkit/session/grpchijack/hijack.go +++ /dev/null @@ -1,15 +0,0 @@ -package grpchijack - -import ( - "net" - - controlapi "github.com/moby/buildkit/api/services/control" - "google.golang.org/grpc/metadata" -) - -// Hijack hijacks session to a connection. -func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) { - md, _ := metadata.FromIncomingContext(stream.Context()) - c, closeCh := streamToConn(stream) - return c, closeCh, md -} diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go deleted file mode 100644 index edac93063c38..000000000000 --- a/vendor/github.com/moby/buildkit/session/manager.go +++ /dev/null @@ -1,224 +0,0 @@ -package session - -import ( - "context" - "net" - "net/http" - "strings" - "sync" - - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -// Caller can invoke requests on the session -type Caller interface { - Context() context.Context - Supports(method string) bool - Conn() *grpc.ClientConn - Name() string - SharedKey() string -} - -type client struct { - Session - cc *grpc.ClientConn - supported map[string]struct{} -} - -// Manager is a controller for accessing currently active sessions -type Manager struct { - sessions map[string]*client - mu sync.Mutex - updateCondition *sync.Cond -} - -// NewManager returns a new Manager -func NewManager() (*Manager, error) { - sm := &Manager{ - sessions: make(map[string]*client), - } - sm.updateCondition = sync.NewCond(&sm.mu) - return sm, nil -} - -// HandleHTTPRequest handles an incoming HTTP request -func (sm *Manager) HandleHTTPRequest(ctx context.Context, w http.ResponseWriter, r *http.Request) error { - hijacker, ok := w.(http.Hijacker) - if !ok { - return errors.New("handler does not support hijack") - } - - id := r.Header.Get(headerSessionID) - - proto := r.Header.Get("Upgrade") - - sm.mu.Lock() - if _, ok := sm.sessions[id]; ok { - sm.mu.Unlock() - return errors.Errorf("session %s already exists", id) - } - - if proto == "" { - sm.mu.Unlock() - return errors.New("no upgrade proto in request") - } - - if proto != "h2c" { - sm.mu.Unlock() - return errors.Errorf("protocol %s not supported", proto) - } - - conn, _, err := hijacker.Hijack() - if err != nil { - sm.mu.Unlock() - return errors.Wrap(err, "failed to hijack connection") - } - - resp := &http.Response{ - StatusCode: http.StatusSwitchingProtocols, - ProtoMajor: 1, - ProtoMinor: 1, - Header: http.Header{}, - } - resp.Header.Set("Connection", "Upgrade") - resp.Header.Set("Upgrade", proto) - - // set raw mode - conn.Write([]byte{}) - 
resp.Write(conn) - - return sm.handleConn(ctx, conn, r.Header) -} - -// HandleConn handles an incoming raw connection -func (sm *Manager) HandleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { - sm.mu.Lock() - return sm.handleConn(ctx, conn, opts) -} - -// caller needs to take lock, this function will release it -func (sm *Manager) handleConn(ctx context.Context, conn net.Conn, opts map[string][]string) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - opts = canonicalHeaders(opts) - - h := http.Header(opts) - id := h.Get(headerSessionID) - name := h.Get(headerSessionName) - sharedKey := h.Get(headerSessionSharedKey) - - ctx, cc, err := grpcClientConn(ctx, conn) - if err != nil { - sm.mu.Unlock() - return err - } - - c := &client{ - Session: Session{ - id: id, - name: name, - sharedKey: sharedKey, - ctx: ctx, - cancelCtx: cancel, - done: make(chan struct{}), - }, - cc: cc, - supported: make(map[string]struct{}), - } - - for _, m := range opts[headerSessionMethod] { - c.supported[strings.ToLower(m)] = struct{}{} - } - sm.sessions[id] = c - sm.updateCondition.Broadcast() - sm.mu.Unlock() - - defer func() { - sm.mu.Lock() - delete(sm.sessions, id) - sm.mu.Unlock() - }() - - <-c.ctx.Done() - conn.Close() - close(c.done) - - return nil -} - -// Get returns a session by ID -func (sm *Manager) Get(ctx context.Context, id string, noWait bool) (Caller, error) { - // session prefix is used to identify vertexes with different contexts so - // they would not collide, but for lookup we don't need the prefix - if p := strings.SplitN(id, ":", 2); len(p) == 2 && len(p[1]) > 0 { - id = p[1] - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - go func() { - select { - case <-ctx.Done(): - sm.mu.Lock() - sm.updateCondition.Broadcast() - sm.mu.Unlock() - } - }() - - var c *client - - sm.mu.Lock() - for { - select { - case <-ctx.Done(): - sm.mu.Unlock() - return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id) - default: - } - var ok bool - c, ok = sm.sessions[id] - if (!ok || c.closed()) && !noWait { - sm.updateCondition.Wait() - continue - } - sm.mu.Unlock() - break - } - - if c == nil { - return nil, nil - } - - return c, nil -} - -func (c *client) Context() context.Context { - return c.context() -} - -func (c *client) Name() string { - return c.name -} - -func (c *client) SharedKey() string { - return c.sharedKey -} - -func (c *client) Supports(url string) bool { - _, ok := c.supported[strings.ToLower(url)] - return ok -} -func (c *client) Conn() *grpc.ClientConn { - return c.cc -} - -func canonicalHeaders(in map[string][]string) map[string][]string { - out := map[string][]string{} - for k := range in { - out[http.CanonicalHeaderKey(k)] = in[k] - } - return out -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/generate.go b/vendor/github.com/moby/buildkit/session/secrets/generate.go deleted file mode 100644 index 68716a95c665..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package secrets - -//go:generate protoc --gogoslick_out=plugins=grpc:. 
secrets.proto diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.go b/vendor/github.com/moby/buildkit/session/secrets/secrets.go deleted file mode 100644 index 604199df8e76..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secrets.go +++ /dev/null @@ -1,30 +0,0 @@ -package secrets - -import ( - "context" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/util/grpcerrors" - "github.com/pkg/errors" - "google.golang.org/grpc/codes" -) - -type SecretStore interface { - GetSecret(context.Context, string) ([]byte, error) -} - -var ErrNotFound = errors.Errorf("not found") - -func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error) { - client := NewSecretsClient(c.Conn()) - resp, err := client.GetSecret(ctx, &GetSecretRequest{ - ID: id, - }) - if err != nil { - if code := grpcerrors.Code(err); code == codes.Unimplemented || code == codes.NotFound { - return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id) - } - return nil, err - } - return resp.Data, nil -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go b/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go deleted file mode 100644 index 3fadef0f89c7..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secrets.pb.go +++ /dev/null @@ -1,880 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: secrets.proto - -package secrets - -import ( - bytes "bytes" - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type GetSecretRequest struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Annotations map[string]string `protobuf:"bytes,2,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} } -func (*GetSecretRequest) ProtoMessage() {} -func (*GetSecretRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_d4bc6c625e214507, []int{0} -} -func (m *GetSecretRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetSecretRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetSecretRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetSecretRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSecretRequest.Merge(m, src) -} -func (m *GetSecretRequest) XXX_Size() int { - return m.Size() -} -func (m *GetSecretRequest) XXX_DiscardUnknown() { - xxx_messageInfo_GetSecretRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSecretRequest proto.InternalMessageInfo - -func (m *GetSecretRequest) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *GetSecretRequest) GetAnnotations() map[string]string { - if m != nil { - return m.Annotations - } - return nil -} - -type GetSecretResponse struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} } -func (*GetSecretResponse) ProtoMessage() {} -func (*GetSecretResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d4bc6c625e214507, []int{1} -} -func (m *GetSecretResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetSecretResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GetSecretResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GetSecretResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetSecretResponse.Merge(m, src) -} -func (m *GetSecretResponse) XXX_Size() int { - return m.Size() -} -func (m *GetSecretResponse) XXX_DiscardUnknown() { - xxx_messageInfo_GetSecretResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_GetSecretResponse proto.InternalMessageInfo - -func (m *GetSecretResponse) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterType((*GetSecretRequest)(nil), "moby.buildkit.secrets.v1.GetSecretRequest") - proto.RegisterMapType((map[string]string)(nil), "moby.buildkit.secrets.v1.GetSecretRequest.AnnotationsEntry") - proto.RegisterType((*GetSecretResponse)(nil), "moby.buildkit.secrets.v1.GetSecretResponse") -} - -func init() { proto.RegisterFile("secrets.proto", fileDescriptor_d4bc6c625e214507) } - -var fileDescriptor_d4bc6c625e214507 = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2d, 0x4e, 0x4d, 0x2e, - 0x4a, 0x2d, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, - 0x4b, 0x2a, 0xcd, 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x83, 0x49, 0x96, 
0x19, 0x2a, 0x1d, 0x64, - 0xe4, 0x12, 0x70, 0x4f, 0x2d, 0x09, 0x06, 0x8b, 0x04, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x08, - 0xf1, 0x71, 0x31, 0x79, 0xba, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x06, 0x31, 0x79, 0xba, 0x08, - 0xc5, 0x72, 0x71, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x30, - 0x29, 0x30, 0x6b, 0x70, 0x1b, 0x59, 0xeb, 0xe1, 0x32, 0x54, 0x0f, 0xdd, 0x40, 0x3d, 0x47, 0x84, - 0x6e, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x64, 0xf3, 0xa4, 0xec, 0xb8, 0x04, 0xd0, 0x15, 0x08, - 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x42, 0xdd, 0x00, 0x62, 0x0a, 0x89, 0x70, 0xb1, 0x96, 0x25, - 0xe6, 0x94, 0xa6, 0x4a, 0x30, 0x81, 0xc5, 0x20, 0x1c, 0x2b, 0x26, 0x0b, 0x46, 0x25, 0x75, 0x2e, - 0x41, 0x24, 0x1b, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x84, 0xb8, 0x58, 0x52, 0x12, 0x4b, - 0x12, 0xc1, 0x26, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xf9, 0x5c, 0xec, 0x10, 0x55, 0xc5, 0x42, 0x29, - 0x5c, 0x9c, 0x70, 0x3d, 0x42, 0x5a, 0xc4, 0x7b, 0x45, 0x4a, 0x9b, 0x28, 0xb5, 0x10, 0x47, 0x38, - 0xd9, 0x5e, 0x78, 0x28, 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, - 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, - 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, - 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x62, 0x87, 0x9a, 0x99, 0xc4, 0x06, 0x8e, - 0x3d, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x38, 0xec, 0x1f, 0xce, 0x01, 0x00, 0x00, -} - -func (this *GetSecretRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetSecretRequest) - if !ok { - that2, ok := that.(GetSecretRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ID != that1.ID { - return false - } - if len(this.Annotations) != len(that1.Annotations) { - return false - } - for i := range this.Annotations { - if this.Annotations[i] != that1.Annotations[i] { - return false - } - } - return true -} -func (this *GetSecretResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GetSecretResponse) - if !ok { - that2, ok := that.(GetSecretResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *GetSecretRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 6) - s = append(s, "&secrets.GetSecretRequest{") - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%#v: %#v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - if this.Annotations != nil { - s = append(s, "Annotations: "+mapStringForAnnotations+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func (this *GetSecretResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&secrets.GetSecretResponse{") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = 
append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringSecrets(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// SecretsClient is the client API for Secrets service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SecretsClient interface { - GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) -} - -type secretsClient struct { - cc *grpc.ClientConn -} - -func NewSecretsClient(cc *grpc.ClientConn) SecretsClient { - return &secretsClient{cc} -} - -func (c *secretsClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) { - out := new(GetSecretResponse) - err := c.cc.Invoke(ctx, "/moby.buildkit.secrets.v1.Secrets/GetSecret", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// SecretsServer is the server API for Secrets service. -type SecretsServer interface { - GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) -} - -// UnimplementedSecretsServer can be embedded to have forward compatible implementations. -type UnimplementedSecretsServer struct { -} - -func (*UnimplementedSecretsServer) GetSecret(ctx context.Context, req *GetSecretRequest) (*GetSecretResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSecret not implemented") -} - -func RegisterSecretsServer(s *grpc.Server, srv SecretsServer) { - s.RegisterService(&_Secrets_serviceDesc, srv) -} - -func _Secrets_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetSecretRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SecretsServer).GetSecret(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.buildkit.secrets.v1.Secrets/GetSecret", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SecretsServer).GetSecret(ctx, req.(*GetSecretRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Secrets_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.buildkit.secrets.v1.Secrets", - HandlerType: (*SecretsServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetSecret", - Handler: _Secrets_GetSecret_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "secrets.proto", -} - -func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetSecretRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Annotations) > 0 { - for k := range 
m.Annotations { - v := m.Annotations[k] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintSecrets(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintSecrets(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintSecrets(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintSecrets(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetSecretResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetSecretResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintSecrets(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintSecrets(dAtA []byte, offset int, v uint64) int { - offset -= sovSecrets(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GetSecretRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovSecrets(uint64(l)) - } - if len(m.Annotations) > 0 { - for k, v := range m.Annotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovSecrets(uint64(len(k))) + 1 + len(v) + sovSecrets(uint64(len(v))) - n += mapEntrySize + 1 + sovSecrets(uint64(mapEntrySize)) - } - } - return n -} - -func (m *GetSecretResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovSecrets(uint64(l)) - } - return n -} - -func sovSecrets(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSecrets(x uint64) (n int) { - return sovSecrets(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *GetSecretRequest) String() string { - if this == nil { - return "nil" - } - keysForAnnotations := make([]string, 0, len(this.Annotations)) - for k, _ := range this.Annotations { - keysForAnnotations = append(keysForAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - mapStringForAnnotations := "map[string]string{" - for _, k := range keysForAnnotations { - mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) - } - mapStringForAnnotations += "}" - s := strings.Join([]string{`&GetSecretRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Annotations:` + mapStringForAnnotations + `,`, - `}`, - }, "") - return s -} -func (this *GetSecretResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GetSecretResponse{`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringSecrets(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *GetSecretRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSecrets - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSecrets - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthSecrets - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthSecrets - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthSecrets - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthSecrets - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthSecrets - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthSecrets - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - 
iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipSecrets(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSecrets - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSecrets(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSecrets - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetSecretResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSecrets - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSecrets - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSecrets - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSecrets(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSecrets - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSecrets(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSecrets - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSecrets - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSecrets - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSecrets - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSecrets - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSecrets - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSecrets = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSecrets = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSecrets = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.proto b/vendor/github.com/moby/buildkit/session/secrets/secrets.proto deleted file mode 100644 index 17d862450d9c..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secrets.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.secrets.v1; - -option go_package = "secrets"; - -service Secrets{ - rpc GetSecret(GetSecretRequest) returns (GetSecretResponse); -} - - -message GetSecretRequest { - string ID = 1; - map annotations = 2; -} - -message GetSecretResponse { - bytes data = 1; -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go b/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go deleted file mode 100644 index ea203bf02efd..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/secretsprovider.go +++ /dev/null @@ -1,60 +0,0 @@ -package secretsprovider - -import ( - "context" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/secrets" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// MaxSecretSize is the maximum byte length allowed for a secret -const MaxSecretSize = 500 * 1024 // 500KB - -func NewSecretProvider(store secrets.SecretStore) session.Attachable { - return &secretProvider{ - store: store, - } -} - -type secretProvider struct { - store secrets.SecretStore -} - -func (sp 
*secretProvider) Register(server *grpc.Server) { - secrets.RegisterSecretsServer(server, sp) -} - -func (sp *secretProvider) GetSecret(ctx context.Context, req *secrets.GetSecretRequest) (*secrets.GetSecretResponse, error) { - dt, err := sp.store.GetSecret(ctx, req.ID) - if err != nil { - if errors.Is(err, secrets.ErrNotFound) { - return nil, status.Errorf(codes.NotFound, err.Error()) - } - return nil, err - } - if l := len(dt); l > MaxSecretSize { - return nil, errors.Errorf("invalid secret size %d", l) - } - - return &secrets.GetSecretResponse{ - Data: dt, - }, nil -} - -func FromMap(m map[string][]byte) session.Attachable { - return NewSecretProvider(mapStore(m)) -} - -type mapStore map[string][]byte - -func (m mapStore) GetSecret(ctx context.Context, id string) ([]byte, error) { - v, ok := m[id] - if !ok { - return nil, errors.WithStack(secrets.ErrNotFound) - } - return v, nil -} diff --git a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/store.go b/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/store.go deleted file mode 100644 index 3a846f84e2b9..000000000000 --- a/vendor/github.com/moby/buildkit/session/secrets/secretsprovider/store.go +++ /dev/null @@ -1,65 +0,0 @@ -package secretsprovider - -import ( - "context" - "io/ioutil" - "os" - - "github.com/moby/buildkit/session/secrets" - "github.com/pkg/errors" - "github.com/tonistiigi/units" -) - -type Source struct { - ID string - FilePath string - Env string -} - -func NewStore(files []Source) (secrets.SecretStore, error) { - m := map[string]Source{} - for _, f := range files { - if f.ID == "" { - return nil, errors.Errorf("secret missing ID") - } - if f.Env == "" && f.FilePath == "" { - if _, ok := os.LookupEnv(f.ID); ok { - f.Env = f.ID - } else { - f.FilePath = f.ID - } - } - if f.FilePath != "" { - fi, err := os.Stat(f.FilePath) - if err != nil { - return nil, errors.Wrapf(err, "failed to stat %s", f.FilePath) - } - if fi.Size() > MaxSecretSize { - return nil, errors.Errorf("secret %s too big. 
max size %#.f", f.ID, MaxSecretSize*units.B) - } - } - m[f.ID] = f - } - return &fileStore{ - m: m, - }, nil -} - -type fileStore struct { - m map[string]Source -} - -func (fs *fileStore) GetSecret(ctx context.Context, id string) ([]byte, error) { - v, ok := fs.m[id] - if !ok { - return nil, errors.WithStack(secrets.ErrNotFound) - } - if v.Env != "" { - return []byte(os.Getenv(v.Env)), nil - } - dt, err := ioutil.ReadFile(v.FilePath) - if err != nil { - return nil, err - } - return dt, nil -} diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go deleted file mode 100644 index 02c7420a8e8e..000000000000 --- a/vendor/github.com/moby/buildkit/session/session.go +++ /dev/null @@ -1,161 +0,0 @@ -package session - -import ( - "context" - "net" - "strings" - - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" - "github.com/moby/buildkit/identity" - "github.com/moby/buildkit/util/grpcerrors" - opentracing "github.com/opentracing/opentracing-go" - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" -) - -const ( - headerSessionID = "X-Docker-Expose-Session-Uuid" - headerSessionName = "X-Docker-Expose-Session-Name" - headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey" - headerSessionMethod = "X-Docker-Expose-Session-Grpc-Method" -) - -// Dialer returns a connection that can be used by the session -type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) - -// Attachable defines a feature that can be exposed on a session -type Attachable interface { - Register(*grpc.Server) -} - -// Session is a long running connection between client and a daemon -type Session struct { - id string - name string - sharedKey string - ctx context.Context - cancelCtx func() - done chan struct{} - grpcServer *grpc.Server - conn net.Conn -} - -// NewSession returns a new long running session -func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) { - id := identity.NewID() - - var unary []grpc.UnaryServerInterceptor - var stream []grpc.StreamServerInterceptor - - serverOpts := []grpc.ServerOption{} - if span := opentracing.SpanFromContext(ctx); span != nil { - tracer := span.Tracer() - unary = append(unary, otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())) - stream = append(stream, otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())) - } - - unary = append(unary, grpcerrors.UnaryServerInterceptor) - stream = append(stream, grpcerrors.StreamServerInterceptor) - - if len(unary) == 1 { - serverOpts = append(serverOpts, grpc.UnaryInterceptor(unary[0])) - } else if len(unary) > 1 { - serverOpts = append(serverOpts, grpc.UnaryInterceptor(grpc_middleware.ChainUnaryServer(unary...))) - } - - if len(stream) == 1 { - serverOpts = append(serverOpts, grpc.StreamInterceptor(stream[0])) - } else if len(stream) > 1 { - serverOpts = append(serverOpts, grpc.StreamInterceptor(grpc_middleware.ChainStreamServer(stream...))) - } - - s := &Session{ - id: id, - name: name, - sharedKey: sharedKey, - grpcServer: grpc.NewServer(serverOpts...), - } - - grpc_health_v1.RegisterHealthServer(s.grpcServer, health.NewServer()) - - return s, nil -} - -// Allow enables a given service to be reachable through the grpc session -func (s *Session) Allow(a Attachable) { - a.Register(s.grpcServer) -} - -// ID returns unique identifier for the session 
-func (s *Session) ID() string { - return s.id -} - -// Run activates the session -func (s *Session) Run(ctx context.Context, dialer Dialer) error { - ctx, cancel := context.WithCancel(ctx) - s.cancelCtx = cancel - s.done = make(chan struct{}) - - defer cancel() - defer close(s.done) - - meta := make(map[string][]string) - meta[headerSessionID] = []string{s.id} - meta[headerSessionName] = []string{s.name} - meta[headerSessionSharedKey] = []string{s.sharedKey} - - for name, svc := range s.grpcServer.GetServiceInfo() { - for _, method := range svc.Methods { - meta[headerSessionMethod] = append(meta[headerSessionMethod], MethodURL(name, method.Name)) - } - } - conn, err := dialer(ctx, "h2c", meta) - if err != nil { - return errors.Wrap(err, "failed to dial gRPC") - } - s.conn = conn - serve(ctx, s.grpcServer, conn) - return nil -} - -// Close closes the session -func (s *Session) Close() error { - if s.cancelCtx != nil && s.done != nil { - if s.conn != nil { - s.conn.Close() - } - s.grpcServer.Stop() - <-s.done - } - return nil -} - -func (s *Session) context() context.Context { - return s.ctx -} - -func (s *Session) closed() bool { - select { - case <-s.context().Done(): - return true - default: - return false - } -} - -// MethodURL returns a gRPC method URL for service and method name -func MethodURL(s, m string) string { - return "/" + s + "/" + m -} - -func traceFilter() otgrpc.Option { - return otgrpc.IncludingSpans(func(parentSpanCtx opentracing.SpanContext, - method string, - req, resp interface{}) bool { - return !strings.HasSuffix(method, "Health/Check") - }) -} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go deleted file mode 100644 index 6db414894923..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/copy.go +++ /dev/null @@ -1,69 +0,0 @@ -package sshforward - -import ( - io "io" - - "github.com/pkg/errors" - context "golang.org/x/net/context" - "golang.org/x/sync/errgroup" -) - -type Stream interface { - SendMsg(m interface{}) error - RecvMsg(m interface{}) error -} - -func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error { - g, ctx := errgroup.WithContext(ctx) - - g.Go(func() (retErr error) { - p := &BytesMessage{} - for { - if err := stream.RecvMsg(p); err != nil { - conn.Close() - if err == io.EOF { - return nil - } - return errors.WithStack(err) - } - select { - case <-ctx.Done(): - conn.Close() - return ctx.Err() - default: - } - if _, err := conn.Write(p.Data); err != nil { - conn.Close() - return errors.WithStack(err) - } - p.Data = p.Data[:0] - } - }) - - g.Go(func() (retErr error) { - for { - buf := make([]byte, 32*1024) - n, err := conn.Read(buf) - switch { - case err == io.EOF: - if closeStream != nil { - closeStream() - } - return nil - case err != nil: - return errors.WithStack(err) - } - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - p := &BytesMessage{Data: buf[:n]} - if err := stream.SendMsg(p); err != nil { - return errors.WithStack(err) - } - } - }) - - return g.Wait() -} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/generate.go b/vendor/github.com/moby/buildkit/session/sshforward/generate.go deleted file mode 100644 index feecc7743c25..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package sshforward - -//go:generate protoc --gogoslick_out=plugins=grpc:. 
ssh.proto diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.go b/vendor/github.com/moby/buildkit/session/sshforward/ssh.go deleted file mode 100644 index a7a4c2e228a3..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/ssh.go +++ /dev/null @@ -1,118 +0,0 @@ -package sshforward - -import ( - "io/ioutil" - "net" - "os" - "path/filepath" - - "github.com/moby/buildkit/session" - "github.com/pkg/errors" - context "golang.org/x/net/context" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc/metadata" -) - -// DefaultID is the default ssh ID -const DefaultID = "default" - -const KeySSHID = "buildkit.ssh.id" - -type server struct { - caller session.Caller -} - -func (s *server) run(ctx context.Context, l net.Listener, id string) error { - eg, ctx := errgroup.WithContext(ctx) - - eg.Go(func() error { - <-ctx.Done() - return ctx.Err() - }) - - eg.Go(func() error { - for { - conn, err := l.Accept() - if err != nil { - return err - } - - client := NewSSHClient(s.caller.Conn()) - - opts := make(map[string][]string) - opts[KeySSHID] = []string{id} - ctx = metadata.NewOutgoingContext(ctx, opts) - - stream, err := client.ForwardAgent(ctx) - if err != nil { - conn.Close() - return err - } - - go Copy(ctx, conn, stream, stream.CloseSend) - } - }) - - return eg.Wait() -} - -type SocketOpt struct { - ID string - UID int - GID int - Mode int -} - -func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) { - dir, err := ioutil.TempDir("", ".buildkit-ssh-sock") - if err != nil { - return "", nil, errors.WithStack(err) - } - - defer func() { - if err != nil { - os.RemoveAll(dir) - } - }() - - if err := os.Chmod(dir, 0711); err != nil { - return "", nil, errors.WithStack(err) - } - - sockPath = filepath.Join(dir, "ssh_auth_sock") - - l, err := net.Listen("unix", sockPath) - if err != nil { - return "", nil, errors.WithStack(err) - } - - if err := os.Chown(sockPath, opt.UID, opt.GID); err != nil { - l.Close() - return "", nil, errors.WithStack(err) - } - if err := os.Chmod(sockPath, os.FileMode(opt.Mode)); err != nil { - l.Close() - return "", nil, errors.WithStack(err) - } - - s := &server{caller: c} - - id := opt.ID - if id == "" { - id = DefaultID - } - - go s.run(ctx, l, id) // erroring per connection allowed - - return sockPath, func() error { - err := l.Close() - os.RemoveAll(sockPath) - return errors.WithStack(err) - }, nil -} - -func CheckSSHID(ctx context.Context, c session.Caller, id string) error { - client := NewSSHClient(c.Conn()) - _, err := client.CheckAgent(ctx, &CheckAgentRequest{ID: id}) - return errors.WithStack(err) -} diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go b/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go deleted file mode 100644 index d2bda697c87a..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/ssh.pb.go +++ /dev/null @@ -1,909 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: ssh.proto - -package sshforward - -import ( - bytes "bytes" - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// BytesMessage contains a chunk of byte data -type BytesMessage struct { - Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *BytesMessage) Reset() { *m = BytesMessage{} } -func (*BytesMessage) ProtoMessage() {} -func (*BytesMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_ef0eae71e2e883eb, []int{0} -} -func (m *BytesMessage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BytesMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BytesMessage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BytesMessage) XXX_Merge(src proto.Message) { - xxx_messageInfo_BytesMessage.Merge(m, src) -} -func (m *BytesMessage) XXX_Size() int { - return m.Size() -} -func (m *BytesMessage) XXX_DiscardUnknown() { - xxx_messageInfo_BytesMessage.DiscardUnknown(m) -} - -var xxx_messageInfo_BytesMessage proto.InternalMessageInfo - -func (m *BytesMessage) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -type CheckAgentRequest struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` -} - -func (m *CheckAgentRequest) Reset() { *m = CheckAgentRequest{} } -func (*CheckAgentRequest) ProtoMessage() {} -func (*CheckAgentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_ef0eae71e2e883eb, []int{1} -} -func (m *CheckAgentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckAgentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckAgentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CheckAgentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckAgentRequest.Merge(m, src) -} -func (m *CheckAgentRequest) XXX_Size() int { - return m.Size() -} -func (m *CheckAgentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CheckAgentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CheckAgentRequest proto.InternalMessageInfo - -func (m *CheckAgentRequest) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -type CheckAgentResponse struct { -} - -func (m *CheckAgentResponse) Reset() { *m = CheckAgentResponse{} } -func (*CheckAgentResponse) ProtoMessage() {} -func (*CheckAgentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_ef0eae71e2e883eb, []int{2} -} -func (m *CheckAgentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CheckAgentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CheckAgentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CheckAgentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CheckAgentResponse.Merge(m, src) -} -func (m 
*CheckAgentResponse) XXX_Size() int { - return m.Size() -} -func (m *CheckAgentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CheckAgentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CheckAgentResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*BytesMessage)(nil), "moby.sshforward.v1.BytesMessage") - proto.RegisterType((*CheckAgentRequest)(nil), "moby.sshforward.v1.CheckAgentRequest") - proto.RegisterType((*CheckAgentResponse)(nil), "moby.sshforward.v1.CheckAgentResponse") -} - -func init() { proto.RegisterFile("ssh.proto", fileDescriptor_ef0eae71e2e883eb) } - -var fileDescriptor_ef0eae71e2e883eb = []byte{ - // 252 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2c, 0x2e, 0xce, 0xd0, - 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xca, 0xcd, 0x4f, 0xaa, 0xd4, 0x2b, 0x2e, 0xce, 0x48, - 0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xd1, 0x2b, 0x33, 0x54, 0x52, 0xe2, 0xe2, 0x71, 0xaa, 0x2c, - 0x49, 0x2d, 0xf6, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0x15, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, - 0x49, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0x94, 0xb9, 0x04, 0x9d, 0x33, - 0x52, 0x93, 0xb3, 0x1d, 0xd3, 0x53, 0xf3, 0x4a, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, - 0xf8, 0xb8, 0x98, 0x3c, 0x5d, 0xc0, 0xca, 0x38, 0x83, 0x98, 0x3c, 0x5d, 0x94, 0x44, 0xb8, 0x84, - 0x90, 0x15, 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x1a, 0xed, 0x62, 0xe4, 0x62, 0x0e, 0x0e, 0xf6, - 0x10, 0x8a, 0xe6, 0xe2, 0x42, 0xc8, 0x0a, 0xa9, 0xea, 0x61, 0xba, 0x44, 0x0f, 0xc3, 0x0a, 0x29, - 0x35, 0x42, 0xca, 0x20, 0x96, 0x08, 0x85, 0x71, 0xf1, 0xb8, 0x41, 0x14, 0x40, 0x8c, 0x57, 0xc0, - 0xa6, 0x0f, 0xd9, 0x97, 0x52, 0x04, 0x55, 0x68, 0x30, 0x1a, 0x30, 0x3a, 0x39, 0x5c, 0x78, 0x28, - 0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, - 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, - 0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, - 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xe2, 0x42, 0x98, 0x9a, 0xc4, 0x06, 0x0e, 0x78, 0x63, 0x40, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x6c, 0xe6, 0x6d, 0xb7, 0x85, 0x01, 0x00, 0x00, -} - -func (this *BytesMessage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*BytesMessage) - if !ok { - that2, ok := that.(BytesMessage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *CheckAgentRequest) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CheckAgentRequest) - if !ok { - that2, ok := that.(CheckAgentRequest) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.ID != that1.ID { - return false - } - return true -} -func (this *CheckAgentResponse) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CheckAgentResponse) - if !ok { - that2, ok := that.(CheckAgentResponse) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - return true -} -func (this *BytesMessage) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = 
append(s, "&sshforward.BytesMessage{") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CheckAgentRequest) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 5) - s = append(s, "&sshforward.CheckAgentRequest{") - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func (this *CheckAgentResponse) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 4) - s = append(s, "&sshforward.CheckAgentResponse{") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringSsh(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// SSHClient is the client API for SSH service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type SSHClient interface { - CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) - ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) -} - -type sSHClient struct { - cc *grpc.ClientConn -} - -func NewSSHClient(cc *grpc.ClientConn) SSHClient { - return &sSHClient{cc} -} - -func (c *sSHClient) CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) { - out := new(CheckAgentResponse) - err := c.cc.Invoke(ctx, "/moby.sshforward.v1.SSH/CheckAgent", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *sSHClient) ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) { - stream, err := c.cc.NewStream(ctx, &_SSH_serviceDesc.Streams[0], "/moby.sshforward.v1.SSH/ForwardAgent", opts...) - if err != nil { - return nil, err - } - x := &sSHForwardAgentClient{stream} - return x, nil -} - -type SSH_ForwardAgentClient interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ClientStream -} - -type sSHForwardAgentClient struct { - grpc.ClientStream -} - -func (x *sSHForwardAgentClient) Send(m *BytesMessage) error { - return x.ClientStream.SendMsg(m) -} - -func (x *sSHForwardAgentClient) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// SSHServer is the server API for SSH service. -type SSHServer interface { - CheckAgent(context.Context, *CheckAgentRequest) (*CheckAgentResponse, error) - ForwardAgent(SSH_ForwardAgentServer) error -} - -// UnimplementedSSHServer can be embedded to have forward compatible implementations. 
-type UnimplementedSSHServer struct { -} - -func (*UnimplementedSSHServer) CheckAgent(ctx context.Context, req *CheckAgentRequest) (*CheckAgentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CheckAgent not implemented") -} -func (*UnimplementedSSHServer) ForwardAgent(srv SSH_ForwardAgentServer) error { - return status.Errorf(codes.Unimplemented, "method ForwardAgent not implemented") -} - -func RegisterSSHServer(s *grpc.Server, srv SSHServer) { - s.RegisterService(&_SSH_serviceDesc, srv) -} - -func _SSH_CheckAgent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CheckAgentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(SSHServer).CheckAgent(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/moby.sshforward.v1.SSH/CheckAgent", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(SSHServer).CheckAgent(ctx, req.(*CheckAgentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _SSH_ForwardAgent_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(SSHServer).ForwardAgent(&sSHForwardAgentServer{stream}) -} - -type SSH_ForwardAgentServer interface { - Send(*BytesMessage) error - Recv() (*BytesMessage, error) - grpc.ServerStream -} - -type sSHForwardAgentServer struct { - grpc.ServerStream -} - -func (x *sSHForwardAgentServer) Send(m *BytesMessage) error { - return x.ServerStream.SendMsg(m) -} - -func (x *sSHForwardAgentServer) Recv() (*BytesMessage, error) { - m := new(BytesMessage) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -var _SSH_serviceDesc = grpc.ServiceDesc{ - ServiceName: "moby.sshforward.v1.SSH", - HandlerType: (*SSHServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CheckAgent", - Handler: _SSH_CheckAgent_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "ForwardAgent", - Handler: _SSH_ForwardAgent_Handler, - ServerStreams: true, - ClientStreams: true, - }, - }, - Metadata: "ssh.proto", -} - -func (m *BytesMessage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BytesMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintSsh(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CheckAgentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckAgentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckAgentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintSsh(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *CheckAgentResponse) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CheckAgentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CheckAgentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintSsh(dAtA []byte, offset int, v uint64) int { - offset -= sovSsh(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *BytesMessage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Data) - if l > 0 { - n += 1 + l + sovSsh(uint64(l)) - } - return n -} - -func (m *CheckAgentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovSsh(uint64(l)) - } - return n -} - -func (m *CheckAgentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovSsh(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozSsh(x uint64) (n int) { - return sovSsh(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *BytesMessage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BytesMessage{`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *CheckAgentRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckAgentRequest{`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `}`, - }, "") - return s -} -func (this *CheckAgentResponse) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CheckAgentResponse{`, - `}`, - }, "") - return s -} -func valueToStringSsh(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *BytesMessage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSsh - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthSsh - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSsh(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSsh - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckAgentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckAgentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckAgentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthSsh - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthSsh - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSsh(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSsh - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CheckAgentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSsh - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CheckAgentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CheckAgentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipSsh(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthSsh - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSsh(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSsh - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSsh - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSsh - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthSsh - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupSsh - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthSsh - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthSsh = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSsh = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupSsh = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto b/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto deleted file mode 100644 index 99f63436a610..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/ssh.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; - -package moby.sshforward.v1; - -option go_package = "sshforward"; - -service SSH { - rpc CheckAgent(CheckAgentRequest) returns (CheckAgentResponse); - rpc ForwardAgent(stream BytesMessage) returns (stream BytesMessage); -} - -// BytesMessage contains a chunk of byte data -message BytesMessage{ - bytes data = 1; -} - -message CheckAgentRequest { - string ID = 1; -} - -message CheckAgentResponse { -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go b/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go deleted file mode 100644 index 981eb96f5628..000000000000 --- a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go +++ /dev/null @@ -1,244 +0,0 @@ -package sshprovider - -import ( - "context" - "io" - "io/ioutil" - "net" - "os" - "runtime" - "strings" - "time" - - "github.com/moby/buildkit/session" - "github.com/moby/buildkit/session/sshforward" - "github.com/pkg/errors" - "golang.org/x/crypto/ssh" - "golang.org/x/crypto/ssh/agent" - "golang.org/x/sync/errgroup" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// AgentConfig is the config for a single exposed SSH agent -type AgentConfig struct { - ID string - Paths []string -} - -// NewSSHAgentProvider creates a session provider that allows access to ssh agent -func NewSSHAgentProvider(confs []AgentConfig) (session.Attachable, error) { - m := map[string]source{} - for _, conf := range confs { - if len(conf.Paths) == 0 || len(conf.Paths) == 1 && conf.Paths[0] == "" { - conf.Paths = []string{os.Getenv("SSH_AUTH_SOCK")} - } - - if conf.Paths[0] == "" { - p, err := getFallbackAgentPath() - if err != nil { - return nil, errors.Wrap(err, "invalid empty ssh agent socket") - } - conf.Paths[0] = p - } - - src, err := toAgentSource(conf.Paths) - if err != nil { - return nil, err - } - if conf.ID == "" { - conf.ID = sshforward.DefaultID - } - if _, ok := m[conf.ID]; ok { - return nil, errors.Errorf("invalid duplicate ID %s", conf.ID) - } - m[conf.ID] = src - } - - return &socketProvider{m: 
m}, nil -} - -type source struct { - agent agent.Agent - socket *socketDialer -} - -type socketDialer struct { - path string - dialer func(string) (net.Conn, error) -} - -func (s socketDialer) Dial() (net.Conn, error) { - return s.dialer(s.path) -} - -func (s socketDialer) String() string { - return s.path -} - -type socketProvider struct { - m map[string]source -} - -func (sp *socketProvider) Register(server *grpc.Server) { - sshforward.RegisterSSHServer(server, sp) -} - -func (sp *socketProvider) CheckAgent(ctx context.Context, req *sshforward.CheckAgentRequest) (*sshforward.CheckAgentResponse, error) { - id := sshforward.DefaultID - if req.ID != "" { - id = req.ID - } - if _, ok := sp.m[id]; !ok { - return &sshforward.CheckAgentResponse{}, errors.Errorf("unset ssh forward key %s", id) - } - return &sshforward.CheckAgentResponse{}, nil -} - -func (sp *socketProvider) ForwardAgent(stream sshforward.SSH_ForwardAgentServer) error { - id := sshforward.DefaultID - - opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object - - if v, ok := opts[sshforward.KeySSHID]; ok && len(v) > 0 && v[0] != "" { - id = v[0] - } - - src, ok := sp.m[id] - if !ok { - return errors.Errorf("unset ssh forward key %s", id) - } - - var a agent.Agent - - if src.socket != nil { - conn, err := src.socket.Dial() - if err != nil { - return errors.Wrapf(err, "failed to connect to %s", src.socket) - } - - a = &readOnlyAgent{agent.NewClient(conn)} - defer conn.Close() - } else { - a = src.agent - } - - s1, s2 := sockPair() - - eg, ctx := errgroup.WithContext(context.TODO()) - - eg.Go(func() error { - return agent.ServeAgent(a, s1) - }) - - eg.Go(func() error { - defer s1.Close() - return sshforward.Copy(ctx, s2, stream, nil) - }) - - return eg.Wait() -} - -func toAgentSource(paths []string) (source, error) { - var keys bool - var socket *socketDialer - a := agent.NewKeyring() - for _, p := range paths { - if socket != nil { - return source{}, errors.New("only single socket allowed") - } - - if parsed := getWindowsPipeDialer(p); parsed != nil { - socket = parsed - continue - } - - fi, err := os.Stat(p) - if err != nil { - return source{}, errors.WithStack(err) - } - if fi.Mode()&os.ModeSocket > 0 { - socket = &socketDialer{path: p, dialer: unixSocketDialer} - continue - } - - f, err := os.Open(p) - if err != nil { - return source{}, errors.Wrapf(err, "failed to open %s", p) - } - dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024}) - if err != nil { - return source{}, errors.Wrapf(err, "failed to read %s", p) - } - - k, err := ssh.ParseRawPrivateKey(dt) - if err != nil { - // On Windows, os.ModeSocket isn't appropriately set on the file mode. - // https://github.com/golang/go/issues/33357 - // If parsing the file fails, check to see if it kind of looks like socket-shaped. - if runtime.GOOS == "windows" && strings.Contains(string(dt), "socket") { - if keys { - return source{}, errors.Errorf("invalid combination of keys and sockets") - } - socket = &socketDialer{path: p, dialer: unixSocketDialer} - continue - } - - return source{}, errors.Wrapf(err, "failed to parse %s", p) // TODO: prompt passphrase? 
-		}
-		if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
-			return source{}, errors.Wrapf(err, "failed to add %s to agent", p)
-		}
-
-		keys = true
-	}
-
-	if socket != nil {
-		if keys {
-			return source{}, errors.Errorf("invalid combination of keys and sockets")
-		}
-		return source{socket: socket}, nil
-	}
-
-	return source{agent: a}, nil
-}
-
-func unixSocketDialer(path string) (net.Conn, error) {
-	return net.DialTimeout("unix", path, 2*time.Second)
-}
-
-func sockPair() (io.ReadWriteCloser, io.ReadWriteCloser) {
-	pr1, pw1 := io.Pipe()
-	pr2, pw2 := io.Pipe()
-	return &sock{pr1, pw2, pw1}, &sock{pr2, pw1, pw2}
-}
-
-type sock struct {
-	io.Reader
-	io.Writer
-	io.Closer
-}
-
-type readOnlyAgent struct {
-	agent.ExtendedAgent
-}
-
-func (a *readOnlyAgent) Add(_ agent.AddedKey) error {
-	return errors.Errorf("adding new keys not allowed by buildkit")
-}
-
-func (a *readOnlyAgent) Remove(_ ssh.PublicKey) error {
-	return errors.Errorf("removing keys not allowed by buildkit")
-}
-
-func (a *readOnlyAgent) RemoveAll() error {
-	return errors.Errorf("removing keys not allowed by buildkit")
-}
-
-func (a *readOnlyAgent) Lock(_ []byte) error {
-	return errors.Errorf("locking agent not allowed by buildkit")
-}
-
-func (a *readOnlyAgent) Extension(_ string, _ []byte) ([]byte, error) {
-	return nil, errors.Errorf("extensions not allowed by buildkit")
-}
diff --git a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_unix.go b/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_unix.go
deleted file mode 100644
index 07b6b7b1e93d..000000000000
--- a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_unix.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !windows
-
-package sshprovider
-
-import (
-	"github.com/pkg/errors"
-)
-
-func getFallbackAgentPath() (string, error) {
-	return "", errors.Errorf("make sure SSH_AUTH_SOCK is set")
-}
-
-func getWindowsPipeDialer(path string) *socketDialer {
-	return nil
-}
diff --git a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_windows.go b/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_windows.go
deleted file mode 100644
index 812e273c2e4c..000000000000
--- a/vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider_windows.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// +build windows
-
-package sshprovider
-
-import (
-	"net"
-	"regexp"
-	"strings"
-
-	"github.com/Microsoft/go-winio"
-	"github.com/pkg/errors"
-	"golang.org/x/sys/windows"
-)
-
-// Returns the Windows OpenSSH agent named pipe path, but
-// only if the agent is running. Returns an error otherwise.
-func getFallbackAgentPath() (string, error) {
-	// Windows OpenSSH agent uses a named pipe rather
-	// than a UNIX socket. These pipes do not play nice
-	// with os.Stat (which tries to open its target), so
-	// use a FindFirstFile syscall to check for existence.
-	var fd windows.Win32finddata
-
-	path := `\\.\pipe\openssh-ssh-agent`
-	pathPtr, _ := windows.UTF16PtrFromString(path)
-	handle, err := windows.FindFirstFile(pathPtr, &fd)
-
-	if err != nil {
-		msg := "Windows OpenSSH agent not available at %s." +
-			" Enable the SSH agent service or set SSH_AUTH_SOCK."
-		return "", errors.Errorf(msg, path)
-	}
-
-	_ = windows.CloseHandle(handle)
-
-	return path, nil
-}
-
-// Returns true if the path references a named pipe.
-func isWindowsPipePath(path string) bool {
-	// If path matches \\*\pipe\* then it references a named pipe
-	// and requires winio.DialPipe() rather than DialTimeout("unix").
-	// Slashes and backslashes may be used interchangeably in the path.
-	// Path separators may consist of multiple consecutive (back)slashes.
-	pipePattern := strings.ReplaceAll("^[/]{2}[^/]+[/]+pipe[/]+", "/", `\\/`)
-	ok, _ := regexp.MatchString(pipePattern, path)
-	return ok
-}
-
-func getWindowsPipeDialer(path string) *socketDialer {
-	if isWindowsPipePath(path) {
-		return &socketDialer{path: path, dialer: windowsPipeDialer}
-	}
-
-	return nil
-}
-
-func windowsPipeDialer(path string) (net.Conn, error) {
-	return winio.DialPipe(path, nil)
-}
diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go
deleted file mode 100644
index 0bf9603eb4f5..000000000000
--- a/vendor/github.com/moby/buildkit/solver/pb/attr.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package pb
-
-const AttrKeepGitDir = "git.keepgitdir"
-const AttrFullRemoteURL = "git.fullurl"
-const AttrAuthHeaderSecret = "git.authheadersecret"
-const AttrAuthTokenSecret = "git.authtokensecret"
-const AttrKnownSSHHosts = "git.knownsshhosts"
-const AttrMountSSHSock = "git.mountsshsock"
-const AttrLocalSessionID = "local.session"
-const AttrLocalUniqueID = "local.unique"
-const AttrIncludePatterns = "local.includepattern"
-const AttrFollowPaths = "local.followpaths"
-const AttrExcludePatterns = "local.excludepatterns"
-const AttrSharedKeyHint = "local.sharedkeyhint"
-const AttrLLBDefinitionFilename = "llbbuild.filename"
-
-const AttrHTTPChecksum = "http.checksum"
-const AttrHTTPFilename = "http.filename"
-const AttrHTTPPerm = "http.perm"
-const AttrHTTPUID = "http.uid"
-const AttrHTTPGID = "http.gid"
-
-const AttrImageResolveMode = "image.resolvemode"
-const AttrImageResolveModeDefault = "default"
-const AttrImageResolveModeForcePull = "pull"
-const AttrImageResolveModePreferLocal = "local"
-const AttrImageRecordType = "image.recordtype"
-
-type IsFileAction = isFileAction_Action
diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go
deleted file mode 100644
index aa9e39f9b777..000000000000
--- a/vendor/github.com/moby/buildkit/solver/pb/caps.go
+++ /dev/null
@@ -1,334 +0,0 @@
-package pb
-
-import "github.com/moby/buildkit/util/apicaps"
-
-var Caps apicaps.CapList
-
-// Every backwards or forwards non-compatible change needs to add a new capability row.
-// By default new capabilities should be experimental. After merge a capability is
-// considered immutable. After a capability is marked stable it should not be disabled.
- -const ( - CapSourceImage apicaps.CapID = "source.image" - CapSourceImageResolveMode apicaps.CapID = "source.image.resolvemode" - CapSourceLocal apicaps.CapID = "source.local" - CapSourceLocalUnique apicaps.CapID = "source.local.unique" - CapSourceLocalSessionID apicaps.CapID = "source.local.sessionid" - CapSourceLocalIncludePatterns apicaps.CapID = "source.local.includepatterns" - CapSourceLocalFollowPaths apicaps.CapID = "source.local.followpaths" - CapSourceLocalExcludePatterns apicaps.CapID = "source.local.excludepatterns" - CapSourceLocalSharedKeyHint apicaps.CapID = "source.local.sharedkeyhint" - - CapSourceGit apicaps.CapID = "source.git" - CapSourceGitKeepDir apicaps.CapID = "source.git.keepgitdir" - CapSourceGitFullURL apicaps.CapID = "source.git.fullurl" - CapSourceGitHTTPAuth apicaps.CapID = "source.git.httpauth" - CapSourceGitKnownSSHHosts apicaps.CapID = "source.git.knownsshhosts" - CapSourceGitMountSSHSock apicaps.CapID = "source.git.mountsshsock" - CapSourceGitSubdir apicaps.CapID = "source.git.subdir" - - CapSourceHTTP apicaps.CapID = "source.http" - CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum" - CapSourceHTTPPerm apicaps.CapID = "source.http.perm" - CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" - - CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename" - - CapExecMetaBase apicaps.CapID = "exec.meta.base" - CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" - CapExecMetaNetwork apicaps.CapID = "exec.meta.network" - CapExecMetaSecurity apicaps.CapID = "exec.meta.security" - CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" - CapExecMountBind apicaps.CapID = "exec.mount.bind" - CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" - CapExecMountCache apicaps.CapID = "exec.mount.cache" - CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" - CapExecMountSelector apicaps.CapID = "exec.mount.selector" - CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs" - CapExecMountSecret apicaps.CapID = "exec.mount.secret" - CapExecMountSSH apicaps.CapID = "exec.mount.ssh" - CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" - - CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1" - - CapFileBase apicaps.CapID = "file.base" - CapFileRmWildcard apicaps.CapID = "file.rm.wildcard" - CapFileCopyIncludeExcludePatterns apicaps.CapID = "file.copy.includeexcludepatterns" - - CapConstraints apicaps.CapID = "constraints" - CapPlatform apicaps.CapID = "platform" - - CapMetaIgnoreCache apicaps.CapID = "meta.ignorecache" - CapMetaDescription apicaps.CapID = "meta.description" - CapMetaExportCache apicaps.CapID = "meta.exportcache" -) - -func init() { - - Caps.Init(apicaps.Cap{ - ID: CapSourceImage, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceImageResolveMode, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocal, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalUnique, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalSessionID, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalIncludePatterns, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalFollowPaths, - Enabled: true, - Status: 
apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalExcludePatterns, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceLocalSharedKeyHint, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - Caps.Init(apicaps.Cap{ - ID: CapSourceGit, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitKeepDir, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitFullURL, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitHTTPAuth, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitKnownSSHHosts, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitMountSSHSock, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceGitSubdir, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTP, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTPChecksum, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTPPerm, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapSourceHTTPUIDGID, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapBuildOpLLBFileName, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaBase, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaProxy, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaNetwork, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaSetsDefaultPath, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaSecurity, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMetaSecurityDeviceWhitelistV1, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountBind, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountBindReadWriteNoOuput, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountCache, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountCacheSharing, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountSelector, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountTmpfs, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountSecret, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecMountSSH, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapExecCgroupsMounted, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapFileBase, - Enabled: true, - Status: 
apicaps.CapStatusPrerelease, - SupportedHint: map[string]string{ - "docker": "Docker v19.03", - "buildkit": "BuildKit v0.5.0", - }, - }) - - Caps.Init(apicaps.Cap{ - ID: CapFileRmWildcard, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapFileCopyIncludeExcludePatterns, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapConstraints, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapPlatform, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapMetaIgnoreCache, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapMetaDescription, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) - - Caps.Init(apicaps.Cap{ - ID: CapMetaExportCache, - Enabled: true, - Status: apicaps.CapStatusExperimental, - }) -} diff --git a/vendor/github.com/moby/buildkit/solver/pb/const.go b/vendor/github.com/moby/buildkit/solver/pb/const.go deleted file mode 100644 index c2d20b29f204..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/const.go +++ /dev/null @@ -1,25 +0,0 @@ -package pb - -// InputIndex is incrementing index to the input vertex -type InputIndex int64 - -// OutputIndex is incrementing index that another vertex can depend on -type OutputIndex int64 - -// RootMount is a base mountpoint -const RootMount = "/" - -// SkipOutput marks a disabled output index -const SkipOutput OutputIndex = -1 - -// Empty marks an input with no content -const Empty InputIndex = -1 - -// LLBBuilder is a special builder for BuildOp that directly builds LLB -const LLBBuilder InputIndex = -1 - -// LLBDefinitionInput marks an input that contains LLB definition for BuildOp -const LLBDefinitionInput = "buildkit.llb.definition" - -// LLBDefaultDefinitionFile is a filename containing the definition in LLBBuilder -const LLBDefaultDefinitionFile = LLBDefinitionInput diff --git a/vendor/github.com/moby/buildkit/solver/pb/generate.go b/vendor/github.com/moby/buildkit/solver/pb/generate.go deleted file mode 100644 index c31e148f2adf..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package pb - -//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. ops.proto diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go deleted file mode 100644 index 77feeac78108..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ /dev/null @@ -1,11333 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: ops.proto - -// Package pb provides the protobuf definition of LLB: low-level builder instruction. -// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. - -package pb - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - github_com_moby_buildkit_util_apicaps "github.com/moby/buildkit/util/apicaps" - github_com_opencontainers_go_digest "github.com/opencontainers/go-digest" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type NetMode int32 - -const ( - NetMode_UNSET NetMode = 0 - NetMode_HOST NetMode = 1 - NetMode_NONE NetMode = 2 -) - -var NetMode_name = map[int32]string{ - 0: "UNSET", - 1: "HOST", - 2: "NONE", -} - -var NetMode_value = map[string]int32{ - "UNSET": 0, - "HOST": 1, - "NONE": 2, -} - -func (x NetMode) String() string { - return proto.EnumName(NetMode_name, int32(x)) -} - -func (NetMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{0} -} - -type SecurityMode int32 - -const ( - SecurityMode_SANDBOX SecurityMode = 0 - SecurityMode_INSECURE SecurityMode = 1 -) - -var SecurityMode_name = map[int32]string{ - 0: "SANDBOX", - 1: "INSECURE", -} - -var SecurityMode_value = map[string]int32{ - "SANDBOX": 0, - "INSECURE": 1, -} - -func (x SecurityMode) String() string { - return proto.EnumName(SecurityMode_name, int32(x)) -} - -func (SecurityMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{1} -} - -// MountType defines a type of a mount from a supported set -type MountType int32 - -const ( - MountType_BIND MountType = 0 - MountType_SECRET MountType = 1 - MountType_SSH MountType = 2 - MountType_CACHE MountType = 3 - MountType_TMPFS MountType = 4 -) - -var MountType_name = map[int32]string{ - 0: "BIND", - 1: "SECRET", - 2: "SSH", - 3: "CACHE", - 4: "TMPFS", -} - -var MountType_value = map[string]int32{ - "BIND": 0, - "SECRET": 1, - "SSH": 2, - "CACHE": 3, - "TMPFS": 4, -} - -func (x MountType) String() string { - return proto.EnumName(MountType_name, int32(x)) -} - -func (MountType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{2} -} - -// CacheSharingOpt defines different sharing modes for cache mount -type CacheSharingOpt int32 - -const ( - // SHARED cache mount can be used concurrently by multiple writers - CacheSharingOpt_SHARED CacheSharingOpt = 0 - // PRIVATE creates a new mount if there are multiple writers - CacheSharingOpt_PRIVATE CacheSharingOpt = 1 - // LOCKED pauses second writer until first one releases the mount - CacheSharingOpt_LOCKED CacheSharingOpt = 2 -) - -var CacheSharingOpt_name = map[int32]string{ - 0: "SHARED", - 1: "PRIVATE", - 2: "LOCKED", -} - -var CacheSharingOpt_value = map[string]int32{ - "SHARED": 0, - "PRIVATE": 1, - "LOCKED": 2, -} - -func (x CacheSharingOpt) String() string { - return proto.EnumName(CacheSharingOpt_name, int32(x)) -} - -func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{3} -} - -// Op represents a vertex of the LLB DAG. -type Op struct { - // inputs is a set of input edges. 
- Inputs []*Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` - // Types that are valid to be assigned to Op: - // *Op_Exec - // *Op_Source - // *Op_File - // *Op_Build - Op isOp_Op `protobuf_oneof:"op"` - Platform *Platform `protobuf:"bytes,10,opt,name=platform,proto3" json:"platform,omitempty"` - Constraints *WorkerConstraints `protobuf:"bytes,11,opt,name=constraints,proto3" json:"constraints,omitempty"` -} - -func (m *Op) Reset() { *m = Op{} } -func (m *Op) String() string { return proto.CompactTextString(m) } -func (*Op) ProtoMessage() {} -func (*Op) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{0} -} -func (m *Op) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Op) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Op) XXX_Merge(src proto.Message) { - xxx_messageInfo_Op.Merge(m, src) -} -func (m *Op) XXX_Size() int { - return m.Size() -} -func (m *Op) XXX_DiscardUnknown() { - xxx_messageInfo_Op.DiscardUnknown(m) -} - -var xxx_messageInfo_Op proto.InternalMessageInfo - -type isOp_Op interface { - isOp_Op() - MarshalTo([]byte) (int, error) - Size() int -} - -type Op_Exec struct { - Exec *ExecOp `protobuf:"bytes,2,opt,name=exec,proto3,oneof" json:"exec,omitempty"` -} -type Op_Source struct { - Source *SourceOp `protobuf:"bytes,3,opt,name=source,proto3,oneof" json:"source,omitempty"` -} -type Op_File struct { - File *FileOp `protobuf:"bytes,4,opt,name=file,proto3,oneof" json:"file,omitempty"` -} -type Op_Build struct { - Build *BuildOp `protobuf:"bytes,5,opt,name=build,proto3,oneof" json:"build,omitempty"` -} - -func (*Op_Exec) isOp_Op() {} -func (*Op_Source) isOp_Op() {} -func (*Op_File) isOp_Op() {} -func (*Op_Build) isOp_Op() {} - -func (m *Op) GetOp() isOp_Op { - if m != nil { - return m.Op - } - return nil -} - -func (m *Op) GetInputs() []*Input { - if m != nil { - return m.Inputs - } - return nil -} - -func (m *Op) GetExec() *ExecOp { - if x, ok := m.GetOp().(*Op_Exec); ok { - return x.Exec - } - return nil -} - -func (m *Op) GetSource() *SourceOp { - if x, ok := m.GetOp().(*Op_Source); ok { - return x.Source - } - return nil -} - -func (m *Op) GetFile() *FileOp { - if x, ok := m.GetOp().(*Op_File); ok { - return x.File - } - return nil -} - -func (m *Op) GetBuild() *BuildOp { - if x, ok := m.GetOp().(*Op_Build); ok { - return x.Build - } - return nil -} - -func (m *Op) GetPlatform() *Platform { - if m != nil { - return m.Platform - } - return nil -} - -func (m *Op) GetConstraints() *WorkerConstraints { - if m != nil { - return m.Constraints - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*Op) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*Op_Exec)(nil), - (*Op_Source)(nil), - (*Op_File)(nil), - (*Op_Build)(nil), - } -} - -// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform -type Platform struct { - Architecture string `protobuf:"bytes,1,opt,name=Architecture,proto3" json:"Architecture,omitempty"` - OS string `protobuf:"bytes,2,opt,name=OS,proto3" json:"OS,omitempty"` - Variant string `protobuf:"bytes,3,opt,name=Variant,proto3" json:"Variant,omitempty"` - OSVersion string `protobuf:"bytes,4,opt,name=OSVersion,proto3" json:"OSVersion,omitempty"` - OSFeatures []string `protobuf:"bytes,5,rep,name=OSFeatures,proto3" json:"OSFeatures,omitempty"` -} - -func (m *Platform) Reset() { *m = Platform{} } -func (m *Platform) String() string { return proto.CompactTextString(m) } -func (*Platform) ProtoMessage() {} -func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{1} -} -func (m *Platform) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Platform) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Platform) XXX_Merge(src proto.Message) { - xxx_messageInfo_Platform.Merge(m, src) -} -func (m *Platform) XXX_Size() int { - return m.Size() -} -func (m *Platform) XXX_DiscardUnknown() { - xxx_messageInfo_Platform.DiscardUnknown(m) -} - -var xxx_messageInfo_Platform proto.InternalMessageInfo - -func (m *Platform) GetArchitecture() string { - if m != nil { - return m.Architecture - } - return "" -} - -func (m *Platform) GetOS() string { - if m != nil { - return m.OS - } - return "" -} - -func (m *Platform) GetVariant() string { - if m != nil { - return m.Variant - } - return "" -} - -func (m *Platform) GetOSVersion() string { - if m != nil { - return m.OSVersion - } - return "" -} - -func (m *Platform) GetOSFeatures() []string { - if m != nil { - return m.OSFeatures - } - return nil -} - -// Input represents an input edge for an Op. -type Input struct { - // digest of the marshaled input Op - Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` - // output index of the input Op - Index OutputIndex `protobuf:"varint,2,opt,name=index,proto3,customtype=OutputIndex" json:"index"` -} - -func (m *Input) Reset() { *m = Input{} } -func (m *Input) String() string { return proto.CompactTextString(m) } -func (*Input) ProtoMessage() {} -func (*Input) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{2} -} -func (m *Input) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Input) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Input) XXX_Merge(src proto.Message) { - xxx_messageInfo_Input.Merge(m, src) -} -func (m *Input) XXX_Size() int { - return m.Size() -} -func (m *Input) XXX_DiscardUnknown() { - xxx_messageInfo_Input.DiscardUnknown(m) -} - -var xxx_messageInfo_Input proto.InternalMessageInfo - -// ExecOp executes a command in a container. 
-type ExecOp struct { - Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` - Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` - Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` -} - -func (m *ExecOp) Reset() { *m = ExecOp{} } -func (m *ExecOp) String() string { return proto.CompactTextString(m) } -func (*ExecOp) ProtoMessage() {} -func (*ExecOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{3} -} -func (m *ExecOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExecOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecOp.Merge(m, src) -} -func (m *ExecOp) XXX_Size() int { - return m.Size() -} -func (m *ExecOp) XXX_DiscardUnknown() { - xxx_messageInfo_ExecOp.DiscardUnknown(m) -} - -var xxx_messageInfo_ExecOp proto.InternalMessageInfo - -func (m *ExecOp) GetMeta() *Meta { - if m != nil { - return m.Meta - } - return nil -} - -func (m *ExecOp) GetMounts() []*Mount { - if m != nil { - return m.Mounts - } - return nil -} - -func (m *ExecOp) GetNetwork() NetMode { - if m != nil { - return m.Network - } - return NetMode_UNSET -} - -func (m *ExecOp) GetSecurity() SecurityMode { - if m != nil { - return m.Security - } - return SecurityMode_SANDBOX -} - -// Meta is a set of arguments for ExecOp. -// Meta is unrelated to LLB metadata. -// FIXME: rename (ExecContext? ExecArgs?) -type Meta struct { - Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` - Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` - Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` - User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` - ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` - ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` - Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` -} - -func (m *Meta) Reset() { *m = Meta{} } -func (m *Meta) String() string { return proto.CompactTextString(m) } -func (*Meta) ProtoMessage() {} -func (*Meta) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{4} -} -func (m *Meta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Meta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Meta) XXX_Merge(src proto.Message) { - xxx_messageInfo_Meta.Merge(m, src) -} -func (m *Meta) XXX_Size() int { - return m.Size() -} -func (m *Meta) XXX_DiscardUnknown() { - xxx_messageInfo_Meta.DiscardUnknown(m) -} - -var xxx_messageInfo_Meta proto.InternalMessageInfo - -func (m *Meta) GetArgs() []string { - if m != nil { - return m.Args - } - return nil -} - -func (m *Meta) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -func (m *Meta) GetCwd() string { - if m != nil { - return m.Cwd - } - return "" -} - -func (m *Meta) GetUser() string { - if m != nil { - return m.User - } - return "" -} - -func 
(m *Meta) GetProxyEnv() *ProxyEnv { - if m != nil { - return m.ProxyEnv - } - return nil -} - -func (m *Meta) GetExtraHosts() []*HostIP { - if m != nil { - return m.ExtraHosts - } - return nil -} - -func (m *Meta) GetHostname() string { - if m != nil { - return m.Hostname - } - return "" -} - -// Mount specifies how to mount an input Op as a filesystem. -type Mount struct { - Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` - Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` - Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"` - Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"` - Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` - MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` - CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"` - SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"` - SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"` - ResultID string `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"` -} - -func (m *Mount) Reset() { *m = Mount{} } -func (m *Mount) String() string { return proto.CompactTextString(m) } -func (*Mount) ProtoMessage() {} -func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{5} -} -func (m *Mount) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Mount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Mount) XXX_Merge(src proto.Message) { - xxx_messageInfo_Mount.Merge(m, src) -} -func (m *Mount) XXX_Size() int { - return m.Size() -} -func (m *Mount) XXX_DiscardUnknown() { - xxx_messageInfo_Mount.DiscardUnknown(m) -} - -var xxx_messageInfo_Mount proto.InternalMessageInfo - -func (m *Mount) GetSelector() string { - if m != nil { - return m.Selector - } - return "" -} - -func (m *Mount) GetDest() string { - if m != nil { - return m.Dest - } - return "" -} - -func (m *Mount) GetReadonly() bool { - if m != nil { - return m.Readonly - } - return false -} - -func (m *Mount) GetMountType() MountType { - if m != nil { - return m.MountType - } - return MountType_BIND -} - -func (m *Mount) GetCacheOpt() *CacheOpt { - if m != nil { - return m.CacheOpt - } - return nil -} - -func (m *Mount) GetSecretOpt() *SecretOpt { - if m != nil { - return m.SecretOpt - } - return nil -} - -func (m *Mount) GetSSHOpt() *SSHOpt { - if m != nil { - return m.SSHOpt - } - return nil -} - -func (m *Mount) GetResultID() string { - if m != nil { - return m.ResultID - } - return "" -} - -// CacheOpt defines options specific to cache mounts -type CacheOpt struct { - // ID is an optional namespace for the mount - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // Sharing is the sharing mode for the mount - Sharing CacheSharingOpt `protobuf:"varint,2,opt,name=sharing,proto3,enum=pb.CacheSharingOpt" json:"sharing,omitempty"` -} - -func (m *CacheOpt) Reset() { *m = CacheOpt{} } -func (m *CacheOpt) String() string { return proto.CompactTextString(m) } -func (*CacheOpt) ProtoMessage() {} -func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{6} -} 
-func (m *CacheOpt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CacheOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CacheOpt) XXX_Merge(src proto.Message) { - xxx_messageInfo_CacheOpt.Merge(m, src) -} -func (m *CacheOpt) XXX_Size() int { - return m.Size() -} -func (m *CacheOpt) XXX_DiscardUnknown() { - xxx_messageInfo_CacheOpt.DiscardUnknown(m) -} - -var xxx_messageInfo_CacheOpt proto.InternalMessageInfo - -func (m *CacheOpt) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *CacheOpt) GetSharing() CacheSharingOpt { - if m != nil { - return m.Sharing - } - return CacheSharingOpt_SHARED -} - -// SecretOpt defines options describing secret mounts -type SecretOpt struct { - // ID of secret. Used for quering the value. - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // UID of secret file - Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` - // GID of secret file - Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"` - // Mode is the filesystem mode of secret file - Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` - // Optional defines if secret value is required. Error is produced - // if value is not found and optional is false. - Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` -} - -func (m *SecretOpt) Reset() { *m = SecretOpt{} } -func (m *SecretOpt) String() string { return proto.CompactTextString(m) } -func (*SecretOpt) ProtoMessage() {} -func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{7} -} -func (m *SecretOpt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SecretOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SecretOpt) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretOpt.Merge(m, src) -} -func (m *SecretOpt) XXX_Size() int { - return m.Size() -} -func (m *SecretOpt) XXX_DiscardUnknown() { - xxx_messageInfo_SecretOpt.DiscardUnknown(m) -} - -var xxx_messageInfo_SecretOpt proto.InternalMessageInfo - -func (m *SecretOpt) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *SecretOpt) GetUid() uint32 { - if m != nil { - return m.Uid - } - return 0 -} - -func (m *SecretOpt) GetGid() uint32 { - if m != nil { - return m.Gid - } - return 0 -} - -func (m *SecretOpt) GetMode() uint32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *SecretOpt) GetOptional() bool { - if m != nil { - return m.Optional - } - return false -} - -// SSHOpt defines options describing secret mounts -type SSHOpt struct { - // ID of exposed ssh rule. Used for quering the value. - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - // UID of agent socket - Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"` - // GID of agent socket - Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"` - // Mode is the filesystem mode of agent socket - Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` - // Optional defines if ssh socket is required. Error is produced - // if client does not expose ssh. 
- Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"` -} - -func (m *SSHOpt) Reset() { *m = SSHOpt{} } -func (m *SSHOpt) String() string { return proto.CompactTextString(m) } -func (*SSHOpt) ProtoMessage() {} -func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{8} -} -func (m *SSHOpt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SSHOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SSHOpt) XXX_Merge(src proto.Message) { - xxx_messageInfo_SSHOpt.Merge(m, src) -} -func (m *SSHOpt) XXX_Size() int { - return m.Size() -} -func (m *SSHOpt) XXX_DiscardUnknown() { - xxx_messageInfo_SSHOpt.DiscardUnknown(m) -} - -var xxx_messageInfo_SSHOpt proto.InternalMessageInfo - -func (m *SSHOpt) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *SSHOpt) GetUid() uint32 { - if m != nil { - return m.Uid - } - return 0 -} - -func (m *SSHOpt) GetGid() uint32 { - if m != nil { - return m.Gid - } - return 0 -} - -func (m *SSHOpt) GetMode() uint32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *SSHOpt) GetOptional() bool { - if m != nil { - return m.Optional - } - return false -} - -// SourceOp specifies a source such as build contexts and images. -type SourceOp struct { - // TODO: use source type or any type instead of URL protocol. - // identifier e.g. local://, docker-image://, git://, https://... - Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` - // attrs are defined in attr.go - Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *SourceOp) Reset() { *m = SourceOp{} } -func (m *SourceOp) String() string { return proto.CompactTextString(m) } -func (*SourceOp) ProtoMessage() {} -func (*SourceOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{9} -} -func (m *SourceOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SourceOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SourceOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceOp.Merge(m, src) -} -func (m *SourceOp) XXX_Size() int { - return m.Size() -} -func (m *SourceOp) XXX_DiscardUnknown() { - xxx_messageInfo_SourceOp.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceOp proto.InternalMessageInfo - -func (m *SourceOp) GetIdentifier() string { - if m != nil { - return m.Identifier - } - return "" -} - -func (m *SourceOp) GetAttrs() map[string]string { - if m != nil { - return m.Attrs - } - return nil -} - -// BuildOp is used for nested build invocation. 
-// BuildOp is experimental and can break without backwards compatibility -type BuildOp struct { - Builder InputIndex `protobuf:"varint,1,opt,name=builder,proto3,customtype=InputIndex" json:"builder"` - Inputs map[string]*BuildInput `protobuf:"bytes,2,rep,name=inputs,proto3" json:"inputs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Def *Definition `protobuf:"bytes,3,opt,name=def,proto3" json:"def,omitempty"` - Attrs map[string]string `protobuf:"bytes,4,rep,name=attrs,proto3" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *BuildOp) Reset() { *m = BuildOp{} } -func (m *BuildOp) String() string { return proto.CompactTextString(m) } -func (*BuildOp) ProtoMessage() {} -func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{10} -} -func (m *BuildOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BuildOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *BuildOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_BuildOp.Merge(m, src) -} -func (m *BuildOp) XXX_Size() int { - return m.Size() -} -func (m *BuildOp) XXX_DiscardUnknown() { - xxx_messageInfo_BuildOp.DiscardUnknown(m) -} - -var xxx_messageInfo_BuildOp proto.InternalMessageInfo - -func (m *BuildOp) GetInputs() map[string]*BuildInput { - if m != nil { - return m.Inputs - } - return nil -} - -func (m *BuildOp) GetDef() *Definition { - if m != nil { - return m.Def - } - return nil -} - -func (m *BuildOp) GetAttrs() map[string]string { - if m != nil { - return m.Attrs - } - return nil -} - -// BuildInput is used for BuildOp. -type BuildInput struct { - Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` -} - -func (m *BuildInput) Reset() { *m = BuildInput{} } -func (m *BuildInput) String() string { return proto.CompactTextString(m) } -func (*BuildInput) ProtoMessage() {} -func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{11} -} -func (m *BuildInput) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BuildInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *BuildInput) XXX_Merge(src proto.Message) { - xxx_messageInfo_BuildInput.Merge(m, src) -} -func (m *BuildInput) XXX_Size() int { - return m.Size() -} -func (m *BuildInput) XXX_DiscardUnknown() { - xxx_messageInfo_BuildInput.DiscardUnknown(m) -} - -var xxx_messageInfo_BuildInput proto.InternalMessageInfo - -// OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time. -type OpMetadata struct { - // ignore_cache specifies to ignore the cache for this Op. 
- IgnoreCache bool `protobuf:"varint,1,opt,name=ignore_cache,json=ignoreCache,proto3" json:"ignore_cache,omitempty"` - // Description can be used for keeping any text fields that builder doesn't parse - Description map[string]string `protobuf:"bytes,2,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // index 3 reserved for WorkerConstraint in previous versions - // WorkerConstraint worker_constraint = 3; - ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache,proto3" json:"export_cache,omitempty"` - Caps map[github_com_moby_buildkit_util_apicaps.CapID]bool `protobuf:"bytes,5,rep,name=caps,proto3,castkey=github.com/moby/buildkit/util/apicaps.CapID" json:"caps" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` -} - -func (m *OpMetadata) Reset() { *m = OpMetadata{} } -func (m *OpMetadata) String() string { return proto.CompactTextString(m) } -func (*OpMetadata) ProtoMessage() {} -func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{12} -} -func (m *OpMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OpMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OpMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_OpMetadata.Merge(m, src) -} -func (m *OpMetadata) XXX_Size() int { - return m.Size() -} -func (m *OpMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_OpMetadata.DiscardUnknown(m) -} - -var xxx_messageInfo_OpMetadata proto.InternalMessageInfo - -func (m *OpMetadata) GetIgnoreCache() bool { - if m != nil { - return m.IgnoreCache - } - return false -} - -func (m *OpMetadata) GetDescription() map[string]string { - if m != nil { - return m.Description - } - return nil -} - -func (m *OpMetadata) GetExportCache() *ExportCache { - if m != nil { - return m.ExportCache - } - return nil -} - -func (m *OpMetadata) GetCaps() map[github_com_moby_buildkit_util_apicaps.CapID]bool { - if m != nil { - return m.Caps - } - return nil -} - -// Source is a source mapping description for a file -type Source struct { - Locations map[string]*Locations `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - Infos []*SourceInfo `protobuf:"bytes,2,rep,name=infos,proto3" json:"infos,omitempty"` -} - -func (m *Source) Reset() { *m = Source{} } -func (m *Source) String() string { return proto.CompactTextString(m) } -func (*Source) ProtoMessage() {} -func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{13} -} -func (m *Source) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Source) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Source) XXX_Merge(src proto.Message) { - xxx_messageInfo_Source.Merge(m, src) -} -func (m *Source) XXX_Size() int { - return m.Size() -} -func (m *Source) XXX_DiscardUnknown() { - xxx_messageInfo_Source.DiscardUnknown(m) -} - -var xxx_messageInfo_Source proto.InternalMessageInfo - -func (m *Source) GetLocations() map[string]*Locations { - if m != nil { - return m.Locations - } - return nil -} - -func (m 
*Source) GetInfos() []*SourceInfo { - if m != nil { - return m.Infos - } - return nil -} - -// Locations is a list of ranges with a index to its source map. -type Locations struct { - Locations []*Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` -} - -func (m *Locations) Reset() { *m = Locations{} } -func (m *Locations) String() string { return proto.CompactTextString(m) } -func (*Locations) ProtoMessage() {} -func (*Locations) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{14} -} -func (m *Locations) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Locations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Locations) XXX_Merge(src proto.Message) { - xxx_messageInfo_Locations.Merge(m, src) -} -func (m *Locations) XXX_Size() int { - return m.Size() -} -func (m *Locations) XXX_DiscardUnknown() { - xxx_messageInfo_Locations.DiscardUnknown(m) -} - -var xxx_messageInfo_Locations proto.InternalMessageInfo - -func (m *Locations) GetLocations() []*Location { - if m != nil { - return m.Locations - } - return nil -} - -// Source info contains the shared metadata of a source mapping -type SourceInfo struct { - Filename string `protobuf:"bytes,1,opt,name=filename,proto3" json:"filename,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Definition *Definition `protobuf:"bytes,3,opt,name=definition,proto3" json:"definition,omitempty"` -} - -func (m *SourceInfo) Reset() { *m = SourceInfo{} } -func (m *SourceInfo) String() string { return proto.CompactTextString(m) } -func (*SourceInfo) ProtoMessage() {} -func (*SourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{15} -} -func (m *SourceInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SourceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SourceInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_SourceInfo.Merge(m, src) -} -func (m *SourceInfo) XXX_Size() int { - return m.Size() -} -func (m *SourceInfo) XXX_DiscardUnknown() { - xxx_messageInfo_SourceInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_SourceInfo proto.InternalMessageInfo - -func (m *SourceInfo) GetFilename() string { - if m != nil { - return m.Filename - } - return "" -} - -func (m *SourceInfo) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *SourceInfo) GetDefinition() *Definition { - if m != nil { - return m.Definition - } - return nil -} - -// Location defines list of areas in to source file -type Location struct { - SourceIndex int32 `protobuf:"varint,1,opt,name=sourceIndex,proto3" json:"sourceIndex,omitempty"` - Ranges []*Range `protobuf:"bytes,2,rep,name=ranges,proto3" json:"ranges,omitempty"` -} - -func (m *Location) Reset() { *m = Location{} } -func (m *Location) String() string { return proto.CompactTextString(m) } -func (*Location) ProtoMessage() {} -func (*Location) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{16} -} -func (m *Location) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { 
- return nil, err - } - return b[:n], nil -} -func (m *Location) XXX_Merge(src proto.Message) { - xxx_messageInfo_Location.Merge(m, src) -} -func (m *Location) XXX_Size() int { - return m.Size() -} -func (m *Location) XXX_DiscardUnknown() { - xxx_messageInfo_Location.DiscardUnknown(m) -} - -var xxx_messageInfo_Location proto.InternalMessageInfo - -func (m *Location) GetSourceIndex() int32 { - if m != nil { - return m.SourceIndex - } - return 0 -} - -func (m *Location) GetRanges() []*Range { - if m != nil { - return m.Ranges - } - return nil -} - -// Range is an area in the source file -type Range struct { - Start Position `protobuf:"bytes,1,opt,name=start,proto3" json:"start"` - End Position `protobuf:"bytes,2,opt,name=end,proto3" json:"end"` -} - -func (m *Range) Reset() { *m = Range{} } -func (m *Range) String() string { return proto.CompactTextString(m) } -func (*Range) ProtoMessage() {} -func (*Range) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{17} -} -func (m *Range) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Range) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Range) XXX_Merge(src proto.Message) { - xxx_messageInfo_Range.Merge(m, src) -} -func (m *Range) XXX_Size() int { - return m.Size() -} -func (m *Range) XXX_DiscardUnknown() { - xxx_messageInfo_Range.DiscardUnknown(m) -} - -var xxx_messageInfo_Range proto.InternalMessageInfo - -func (m *Range) GetStart() Position { - if m != nil { - return m.Start - } - return Position{} -} - -func (m *Range) GetEnd() Position { - if m != nil { - return m.End - } - return Position{} -} - -// Position is single location in a source file -type Position struct { - Line int32 `protobuf:"varint,1,opt,name=Line,proto3" json:"Line,omitempty"` - Character int32 `protobuf:"varint,2,opt,name=Character,proto3" json:"Character,omitempty"` -} - -func (m *Position) Reset() { *m = Position{} } -func (m *Position) String() string { return proto.CompactTextString(m) } -func (*Position) ProtoMessage() {} -func (*Position) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{18} -} -func (m *Position) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Position) XXX_Merge(src proto.Message) { - xxx_messageInfo_Position.Merge(m, src) -} -func (m *Position) XXX_Size() int { - return m.Size() -} -func (m *Position) XXX_DiscardUnknown() { - xxx_messageInfo_Position.DiscardUnknown(m) -} - -var xxx_messageInfo_Position proto.InternalMessageInfo - -func (m *Position) GetLine() int32 { - if m != nil { - return m.Line - } - return 0 -} - -func (m *Position) GetCharacter() int32 { - if m != nil { - return m.Character - } - return 0 -} - -type ExportCache struct { - Value bool `protobuf:"varint,1,opt,name=Value,proto3" json:"Value,omitempty"` -} - -func (m *ExportCache) Reset() { *m = ExportCache{} } -func (m *ExportCache) String() string { return proto.CompactTextString(m) } -func (*ExportCache) ProtoMessage() {} -func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{19} -} -func (m *ExportCache) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportCache) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExportCache) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportCache.Merge(m, src) -} -func (m *ExportCache) XXX_Size() int { - return m.Size() -} -func (m *ExportCache) XXX_DiscardUnknown() { - xxx_messageInfo_ExportCache.DiscardUnknown(m) -} - -var xxx_messageInfo_ExportCache proto.InternalMessageInfo - -func (m *ExportCache) GetValue() bool { - if m != nil { - return m.Value - } - return false -} - -type ProxyEnv struct { - HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` - HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` - FtpProxy string `protobuf:"bytes,3,opt,name=ftp_proxy,json=ftpProxy,proto3" json:"ftp_proxy,omitempty"` - NoProxy string `protobuf:"bytes,4,opt,name=no_proxy,json=noProxy,proto3" json:"no_proxy,omitempty"` - AllProxy string `protobuf:"bytes,5,opt,name=all_proxy,json=allProxy,proto3" json:"all_proxy,omitempty"` -} - -func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } -func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } -func (*ProxyEnv) ProtoMessage() {} -func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{20} -} -func (m *ProxyEnv) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProxyEnv) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ProxyEnv) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProxyEnv.Merge(m, src) -} -func (m *ProxyEnv) XXX_Size() int { - return m.Size() -} -func (m *ProxyEnv) XXX_DiscardUnknown() { - xxx_messageInfo_ProxyEnv.DiscardUnknown(m) -} - -var xxx_messageInfo_ProxyEnv proto.InternalMessageInfo - -func (m *ProxyEnv) GetHttpProxy() string { - if m != nil { - return m.HttpProxy - } - return "" -} - -func (m *ProxyEnv) GetHttpsProxy() string { - if m != nil { - return m.HttpsProxy - } - return "" -} - -func (m *ProxyEnv) GetFtpProxy() string { - if m != nil { - return m.FtpProxy - } - return "" -} - -func (m *ProxyEnv) GetNoProxy() string { - if m != nil { - return m.NoProxy - } - return "" -} - -func (m *ProxyEnv) GetAllProxy() string { - if m != nil { - return m.AllProxy - } - return "" -} - -// WorkerConstraints defines conditions for the worker -type WorkerConstraints struct { - Filter []string `protobuf:"bytes,1,rep,name=filter,proto3" json:"filter,omitempty"` -} - -func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } -func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } -func (*WorkerConstraints) ProtoMessage() {} -func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{21} -} -func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WorkerConstraints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WorkerConstraints) XXX_Merge(src proto.Message) { - xxx_messageInfo_WorkerConstraints.Merge(m, src) -} -func (m *WorkerConstraints) XXX_Size() int { - return m.Size() -} -func (m *WorkerConstraints) XXX_DiscardUnknown() { - 
xxx_messageInfo_WorkerConstraints.DiscardUnknown(m) -} - -var xxx_messageInfo_WorkerConstraints proto.InternalMessageInfo - -func (m *WorkerConstraints) GetFilter() []string { - if m != nil { - return m.Filter - } - return nil -} - -// Definition is the LLB definition structure with per-vertex metadata entries -type Definition struct { - // def is a list of marshaled Op messages - Def [][]byte `protobuf:"bytes,1,rep,name=def,proto3" json:"def,omitempty"` - // metadata contains metadata for the each of the Op messages. - // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future. - Metadata map[github_com_opencontainers_go_digest.Digest]OpMetadata `protobuf:"bytes,2,rep,name=metadata,proto3,castkey=github.com/opencontainers/go-digest.Digest" json:"metadata" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` - // Source contains the source mapping information for the vertexes in the definition - Source *Source `protobuf:"bytes,3,opt,name=Source,proto3" json:"Source,omitempty"` -} - -func (m *Definition) Reset() { *m = Definition{} } -func (m *Definition) String() string { return proto.CompactTextString(m) } -func (*Definition) ProtoMessage() {} -func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{22} -} -func (m *Definition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Definition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Definition) XXX_Merge(src proto.Message) { - xxx_messageInfo_Definition.Merge(m, src) -} -func (m *Definition) XXX_Size() int { - return m.Size() -} -func (m *Definition) XXX_DiscardUnknown() { - xxx_messageInfo_Definition.DiscardUnknown(m) -} - -var xxx_messageInfo_Definition proto.InternalMessageInfo - -func (m *Definition) GetDef() [][]byte { - if m != nil { - return m.Def - } - return nil -} - -func (m *Definition) GetMetadata() map[github_com_opencontainers_go_digest.Digest]OpMetadata { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *Definition) GetSource() *Source { - if m != nil { - return m.Source - } - return nil -} - -type HostIP struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` -} - -func (m *HostIP) Reset() { *m = HostIP{} } -func (m *HostIP) String() string { return proto.CompactTextString(m) } -func (*HostIP) ProtoMessage() {} -func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{23} -} -func (m *HostIP) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostIP) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostIP.Merge(m, src) -} -func (m *HostIP) XXX_Size() int { - return m.Size() -} -func (m *HostIP) XXX_DiscardUnknown() { - xxx_messageInfo_HostIP.DiscardUnknown(m) -} - -var xxx_messageInfo_HostIP proto.InternalMessageInfo - -func (m *HostIP) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *HostIP) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - -type FileOp struct { - Actions []*FileAction 
`protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` -} - -func (m *FileOp) Reset() { *m = FileOp{} } -func (m *FileOp) String() string { return proto.CompactTextString(m) } -func (*FileOp) ProtoMessage() {} -func (*FileOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{24} -} -func (m *FileOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FileOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileOp.Merge(m, src) -} -func (m *FileOp) XXX_Size() int { - return m.Size() -} -func (m *FileOp) XXX_DiscardUnknown() { - xxx_messageInfo_FileOp.DiscardUnknown(m) -} - -var xxx_messageInfo_FileOp proto.InternalMessageInfo - -func (m *FileOp) GetActions() []*FileAction { - if m != nil { - return m.Actions - } - return nil -} - -type FileAction struct { - Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` - SecondaryInput InputIndex `protobuf:"varint,2,opt,name=secondaryInput,proto3,customtype=InputIndex" json:"secondaryInput"` - Output OutputIndex `protobuf:"varint,3,opt,name=output,proto3,customtype=OutputIndex" json:"output"` - // Types that are valid to be assigned to Action: - // *FileAction_Copy - // *FileAction_Mkfile - // *FileAction_Mkdir - // *FileAction_Rm - Action isFileAction_Action `protobuf_oneof:"action"` -} - -func (m *FileAction) Reset() { *m = FileAction{} } -func (m *FileAction) String() string { return proto.CompactTextString(m) } -func (*FileAction) ProtoMessage() {} -func (*FileAction) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{25} -} -func (m *FileAction) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FileAction) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileAction.Merge(m, src) -} -func (m *FileAction) XXX_Size() int { - return m.Size() -} -func (m *FileAction) XXX_DiscardUnknown() { - xxx_messageInfo_FileAction.DiscardUnknown(m) -} - -var xxx_messageInfo_FileAction proto.InternalMessageInfo - -type isFileAction_Action interface { - isFileAction_Action() - MarshalTo([]byte) (int, error) - Size() int -} - -type FileAction_Copy struct { - Copy *FileActionCopy `protobuf:"bytes,4,opt,name=copy,proto3,oneof" json:"copy,omitempty"` -} -type FileAction_Mkfile struct { - Mkfile *FileActionMkFile `protobuf:"bytes,5,opt,name=mkfile,proto3,oneof" json:"mkfile,omitempty"` -} -type FileAction_Mkdir struct { - Mkdir *FileActionMkDir `protobuf:"bytes,6,opt,name=mkdir,proto3,oneof" json:"mkdir,omitempty"` -} -type FileAction_Rm struct { - Rm *FileActionRm `protobuf:"bytes,7,opt,name=rm,proto3,oneof" json:"rm,omitempty"` -} - -func (*FileAction_Copy) isFileAction_Action() {} -func (*FileAction_Mkfile) isFileAction_Action() {} -func (*FileAction_Mkdir) isFileAction_Action() {} -func (*FileAction_Rm) isFileAction_Action() {} - -func (m *FileAction) GetAction() isFileAction_Action { - if m != nil { - return m.Action - } - return nil -} - -func (m *FileAction) GetCopy() *FileActionCopy { - if x, ok := m.GetAction().(*FileAction_Copy); ok { - return x.Copy - } - return nil -} - -func (m *FileAction) GetMkfile() *FileActionMkFile 
{ - if x, ok := m.GetAction().(*FileAction_Mkfile); ok { - return x.Mkfile - } - return nil -} - -func (m *FileAction) GetMkdir() *FileActionMkDir { - if x, ok := m.GetAction().(*FileAction_Mkdir); ok { - return x.Mkdir - } - return nil -} - -func (m *FileAction) GetRm() *FileActionRm { - if x, ok := m.GetAction().(*FileAction_Rm); ok { - return x.Rm - } - return nil -} - -// XXX_OneofWrappers is for the internal use of the proto package. -func (*FileAction) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*FileAction_Copy)(nil), - (*FileAction_Mkfile)(nil), - (*FileAction_Mkdir)(nil), - (*FileAction_Rm)(nil), - } -} - -type FileActionCopy struct { - // src is the source path - Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` - // dest path - Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` - // optional owner override - Owner *ChownOpt `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` - // optional permission bits override - Mode int32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` - // followSymlink resolves symlinks in src - FollowSymlink bool `protobuf:"varint,5,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` - // dirCopyContents only copies contents if src is a directory - DirCopyContents bool `protobuf:"varint,6,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` - // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead - AttemptUnpackDockerCompatibility bool `protobuf:"varint,7,opt,name=attemptUnpackDockerCompatibility,proto3" json:"attemptUnpackDockerCompatibility,omitempty"` - // createDestPath creates dest path directories if needed - CreateDestPath bool `protobuf:"varint,8,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` - // allowWildcard allows filepath.Match wildcards in src path - AllowWildcard bool `protobuf:"varint,9,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` - // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files - AllowEmptyWildcard bool `protobuf:"varint,10,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` - // optional created time override - Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - // include only files/dirs matching at least one of these patterns - IncludePatterns []string `protobuf:"bytes,12,rep,name=include_patterns,json=includePatterns,proto3" json:"include_patterns,omitempty"` - // exclude files/dir matching any of these patterns (even if they match an include pattern) - ExcludePatterns []string `protobuf:"bytes,13,rep,name=exclude_patterns,json=excludePatterns,proto3" json:"exclude_patterns,omitempty"` -} - -func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } -func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } -func (*FileActionCopy) ProtoMessage() {} -func (*FileActionCopy) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{26} -} -func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileActionCopy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FileActionCopy) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileActionCopy.Merge(m, src) -} -func (m *FileActionCopy) XXX_Size() int { - return m.Size() -} -func (m 
*FileActionCopy) XXX_DiscardUnknown() { - xxx_messageInfo_FileActionCopy.DiscardUnknown(m) -} - -var xxx_messageInfo_FileActionCopy proto.InternalMessageInfo - -func (m *FileActionCopy) GetSrc() string { - if m != nil { - return m.Src - } - return "" -} - -func (m *FileActionCopy) GetDest() string { - if m != nil { - return m.Dest - } - return "" -} - -func (m *FileActionCopy) GetOwner() *ChownOpt { - if m != nil { - return m.Owner - } - return nil -} - -func (m *FileActionCopy) GetMode() int32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *FileActionCopy) GetFollowSymlink() bool { - if m != nil { - return m.FollowSymlink - } - return false -} - -func (m *FileActionCopy) GetDirCopyContents() bool { - if m != nil { - return m.DirCopyContents - } - return false -} - -func (m *FileActionCopy) GetAttemptUnpackDockerCompatibility() bool { - if m != nil { - return m.AttemptUnpackDockerCompatibility - } - return false -} - -func (m *FileActionCopy) GetCreateDestPath() bool { - if m != nil { - return m.CreateDestPath - } - return false -} - -func (m *FileActionCopy) GetAllowWildcard() bool { - if m != nil { - return m.AllowWildcard - } - return false -} - -func (m *FileActionCopy) GetAllowEmptyWildcard() bool { - if m != nil { - return m.AllowEmptyWildcard - } - return false -} - -func (m *FileActionCopy) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -func (m *FileActionCopy) GetIncludePatterns() []string { - if m != nil { - return m.IncludePatterns - } - return nil -} - -func (m *FileActionCopy) GetExcludePatterns() []string { - if m != nil { - return m.ExcludePatterns - } - return nil -} - -type FileActionMkFile struct { - // path for the new file - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - // permission bits - Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` - // data is the new file contents - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - // optional owner for the new file - Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - // optional created time override - Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` -} - -func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } -func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } -func (*FileActionMkFile) ProtoMessage() {} -func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{27} -} -func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileActionMkFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FileActionMkFile) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileActionMkFile.Merge(m, src) -} -func (m *FileActionMkFile) XXX_Size() int { - return m.Size() -} -func (m *FileActionMkFile) XXX_DiscardUnknown() { - xxx_messageInfo_FileActionMkFile.DiscardUnknown(m) -} - -var xxx_messageInfo_FileActionMkFile proto.InternalMessageInfo - -func (m *FileActionMkFile) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *FileActionMkFile) GetMode() int32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *FileActionMkFile) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func (m *FileActionMkFile) GetOwner() 
*ChownOpt { - if m != nil { - return m.Owner - } - return nil -} - -func (m *FileActionMkFile) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -type FileActionMkDir struct { - // path for the new directory - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - // permission bits - Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` - // makeParents creates parent directories as well if needed - MakeParents bool `protobuf:"varint,3,opt,name=makeParents,proto3" json:"makeParents,omitempty"` - // optional owner for the new directory - Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - // optional created time override - Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` -} - -func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } -func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } -func (*FileActionMkDir) ProtoMessage() {} -func (*FileActionMkDir) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{28} -} -func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileActionMkDir) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FileActionMkDir) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileActionMkDir.Merge(m, src) -} -func (m *FileActionMkDir) XXX_Size() int { - return m.Size() -} -func (m *FileActionMkDir) XXX_DiscardUnknown() { - xxx_messageInfo_FileActionMkDir.DiscardUnknown(m) -} - -var xxx_messageInfo_FileActionMkDir proto.InternalMessageInfo - -func (m *FileActionMkDir) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *FileActionMkDir) GetMode() int32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *FileActionMkDir) GetMakeParents() bool { - if m != nil { - return m.MakeParents - } - return false -} - -func (m *FileActionMkDir) GetOwner() *ChownOpt { - if m != nil { - return m.Owner - } - return nil -} - -func (m *FileActionMkDir) GetTimestamp() int64 { - if m != nil { - return m.Timestamp - } - return 0 -} - -type FileActionRm struct { - // path to remove - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - // allowNotFound doesn't fail the rm if file is not found - AllowNotFound bool `protobuf:"varint,2,opt,name=allowNotFound,proto3" json:"allowNotFound,omitempty"` - // allowWildcard allows filepath.Match wildcards in path - AllowWildcard bool `protobuf:"varint,3,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` -} - -func (m *FileActionRm) Reset() { *m = FileActionRm{} } -func (m *FileActionRm) String() string { return proto.CompactTextString(m) } -func (*FileActionRm) ProtoMessage() {} -func (*FileActionRm) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{29} -} -func (m *FileActionRm) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FileActionRm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FileActionRm) XXX_Merge(src proto.Message) { - xxx_messageInfo_FileActionRm.Merge(m, src) -} -func (m *FileActionRm) XXX_Size() int { - return m.Size() -} -func (m *FileActionRm) XXX_DiscardUnknown() { - 
xxx_messageInfo_FileActionRm.DiscardUnknown(m) -} - -var xxx_messageInfo_FileActionRm proto.InternalMessageInfo - -func (m *FileActionRm) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *FileActionRm) GetAllowNotFound() bool { - if m != nil { - return m.AllowNotFound - } - return false -} - -func (m *FileActionRm) GetAllowWildcard() bool { - if m != nil { - return m.AllowWildcard - } - return false -} - -type ChownOpt struct { - User *UserOpt `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` - Group *UserOpt `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` -} - -func (m *ChownOpt) Reset() { *m = ChownOpt{} } -func (m *ChownOpt) String() string { return proto.CompactTextString(m) } -func (*ChownOpt) ProtoMessage() {} -func (*ChownOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{30} -} -func (m *ChownOpt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ChownOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ChownOpt) XXX_Merge(src proto.Message) { - xxx_messageInfo_ChownOpt.Merge(m, src) -} -func (m *ChownOpt) XXX_Size() int { - return m.Size() -} -func (m *ChownOpt) XXX_DiscardUnknown() { - xxx_messageInfo_ChownOpt.DiscardUnknown(m) -} - -var xxx_messageInfo_ChownOpt proto.InternalMessageInfo - -func (m *ChownOpt) GetUser() *UserOpt { - if m != nil { - return m.User - } - return nil -} - -func (m *ChownOpt) GetGroup() *UserOpt { - if m != nil { - return m.Group - } - return nil -} - -type UserOpt struct { - // Types that are valid to be assigned to User: - // *UserOpt_ByName - // *UserOpt_ByID - User isUserOpt_User `protobuf_oneof:"user"` -} - -func (m *UserOpt) Reset() { *m = UserOpt{} } -func (m *UserOpt) String() string { return proto.CompactTextString(m) } -func (*UserOpt) ProtoMessage() {} -func (*UserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{31} -} -func (m *UserOpt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *UserOpt) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserOpt.Merge(m, src) -} -func (m *UserOpt) XXX_Size() int { - return m.Size() -} -func (m *UserOpt) XXX_DiscardUnknown() { - xxx_messageInfo_UserOpt.DiscardUnknown(m) -} - -var xxx_messageInfo_UserOpt proto.InternalMessageInfo - -type isUserOpt_User interface { - isUserOpt_User() - MarshalTo([]byte) (int, error) - Size() int -} - -type UserOpt_ByName struct { - ByName *NamedUserOpt `protobuf:"bytes,1,opt,name=byName,proto3,oneof" json:"byName,omitempty"` -} -type UserOpt_ByID struct { - ByID uint32 `protobuf:"varint,2,opt,name=byID,proto3,oneof" json:"byID,omitempty"` -} - -func (*UserOpt_ByName) isUserOpt_User() {} -func (*UserOpt_ByID) isUserOpt_User() {} - -func (m *UserOpt) GetUser() isUserOpt_User { - if m != nil { - return m.User - } - return nil -} - -func (m *UserOpt) GetByName() *NamedUserOpt { - if x, ok := m.GetUser().(*UserOpt_ByName); ok { - return x.ByName - } - return nil -} - -func (m *UserOpt) GetByID() uint32 { - if x, ok := m.GetUser().(*UserOpt_ByID); ok { - return x.ByID - } - return 0 -} - -// XXX_OneofWrappers is for the internal use of the proto package. 
-func (*UserOpt) XXX_OneofWrappers() []interface{} { - return []interface{}{ - (*UserOpt_ByName)(nil), - (*UserOpt_ByID)(nil), - } -} - -type NamedUserOpt struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Input InputIndex `protobuf:"varint,2,opt,name=input,proto3,customtype=InputIndex" json:"input"` -} - -func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} } -func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) } -func (*NamedUserOpt) ProtoMessage() {} -func (*NamedUserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{32} -} -func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamedUserOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamedUserOpt) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamedUserOpt.Merge(m, src) -} -func (m *NamedUserOpt) XXX_Size() int { - return m.Size() -} -func (m *NamedUserOpt) XXX_DiscardUnknown() { - xxx_messageInfo_NamedUserOpt.DiscardUnknown(m) -} - -var xxx_messageInfo_NamedUserOpt proto.InternalMessageInfo - -func (m *NamedUserOpt) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func init() { - proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value) - proto.RegisterEnum("pb.SecurityMode", SecurityMode_name, SecurityMode_value) - proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) - proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value) - proto.RegisterType((*Op)(nil), "pb.Op") - proto.RegisterType((*Platform)(nil), "pb.Platform") - proto.RegisterType((*Input)(nil), "pb.Input") - proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") - proto.RegisterType((*Meta)(nil), "pb.Meta") - proto.RegisterType((*Mount)(nil), "pb.Mount") - proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") - proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt") - proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt") - proto.RegisterType((*SourceOp)(nil), "pb.SourceOp") - proto.RegisterMapType((map[string]string)(nil), "pb.SourceOp.AttrsEntry") - proto.RegisterType((*BuildOp)(nil), "pb.BuildOp") - proto.RegisterMapType((map[string]string)(nil), "pb.BuildOp.AttrsEntry") - proto.RegisterMapType((map[string]*BuildInput)(nil), "pb.BuildOp.InputsEntry") - proto.RegisterType((*BuildInput)(nil), "pb.BuildInput") - proto.RegisterType((*OpMetadata)(nil), "pb.OpMetadata") - proto.RegisterMapType((map[github_com_moby_buildkit_util_apicaps.CapID]bool)(nil), "pb.OpMetadata.CapsEntry") - proto.RegisterMapType((map[string]string)(nil), "pb.OpMetadata.DescriptionEntry") - proto.RegisterType((*Source)(nil), "pb.Source") - proto.RegisterMapType((map[string]*Locations)(nil), "pb.Source.LocationsEntry") - proto.RegisterType((*Locations)(nil), "pb.Locations") - proto.RegisterType((*SourceInfo)(nil), "pb.SourceInfo") - proto.RegisterType((*Location)(nil), "pb.Location") - proto.RegisterType((*Range)(nil), "pb.Range") - proto.RegisterType((*Position)(nil), "pb.Position") - proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") - proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") - proto.RegisterType((*WorkerConstraints)(nil), "pb.WorkerConstraints") - proto.RegisterType((*Definition)(nil), "pb.Definition") - proto.RegisterMapType((map[github_com_opencontainers_go_digest.Digest]OpMetadata)(nil), "pb.Definition.MetadataEntry") - proto.RegisterType((*HostIP)(nil), 
"pb.HostIP") - proto.RegisterType((*FileOp)(nil), "pb.FileOp") - proto.RegisterType((*FileAction)(nil), "pb.FileAction") - proto.RegisterType((*FileActionCopy)(nil), "pb.FileActionCopy") - proto.RegisterType((*FileActionMkFile)(nil), "pb.FileActionMkFile") - proto.RegisterType((*FileActionMkDir)(nil), "pb.FileActionMkDir") - proto.RegisterType((*FileActionRm)(nil), "pb.FileActionRm") - proto.RegisterType((*ChownOpt)(nil), "pb.ChownOpt") - proto.RegisterType((*UserOpt)(nil), "pb.UserOpt") - proto.RegisterType((*NamedUserOpt)(nil), "pb.NamedUserOpt") -} - -func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } - -var fileDescriptor_8de16154b2733812 = []byte{ - // 2267 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6e, 0x1c, 0xc7, - 0xf1, 0xe7, 0x7e, 0xef, 0xd6, 0x2e, 0xa9, 0xfd, 0xb7, 0x65, 0x7b, 0xcd, 0xbf, 0x42, 0xd2, 0x63, - 0xc7, 0xa0, 0x28, 0x69, 0x09, 0xd0, 0x80, 0x65, 0x18, 0x41, 0x10, 0xee, 0x87, 0xc0, 0xb5, 0x25, - 0x2e, 0xd1, 0x2b, 0xc9, 0xb9, 0x09, 0xc3, 0xd9, 0x26, 0x39, 0xe0, 0xec, 0xf4, 0xa0, 0xa7, 0x57, - 0xe2, 0x5e, 0x72, 0xf0, 0x13, 0x18, 0x08, 0x90, 0x5b, 0x12, 0xe4, 0x1d, 0x72, 0xcd, 0x31, 0x80, - 0x8f, 0x3e, 0xe4, 0x60, 0xe4, 0xe0, 0x04, 0xd2, 0x3d, 0x4f, 0x90, 0x00, 0x41, 0x55, 0xf7, 0x7c, - 0x2c, 0x45, 0x41, 0x12, 0x12, 0xe4, 0x34, 0xdd, 0x55, 0xbf, 0xaa, 0xae, 0xae, 0xaa, 0xae, 0xae, - 0x1e, 0x68, 0xc8, 0x28, 0xee, 0x46, 0x4a, 0x6a, 0xc9, 0x8a, 0xd1, 0xf1, 0xfa, 0x9d, 0x53, 0x5f, - 0x9f, 0xcd, 0x8f, 0xbb, 0x9e, 0x9c, 0xed, 0x9e, 0xca, 0x53, 0xb9, 0x4b, 0xac, 0xe3, 0xf9, 0x09, - 0xcd, 0x68, 0x42, 0x23, 0x23, 0xe2, 0xfc, 0xa1, 0x08, 0xc5, 0x71, 0xc4, 0x3e, 0x84, 0xaa, 0x1f, - 0x46, 0x73, 0x1d, 0x77, 0x0a, 0x5b, 0xa5, 0xed, 0xe6, 0x5e, 0xa3, 0x1b, 0x1d, 0x77, 0x47, 0x48, - 0xe1, 0x96, 0xc1, 0xb6, 0xa0, 0x2c, 0x2e, 0x84, 0xd7, 0x29, 0x6e, 0x15, 0xb6, 0x9b, 0x7b, 0x80, - 0x80, 0xe1, 0x85, 0xf0, 0xc6, 0xd1, 0xc1, 0x0a, 0x27, 0x0e, 0xfb, 0x04, 0xaa, 0xb1, 0x9c, 0x2b, - 0x4f, 0x74, 0x4a, 0x84, 0x69, 0x21, 0x66, 0x42, 0x14, 0x42, 0x59, 0x2e, 0x6a, 0x3a, 0xf1, 0x03, - 0xd1, 0x29, 0x67, 0x9a, 0xee, 0xf9, 0x81, 0xc1, 0x10, 0x87, 0x7d, 0x04, 0x95, 0xe3, 0xb9, 0x1f, - 0x4c, 0x3b, 0x15, 0x82, 0x34, 0x11, 0xd2, 0x43, 0x02, 0x61, 0x0c, 0x8f, 0x6d, 0x43, 0x3d, 0x0a, - 0x5c, 0x7d, 0x22, 0xd5, 0xac, 0x03, 0xd9, 0x82, 0x47, 0x96, 0xc6, 0x53, 0x2e, 0xbb, 0x0b, 0x4d, - 0x4f, 0x86, 0xb1, 0x56, 0xae, 0x1f, 0xea, 0xb8, 0xd3, 0x24, 0xf0, 0xbb, 0x08, 0xfe, 0x5a, 0xaa, - 0x73, 0xa1, 0xfa, 0x19, 0x93, 0xe7, 0x91, 0xbd, 0x32, 0x14, 0x65, 0xe4, 0xfc, 0xa6, 0x00, 0xf5, - 0x44, 0x2b, 0x73, 0xa0, 0xb5, 0xaf, 0xbc, 0x33, 0x5f, 0x0b, 0x4f, 0xcf, 0x95, 0xe8, 0x14, 0xb6, - 0x0a, 0xdb, 0x0d, 0xbe, 0x44, 0x63, 0x6b, 0x50, 0x1c, 0x4f, 0xc8, 0x51, 0x0d, 0x5e, 0x1c, 0x4f, - 0x58, 0x07, 0x6a, 0x8f, 0x5d, 0xe5, 0xbb, 0xa1, 0x26, 0xcf, 0x34, 0x78, 0x32, 0x65, 0x37, 0xa0, - 0x31, 0x9e, 0x3c, 0x16, 0x2a, 0xf6, 0x65, 0x48, 0xfe, 0x68, 0xf0, 0x8c, 0xc0, 0x36, 0x00, 0xc6, - 0x93, 0x7b, 0xc2, 0x45, 0xa5, 0x71, 0xa7, 0xb2, 0x55, 0xda, 0x6e, 0xf0, 0x1c, 0xc5, 0xf9, 0x15, - 0x54, 0x28, 0x46, 0xec, 0x4b, 0xa8, 0x4e, 0xfd, 0x53, 0x11, 0x6b, 0x63, 0x4e, 0x6f, 0xef, 0xbb, - 0x1f, 0x37, 0x57, 0xfe, 0xfa, 0xe3, 0xe6, 0x4e, 0x2e, 0x19, 0x64, 0x24, 0x42, 0x4f, 0x86, 0xda, - 0xf5, 0x43, 0xa1, 0xe2, 0xdd, 0x53, 0x79, 0xc7, 0x88, 0x74, 0x07, 0xf4, 0xe1, 0x56, 0x03, 0xbb, - 0x09, 0x15, 0x3f, 0x9c, 0x8a, 0x0b, 0xb2, 0xbf, 0xd4, 0x7b, 0xc7, 0xaa, 0x6a, 0x8e, 0xe7, 0x3a, - 0x9a, 0xeb, 0x11, 0xb2, 0xb8, 0x41, 0x38, 0xbf, 0x2b, 0x40, 0xd5, 
0xe4, 0x00, 0xbb, 0x01, 0xe5, - 0x99, 0xd0, 0x2e, 0xad, 0xdf, 0xdc, 0xab, 0xa3, 0x6f, 0x1f, 0x08, 0xed, 0x72, 0xa2, 0x62, 0x7a, - 0xcd, 0xe4, 0x1c, 0x7d, 0x5f, 0xcc, 0xd2, 0xeb, 0x01, 0x52, 0xb8, 0x65, 0xb0, 0x9f, 0x42, 0x2d, - 0x14, 0xfa, 0x99, 0x54, 0xe7, 0xe4, 0xa3, 0x35, 0x13, 0xf4, 0x43, 0xa1, 0x1f, 0xc8, 0xa9, 0xe0, - 0x09, 0x8f, 0xdd, 0x86, 0x7a, 0x2c, 0xbc, 0xb9, 0xf2, 0xf5, 0x82, 0xfc, 0xb5, 0xb6, 0xd7, 0xa6, - 0x2c, 0xb3, 0x34, 0x02, 0xa7, 0x08, 0xe7, 0xcf, 0x05, 0x28, 0xa3, 0x19, 0x8c, 0x41, 0xd9, 0x55, - 0xa7, 0x26, 0xbb, 0x1b, 0x9c, 0xc6, 0xac, 0x0d, 0x25, 0x11, 0x3e, 0x25, 0x8b, 0x1a, 0x1c, 0x87, - 0x48, 0xf1, 0x9e, 0x4d, 0x6d, 0x8c, 0x70, 0x88, 0x72, 0xf3, 0x58, 0x28, 0x1b, 0x1a, 0x1a, 0xb3, - 0x9b, 0xd0, 0x88, 0x94, 0xbc, 0x58, 0x3c, 0x41, 0xe9, 0x4a, 0x2e, 0xf1, 0x90, 0x38, 0x0c, 0x9f, - 0xf2, 0x7a, 0x64, 0x47, 0x6c, 0x07, 0x40, 0x5c, 0x68, 0xe5, 0x1e, 0xc8, 0x58, 0xc7, 0x9d, 0x2a, - 0xed, 0x9d, 0xf2, 0x1d, 0x09, 0xa3, 0x23, 0x9e, 0xe3, 0xb2, 0x75, 0xa8, 0x9f, 0xc9, 0x58, 0x87, - 0xee, 0x4c, 0x74, 0x6a, 0xb4, 0x5c, 0x3a, 0x77, 0xfe, 0x51, 0x84, 0x0a, 0xb9, 0x8b, 0x6d, 0x63, - 0x74, 0xa2, 0xb9, 0x09, 0x74, 0xa9, 0xc7, 0x6c, 0x74, 0x80, 0xf2, 0x20, 0x0d, 0x0e, 0xe6, 0xc4, - 0x3a, 0x7a, 0x2a, 0x10, 0x9e, 0x96, 0xca, 0xa6, 0x62, 0x3a, 0xc7, 0x6d, 0x4d, 0x31, 0x5b, 0xcc, - 0x4e, 0x69, 0xcc, 0x6e, 0x41, 0x55, 0x52, 0x88, 0x69, 0xb3, 0xaf, 0x08, 0xbc, 0x85, 0xa0, 0x72, - 0x25, 0xdc, 0xa9, 0x0c, 0x83, 0x05, 0xb9, 0xa0, 0xce, 0xd3, 0x39, 0xbb, 0x05, 0x0d, 0x8a, 0xe9, - 0xc3, 0x45, 0x24, 0x3a, 0x55, 0x8a, 0xd1, 0x6a, 0x1a, 0x6f, 0x24, 0xf2, 0x8c, 0x8f, 0x87, 0xd8, - 0x73, 0xbd, 0x33, 0x31, 0x8e, 0x74, 0xe7, 0x7a, 0xe6, 0xcb, 0xbe, 0xa5, 0xf1, 0x94, 0x8b, 0x6a, - 0x63, 0xe1, 0x29, 0xa1, 0x11, 0xfa, 0x2e, 0x41, 0x57, 0x6d, 0xe8, 0x0d, 0x91, 0x67, 0x7c, 0xe6, - 0x40, 0x75, 0x32, 0x39, 0x40, 0xe4, 0x7b, 0x59, 0x91, 0x31, 0x14, 0x6e, 0x39, 0x66, 0x0f, 0xf1, - 0x3c, 0xd0, 0xa3, 0x41, 0xe7, 0x7d, 0xe3, 0xa0, 0x64, 0xee, 0x8c, 0xa0, 0x9e, 0x98, 0x80, 0xa7, - 0x79, 0x34, 0xb0, 0xe7, 0xbc, 0x38, 0x1a, 0xb0, 0x3b, 0x50, 0x8b, 0xcf, 0x5c, 0xe5, 0x87, 0xa7, - 0xe4, 0xd7, 0xb5, 0xbd, 0x77, 0x52, 0x8b, 0x27, 0x86, 0x8e, 0xab, 0x24, 0x18, 0x47, 0x42, 0x23, - 0x35, 0xf1, 0x25, 0x5d, 0x6d, 0x28, 0xcd, 0xfd, 0x29, 0xe9, 0x59, 0xe5, 0x38, 0x44, 0xca, 0xa9, - 0x6f, 0x72, 0x70, 0x95, 0xe3, 0x10, 0x83, 0x35, 0x93, 0x53, 0x53, 0x2e, 0x57, 0x39, 0x8d, 0xd1, - 0x76, 0x19, 0x69, 0x5f, 0x86, 0x6e, 0x90, 0xf8, 0x3f, 0x99, 0x3b, 0x41, 0xb2, 0xf7, 0xff, 0xc9, - 0x6a, 0xbf, 0x2e, 0x40, 0x3d, 0xa9, 0xf1, 0x58, 0xb0, 0xfc, 0xa9, 0x08, 0xb5, 0x7f, 0xe2, 0x0b, - 0x65, 0x17, 0xce, 0x51, 0xd8, 0x1d, 0xa8, 0xb8, 0x5a, 0xab, 0xa4, 0x0c, 0xbc, 0x9f, 0xbf, 0x20, - 0xba, 0xfb, 0xc8, 0x19, 0x86, 0x5a, 0x2d, 0xb8, 0x41, 0xad, 0x7f, 0x0e, 0x90, 0x11, 0xd1, 0xd6, - 0x73, 0xb1, 0xb0, 0x5a, 0x71, 0xc8, 0xae, 0x43, 0xe5, 0xa9, 0x1b, 0xcc, 0x85, 0xcd, 0x6f, 0x33, - 0xf9, 0xa2, 0xf8, 0x79, 0xc1, 0xf9, 0x53, 0x11, 0x6a, 0xf6, 0xc2, 0x60, 0xb7, 0xa1, 0x46, 0x17, - 0x86, 0xb5, 0xe8, 0xea, 0x43, 0x93, 0x40, 0xd8, 0x6e, 0x7a, 0x13, 0xe6, 0x6c, 0xb4, 0xaa, 0xcc, - 0x8d, 0x68, 0x6d, 0xcc, 0xee, 0xc5, 0xd2, 0x54, 0x9c, 0xd8, 0x2b, 0x6f, 0x0d, 0xd1, 0x03, 0x71, - 0xe2, 0x87, 0x3e, 0xfa, 0x87, 0x23, 0x8b, 0xdd, 0x4e, 0x76, 0x5d, 0x26, 0x8d, 0xef, 0xe5, 0x35, - 0xbe, 0xbc, 0xe9, 0x11, 0x34, 0x73, 0xcb, 0x5c, 0xb1, 0xeb, 0x8f, 0xf3, 0xbb, 0xb6, 0x4b, 0x92, - 0x3a, 0x73, 0x5f, 0x67, 0x5e, 0xf8, 0x0f, 0xfc, 0xf7, 0x19, 0x40, 0xa6, 0xf2, 0xcd, 0x8b, 0x8e, - 0xf3, 0x4d, 0x09, 0x60, 0x1c, 0x61, 0xc9, 0x9d, 0xba, 0x54, 0xf7, 0x5b, 0xfe, 0x69, 0x28, 
0x95, - 0x78, 0x42, 0xc7, 0x98, 0xe4, 0xeb, 0xbc, 0x69, 0x68, 0x74, 0x62, 0xd8, 0x3e, 0x34, 0xa7, 0x22, - 0xf6, 0x94, 0x4f, 0x09, 0x65, 0x9d, 0xbe, 0x89, 0x7b, 0xca, 0xf4, 0x74, 0x07, 0x19, 0xc2, 0xf8, - 0x2a, 0x2f, 0xc3, 0xf6, 0xa0, 0x25, 0x2e, 0x22, 0xa9, 0xb4, 0x5d, 0xc5, 0xf4, 0x15, 0xd7, 0x4c, - 0x87, 0x82, 0x74, 0x5a, 0x89, 0x37, 0x45, 0x36, 0x61, 0x2e, 0x94, 0x3d, 0x37, 0x32, 0x97, 0x6a, - 0x73, 0xaf, 0x73, 0x69, 0xbd, 0xbe, 0x1b, 0x19, 0xa7, 0xf5, 0x3e, 0xc5, 0xbd, 0x7e, 0xf3, 0xb7, - 0xcd, 0x5b, 0xb9, 0x9b, 0x74, 0x26, 0x8f, 0x17, 0xbb, 0x94, 0x2f, 0xe7, 0xbe, 0xde, 0x9d, 0x6b, - 0x3f, 0xd8, 0x75, 0x23, 0x1f, 0xd5, 0xa1, 0xe0, 0x68, 0xc0, 0x49, 0xf5, 0xfa, 0xcf, 0xa1, 0x7d, - 0xd9, 0xee, 0xb7, 0x89, 0xc1, 0xfa, 0x5d, 0x68, 0xa4, 0x76, 0xbc, 0x4e, 0xb0, 0x9e, 0x0f, 0xde, - 0x1f, 0x0b, 0x50, 0x35, 0xa7, 0x8a, 0xdd, 0x85, 0x46, 0x20, 0x3d, 0x17, 0x0d, 0x48, 0x5a, 0xbb, - 0x0f, 0xb2, 0x43, 0xd7, 0xbd, 0x9f, 0xf0, 0x8c, 0x57, 0x33, 0x2c, 0x26, 0x99, 0x1f, 0x9e, 0xc8, - 0xe4, 0x14, 0xac, 0x65, 0x42, 0xa3, 0xf0, 0x44, 0x72, 0xc3, 0x5c, 0xff, 0x0a, 0xd6, 0x96, 0x55, - 0x5c, 0x61, 0xe7, 0x47, 0xcb, 0xe9, 0x4a, 0x35, 0x3b, 0x15, 0xca, 0x9b, 0x7d, 0x17, 0x1a, 0x29, - 0x9d, 0xed, 0xbc, 0x6c, 0x78, 0x2b, 0x2f, 0x99, 0xb3, 0xd5, 0x09, 0x00, 0x32, 0xd3, 0xb0, 0x58, - 0x61, 0x0f, 0x49, 0xf7, 0xa8, 0x31, 0x23, 0x9d, 0xd3, 0xbd, 0xe7, 0x6a, 0x97, 0x4c, 0x69, 0x71, - 0x1a, 0xb3, 0x2e, 0xc0, 0x34, 0x3d, 0xb0, 0xaf, 0x38, 0xc6, 0x39, 0x84, 0x33, 0x86, 0x7a, 0x62, - 0x04, 0xdb, 0x82, 0x66, 0x6c, 0x57, 0xc6, 0x8e, 0x09, 0x97, 0xab, 0xf0, 0x3c, 0x09, 0x3b, 0x1f, - 0xe5, 0x86, 0xa7, 0x62, 0xa9, 0xf3, 0xe1, 0x48, 0xe1, 0x96, 0xe1, 0x7c, 0x0d, 0x15, 0x22, 0xe0, - 0x31, 0x8b, 0xb5, 0xab, 0xb4, 0x6d, 0xa2, 0x4c, 0x53, 0x21, 0x63, 0x5a, 0xb6, 0x57, 0xc6, 0x44, - 0xe4, 0x06, 0xc0, 0x3e, 0xc6, 0xd6, 0x65, 0x6a, 0x3d, 0x7a, 0x15, 0x0e, 0xd9, 0xce, 0xcf, 0xa0, - 0x9e, 0x90, 0x71, 0xe7, 0xf7, 0xfd, 0x50, 0x58, 0x13, 0x69, 0x8c, 0xcd, 0x67, 0xff, 0xcc, 0x55, - 0xae, 0xa7, 0x85, 0x69, 0x11, 0x2a, 0x3c, 0x23, 0x38, 0x1f, 0x41, 0x33, 0x77, 0x7a, 0x30, 0xdd, - 0x1e, 0x53, 0x18, 0xcd, 0x19, 0x36, 0x13, 0xe7, 0xf7, 0xd8, 0x1a, 0x27, 0xdd, 0xce, 0x4f, 0x00, - 0xce, 0xb4, 0x8e, 0x9e, 0x50, 0xfb, 0x63, 0x7d, 0xdf, 0x40, 0x0a, 0x21, 0xd8, 0x26, 0x34, 0x71, - 0x12, 0x5b, 0xbe, 0xc9, 0x77, 0x92, 0x88, 0x0d, 0xe0, 0xff, 0xa1, 0x71, 0x92, 0x8a, 0x97, 0x6c, - 0xe8, 0x12, 0xe9, 0x0f, 0xa0, 0x1e, 0x4a, 0xcb, 0x33, 0xdd, 0x58, 0x2d, 0x94, 0xa9, 0x9c, 0x1b, - 0x04, 0x96, 0x57, 0x31, 0x72, 0x6e, 0x10, 0x10, 0xd3, 0xb9, 0x05, 0xff, 0xf7, 0x52, 0x93, 0xcf, - 0xde, 0x83, 0xea, 0x89, 0x1f, 0x68, 0xba, 0x11, 0xb0, 0xfb, 0xb3, 0x33, 0xe7, 0x5f, 0x05, 0x80, - 0x2c, 0xec, 0x98, 0xcc, 0x58, 0xda, 0x11, 0xd3, 0x32, 0xa5, 0x3c, 0x80, 0xfa, 0xcc, 0x16, 0x09, - 0x1b, 0xd0, 0x1b, 0xcb, 0xa9, 0xd2, 0x4d, 0x6a, 0x88, 0x29, 0x1f, 0x7b, 0xb6, 0x7c, 0xbc, 0x4d, - 0x23, 0x9e, 0xae, 0x40, 0x5d, 0x4c, 0xfe, 0x41, 0x05, 0xd9, 0x29, 0xe4, 0x96, 0xb3, 0xfe, 0x15, - 0xac, 0x2e, 0x2d, 0xf9, 0x86, 0x17, 0x46, 0x56, 0xec, 0xf2, 0x47, 0xf0, 0x36, 0x54, 0x4d, 0x67, - 0x8a, 0xf9, 0x82, 0x23, 0xab, 0x86, 0xc6, 0xd4, 0x4e, 0x1c, 0x25, 0xcf, 0x9a, 0xd1, 0x91, 0xb3, - 0x07, 0x55, 0xf3, 0x6e, 0x63, 0xdb, 0x50, 0x73, 0x3d, 0x73, 0x56, 0x73, 0xf5, 0x02, 0x99, 0xfb, - 0x44, 0xe6, 0x09, 0xdb, 0xf9, 0x4b, 0x11, 0x20, 0xa3, 0xbf, 0x45, 0x3b, 0xfb, 0x05, 0xac, 0xc5, - 0xc2, 0x93, 0xe1, 0xd4, 0x55, 0x0b, 0xe2, 0xda, 0xf7, 0xc9, 0x55, 0x22, 0x97, 0x90, 0xb9, 0xd6, - 0xb6, 0xf4, 0xfa, 0xd6, 0x76, 0x1b, 0xca, 0x9e, 0x8c, 0x16, 0xf6, 0x16, 0x61, 0xcb, 0x1b, 0xe9, - 0xcb, 0x68, 0x81, 
0xaf, 0x54, 0x44, 0xb0, 0x2e, 0x54, 0x67, 0xe7, 0xf4, 0x92, 0x35, 0xaf, 0x80, - 0xeb, 0xcb, 0xd8, 0x07, 0xe7, 0x38, 0xc6, 0x77, 0xaf, 0x41, 0xb1, 0x5b, 0x50, 0x99, 0x9d, 0x4f, - 0x7d, 0x45, 0x4d, 0x71, 0xd3, 0xb4, 0x8d, 0x79, 0xf8, 0xc0, 0x57, 0xf8, 0xba, 0x25, 0x0c, 0x73, - 0xa0, 0xa8, 0x66, 0xf4, 0x10, 0x68, 0x9a, 0x27, 0x4e, 0xce, 0x9b, 0xb3, 0x83, 0x15, 0x5e, 0x54, - 0xb3, 0x5e, 0x1d, 0xaa, 0xc6, 0xaf, 0xce, 0x3f, 0x4b, 0xb0, 0xb6, 0x6c, 0x25, 0xe6, 0x41, 0xac, - 0xbc, 0x24, 0x0f, 0x62, 0xe5, 0xa5, 0x5d, 0x7f, 0x31, 0xd7, 0xf5, 0x3b, 0x50, 0x91, 0xcf, 0x42, - 0xa1, 0xf2, 0x4f, 0xf6, 0xfe, 0x99, 0x7c, 0x16, 0x62, 0x0f, 0x6b, 0x58, 0x4b, 0x2d, 0x61, 0xc5, - 0xb6, 0x84, 0x1f, 0xc3, 0xea, 0x89, 0x0c, 0x02, 0xf9, 0x6c, 0xb2, 0x98, 0x05, 0x7e, 0x78, 0x6e, - 0xfb, 0xc2, 0x65, 0x22, 0xdb, 0x86, 0x6b, 0x53, 0x5f, 0xa1, 0x39, 0x7d, 0x19, 0x6a, 0x11, 0xd2, - 0x23, 0x08, 0x71, 0x97, 0xc9, 0xec, 0x4b, 0xd8, 0x72, 0xb5, 0x16, 0xb3, 0x48, 0x3f, 0x0a, 0x23, - 0xd7, 0x3b, 0x1f, 0x48, 0x8f, 0xce, 0xec, 0x2c, 0x72, 0xb5, 0x7f, 0xec, 0x07, 0xf8, 0xde, 0xab, - 0x91, 0xe8, 0x6b, 0x71, 0xec, 0x13, 0x58, 0xf3, 0x94, 0x70, 0xb5, 0x18, 0x88, 0x58, 0x1f, 0xb9, - 0xfa, 0xac, 0x53, 0x27, 0xc9, 0x4b, 0x54, 0xdc, 0x83, 0x8b, 0xd6, 0x7e, 0xed, 0x07, 0x53, 0xcf, - 0x55, 0xd3, 0x4e, 0xc3, 0xec, 0x61, 0x89, 0xc8, 0xba, 0xc0, 0x88, 0x30, 0x9c, 0x45, 0x7a, 0x91, - 0x42, 0x81, 0xa0, 0x57, 0x70, 0xb0, 0xaa, 0x6a, 0x7f, 0x26, 0x62, 0xed, 0xce, 0x22, 0xfa, 0xd5, - 0x50, 0xe2, 0x19, 0x81, 0xdd, 0x84, 0xb6, 0x1f, 0x7a, 0xc1, 0x7c, 0x2a, 0x9e, 0x44, 0xb8, 0x11, - 0x15, 0xc6, 0x9d, 0x16, 0xd5, 0xa0, 0x6b, 0x96, 0x7e, 0x64, 0xc9, 0x08, 0x15, 0x17, 0x97, 0xa0, - 0xab, 0x06, 0x6a, 0xe9, 0x09, 0xd4, 0xf9, 0xb6, 0x00, 0xed, 0xcb, 0x89, 0x87, 0x61, 0x8b, 0x70, - 0xf3, 0xf6, 0x08, 0xe3, 0x38, 0x0d, 0x65, 0x31, 0x17, 0xca, 0xe4, 0x52, 0x2c, 0xe5, 0x2e, 0xc5, - 0x34, 0x2d, 0xca, 0xaf, 0x4e, 0x8b, 0xa5, 0x8d, 0x56, 0x2e, 0x6d, 0xd4, 0xf9, 0x6d, 0x01, 0xae, - 0x5d, 0x4a, 0xee, 0x37, 0xb6, 0x68, 0x0b, 0x9a, 0x33, 0xf7, 0x5c, 0x1c, 0xb9, 0x8a, 0x52, 0xa6, - 0x64, 0xba, 0xc6, 0x1c, 0xe9, 0xbf, 0x60, 0x5f, 0x08, 0xad, 0xfc, 0x89, 0xba, 0xd2, 0xb6, 0x24, - 0x41, 0x0e, 0xa5, 0xbe, 0x27, 0xe7, 0xf6, 0xc2, 0x4d, 0x12, 0x24, 0x21, 0xbe, 0x9c, 0x46, 0xa5, - 0x2b, 0xd2, 0xc8, 0x39, 0x84, 0x7a, 0x62, 0x20, 0xdb, 0xb4, 0x7f, 0x15, 0x0a, 0xd9, 0xdf, 0xad, - 0x47, 0xb1, 0x50, 0x68, 0xbb, 0xf9, 0xc5, 0xf0, 0x21, 0x54, 0x4e, 0x95, 0x9c, 0x47, 0xb6, 0x62, - 0x2f, 0x21, 0x0c, 0xc7, 0x99, 0x40, 0xcd, 0x52, 0xd8, 0x0e, 0x54, 0x8f, 0x17, 0x87, 0x49, 0xbf, - 0x63, 0xcb, 0x05, 0xce, 0xa7, 0x16, 0x81, 0x35, 0xc8, 0x20, 0xd8, 0x75, 0x28, 0x1f, 0x2f, 0x46, - 0x03, 0xf3, 0x06, 0xc4, 0x4a, 0x86, 0xb3, 0x5e, 0xd5, 0x18, 0xe4, 0xdc, 0x87, 0x56, 0x5e, 0x0e, - 0x9d, 0x92, 0xeb, 0xa3, 0x68, 0x9c, 0x95, 0xec, 0xe2, 0x6b, 0x4a, 0xf6, 0xce, 0x36, 0xd4, 0xec, - 0xff, 0x1b, 0xd6, 0x80, 0xca, 0xa3, 0xc3, 0xc9, 0xf0, 0x61, 0x7b, 0x85, 0xd5, 0xa1, 0x7c, 0x30, - 0x9e, 0x3c, 0x6c, 0x17, 0x70, 0x74, 0x38, 0x3e, 0x1c, 0xb6, 0x8b, 0x3b, 0x37, 0xa1, 0x95, 0xff, - 0x83, 0xc3, 0x9a, 0x50, 0x9b, 0xec, 0x1f, 0x0e, 0x7a, 0xe3, 0x5f, 0xb6, 0x57, 0x58, 0x0b, 0xea, - 0xa3, 0xc3, 0xc9, 0xb0, 0xff, 0x88, 0x0f, 0xdb, 0x85, 0x9d, 0x5f, 0x40, 0x23, 0xfd, 0x91, 0x80, - 0x1a, 0x7a, 0xa3, 0xc3, 0x41, 0x7b, 0x85, 0x01, 0x54, 0x27, 0xc3, 0x3e, 0x1f, 0xa2, 0xde, 0x1a, - 0x94, 0x26, 0x93, 0x83, 0x76, 0x11, 0x57, 0xed, 0xef, 0xf7, 0x0f, 0x86, 0xed, 0x12, 0x0e, 0x1f, - 0x3e, 0x38, 0xba, 0x37, 0x69, 0x97, 0x77, 0x3e, 0x83, 0x6b, 0x97, 0x1e, 0xeb, 0x24, 0x7d, 0xb0, - 0xcf, 0x87, 0xa8, 0xa9, 0x09, 0xb5, 0x23, 
0x3e, 0x7a, 0xbc, 0xff, 0x70, 0xd8, 0x2e, 0x20, 0xe3, - 0xfe, 0xb8, 0xff, 0xd5, 0x70, 0xd0, 0x2e, 0xf6, 0x6e, 0x7c, 0xf7, 0x7c, 0xa3, 0xf0, 0xfd, 0xf3, - 0x8d, 0xc2, 0x0f, 0xcf, 0x37, 0x0a, 0x7f, 0x7f, 0xbe, 0x51, 0xf8, 0xf6, 0xc5, 0xc6, 0xca, 0xf7, - 0x2f, 0x36, 0x56, 0x7e, 0x78, 0xb1, 0xb1, 0x72, 0x5c, 0xa5, 0xff, 0xa9, 0x9f, 0xfe, 0x3b, 0x00, - 0x00, 0xff, 0xff, 0x3a, 0xd6, 0xef, 0x88, 0x8f, 0x15, 0x00, 0x00, -} - -func (m *Op) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Op) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Op) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Constraints != nil { - { - size, err := m.Constraints.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.Platform != nil { - { - size, err := m.Platform.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.Op != nil { - { - size := m.Op.Size() - i -= size - if _, err := m.Op.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if len(m.Inputs) > 0 { - for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Inputs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Op_Exec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Op_Exec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Exec != nil { - { - size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - return len(dAtA) - i, nil -} -func (m *Op_Source) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Op_Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Source != nil { - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - return len(dAtA) - i, nil -} -func (m *Op_File) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Op_File) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.File != nil { - { - size, err := m.File.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *Op_Build) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Op_Build) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Build != nil { - { - size, err := m.Build.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m 
*Platform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Platform) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Platform) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.OSFeatures) > 0 { - for iNdEx := len(m.OSFeatures) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.OSFeatures[iNdEx]) - copy(dAtA[i:], m.OSFeatures[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(m.OSFeatures[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.OSVersion) > 0 { - i -= len(m.OSVersion) - copy(dAtA[i:], m.OSVersion) - i = encodeVarintOps(dAtA, i, uint64(len(m.OSVersion))) - i-- - dAtA[i] = 0x22 - } - if len(m.Variant) > 0 { - i -= len(m.Variant) - copy(dAtA[i:], m.Variant) - i = encodeVarintOps(dAtA, i, uint64(len(m.Variant))) - i-- - dAtA[i] = 0x1a - } - if len(m.OS) > 0 { - i -= len(m.OS) - copy(dAtA[i:], m.OS) - i = encodeVarintOps(dAtA, i, uint64(len(m.OS))) - i-- - dAtA[i] = 0x12 - } - if len(m.Architecture) > 0 { - i -= len(m.Architecture) - copy(dAtA[i:], m.Architecture) - i = encodeVarintOps(dAtA, i, uint64(len(m.Architecture))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Input) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Input) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Input) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Index != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Index)) - i-- - dAtA[i] = 0x10 - } - if len(m.Digest) > 0 { - i -= len(m.Digest) - copy(dAtA[i:], m.Digest) - i = encodeVarintOps(dAtA, i, uint64(len(m.Digest))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ExecOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Security != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Security)) - i-- - dAtA[i] = 0x20 - } - if m.Network != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Network)) - i-- - dAtA[i] = 0x18 - } - if len(m.Mounts) > 0 { - for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Meta != nil { - { - size, err := m.Meta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Meta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Meta) MarshalTo(dAtA []byte) (int, error) { - 
size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Hostname) > 0 { - i -= len(m.Hostname) - copy(dAtA[i:], m.Hostname) - i = encodeVarintOps(dAtA, i, uint64(len(m.Hostname))) - i-- - dAtA[i] = 0x3a - } - if len(m.ExtraHosts) > 0 { - for iNdEx := len(m.ExtraHosts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ExtraHosts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - } - if m.ProxyEnv != nil { - { - size, err := m.ProxyEnv.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.User) > 0 { - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintOps(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x22 - } - if len(m.Cwd) > 0 { - i -= len(m.Cwd) - copy(dAtA[i:], m.Cwd) - i = encodeVarintOps(dAtA, i, uint64(len(m.Cwd))) - i-- - dAtA[i] = 0x1a - } - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Mount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Mount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResultID) > 0 { - i -= len(m.ResultID) - copy(dAtA[i:], m.ResultID) - i = encodeVarintOps(dAtA, i, uint64(len(m.ResultID))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - if m.SSHOpt != nil { - { - size, err := m.SSHOpt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if m.SecretOpt != nil { - { - size, err := m.SecretOpt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - if m.CacheOpt != nil { - { - size, err := m.CacheOpt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - if m.MountType != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.MountType)) - i-- - dAtA[i] = 0x30 - } - if m.Readonly { - i-- - if m.Readonly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Output != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Output)) - i-- - dAtA[i] = 0x20 - } - if len(m.Dest) > 0 { - i -= len(m.Dest) - copy(dAtA[i:], m.Dest) - i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) - i-- - dAtA[i] = 0x1a - } - if len(m.Selector) > 0 { - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 
0x12 - } - if m.Input != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CacheOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CacheOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Sharing != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Sharing)) - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SecretOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretOpt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SecretOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Optional { - i-- - if m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Mode != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) - i-- - dAtA[i] = 0x20 - } - if m.Gid != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Gid)) - i-- - dAtA[i] = 0x18 - } - if m.Uid != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Uid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SSHOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SSHOpt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SSHOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Optional { - i-- - if m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Mode != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) - i-- - dAtA[i] = 0x20 - } - if m.Gid != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Gid)) - i-- - dAtA[i] = 0x18 - } - if m.Uid != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Uid)) - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SourceOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SourceOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attrs) > 0 { - keysForAttrs := make([]string, 0, len(m.Attrs)) - for k := range m.Attrs { - keysForAttrs = append(keysForAttrs, string(k)) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) - for iNdEx := len(keysForAttrs) - 1; iNdEx >= 0; iNdEx-- { - v := m.Attrs[string(keysForAttrs[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAttrs[iNdEx]) - copy(dAtA[i:], keysForAttrs[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(keysForAttrs[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintOps(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Identifier) > 0 { - i -= len(m.Identifier) - copy(dAtA[i:], m.Identifier) - i = encodeVarintOps(dAtA, i, uint64(len(m.Identifier))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *BuildOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BuildOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attrs) > 0 { - keysForAttrs := make([]string, 0, len(m.Attrs)) - for k := range m.Attrs { - keysForAttrs = append(keysForAttrs, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) - for iNdEx := len(keysForAttrs) - 1; iNdEx >= 0; iNdEx-- { - v := m.Attrs[string(keysForAttrs[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAttrs[iNdEx]) - copy(dAtA[i:], keysForAttrs[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(keysForAttrs[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintOps(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } - } - if m.Def != nil { - { - size, err := m.Def.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Inputs) > 0 { - keysForInputs := make([]string, 0, len(m.Inputs)) - for k := range m.Inputs { - keysForInputs = append(keysForInputs, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForInputs) - for iNdEx := len(keysForInputs) - 1; iNdEx >= 0; iNdEx-- { - v := m.Inputs[string(keysForInputs[iNdEx])] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(keysForInputs[iNdEx]) - copy(dAtA[i:], keysForInputs[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(keysForInputs[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintOps(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if m.Builder != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Builder)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *BuildInput) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BuildInput) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BuildInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Input != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m 
*OpMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OpMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Caps) > 0 { - keysForCaps := make([]string, 0, len(m.Caps)) - for k := range m.Caps { - keysForCaps = append(keysForCaps, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCaps) - for iNdEx := len(keysForCaps) - 1; iNdEx >= 0; iNdEx-- { - v := m.Caps[github_com_moby_buildkit_util_apicaps.CapID(keysForCaps[iNdEx])] - baseI := i - i-- - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(keysForCaps[iNdEx]) - copy(dAtA[i:], keysForCaps[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(keysForCaps[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintOps(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } - } - if m.ExportCache != nil { - { - size, err := m.ExportCache.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Description) > 0 { - keysForDescription := make([]string, 0, len(m.Description)) - for k := range m.Description { - keysForDescription = append(keysForDescription, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) - for iNdEx := len(keysForDescription) - 1; iNdEx >= 0; iNdEx-- { - v := m.Description[string(keysForDescription[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForDescription[iNdEx]) - copy(dAtA[i:], keysForDescription[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(keysForDescription[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintOps(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if m.IgnoreCache { - i-- - if m.IgnoreCache { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Source) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Source) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Source) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Infos) > 0 { - for iNdEx := len(m.Infos) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Infos[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Locations) > 0 { - keysForLocations := make([]string, 0, len(m.Locations)) - for k := range m.Locations { - keysForLocations = append(keysForLocations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLocations) - for iNdEx := len(keysForLocations) - 1; iNdEx >= 0; iNdEx-- { - v := m.Locations[string(keysForLocations[iNdEx])] - baseI := i - if v != nil { - { - size, err := v.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= 
len(keysForLocations[iNdEx]) - copy(dAtA[i:], keysForLocations[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(keysForLocations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintOps(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Locations) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Locations) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Locations) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Locations) > 0 { - for iNdEx := len(m.Locations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Locations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SourceInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SourceInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SourceInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Definition != nil { - { - size, err := m.Definition.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintOps(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x12 - } - if len(m.Filename) > 0 { - i -= len(m.Filename) - copy(dAtA[i:], m.Filename) - i = encodeVarintOps(dAtA, i, uint64(len(m.Filename))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Location) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Location) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Location) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.SourceIndex != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.SourceIndex)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *Range) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Range) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Range) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.End.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Start.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Position) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Position) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Position) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Character != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Character)) - i-- - dAtA[i] = 0x10 - } - if m.Line != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Line)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ExportCache) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportCache) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportCache) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Value { - i-- - if m.Value { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProxyEnv) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProxyEnv) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AllProxy) > 0 { - i -= len(m.AllProxy) - copy(dAtA[i:], m.AllProxy) - i = encodeVarintOps(dAtA, i, uint64(len(m.AllProxy))) - i-- - dAtA[i] = 0x2a - } - if len(m.NoProxy) > 0 { - i -= len(m.NoProxy) - copy(dAtA[i:], m.NoProxy) - i = encodeVarintOps(dAtA, i, uint64(len(m.NoProxy))) - i-- - dAtA[i] = 0x22 - } - if len(m.FtpProxy) > 0 { - i -= len(m.FtpProxy) - copy(dAtA[i:], m.FtpProxy) - i = encodeVarintOps(dAtA, i, uint64(len(m.FtpProxy))) - i-- - dAtA[i] = 0x1a - } - if len(m.HttpsProxy) > 0 { - i -= len(m.HttpsProxy) - copy(dAtA[i:], m.HttpsProxy) - i = encodeVarintOps(dAtA, i, uint64(len(m.HttpsProxy))) - i-- - dAtA[i] = 0x12 - } - if len(m.HttpProxy) > 0 { - i -= len(m.HttpProxy) - copy(dAtA[i:], m.HttpProxy) - i = encodeVarintOps(dAtA, i, uint64(len(m.HttpProxy))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WorkerConstraints) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WorkerConstraints) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WorkerConstraints) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Filter) > 0 { - for iNdEx := len(m.Filter) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Filter[iNdEx]) - copy(dAtA[i:], m.Filter[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(m.Filter[iNdEx]))) - 
i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Definition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Definition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Definition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Source != nil { - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for iNdEx := len(keysForMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.Metadata[github_com_opencontainers_go_digest.Digest(keysForMetadata[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForMetadata[iNdEx]) - copy(dAtA[i:], keysForMetadata[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(keysForMetadata[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintOps(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Def) > 0 { - for iNdEx := len(m.Def) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Def[iNdEx]) - copy(dAtA[i:], m.Def[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(m.Def[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *HostIP) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FileOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileOp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Actions) > 0 { - for iNdEx := len(m.Actions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Actions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - return len(dAtA) - i, nil -} - -func (m *FileAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *FileAction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Action != nil { - { - size := m.Action.Size() - i -= size - if _, err := m.Action.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - if m.Output != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Output)) - i-- - dAtA[i] = 0x18 - } - if m.SecondaryInput != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.SecondaryInput)) - i-- - dAtA[i] = 0x10 - } - if m.Input != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *FileAction_Copy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileAction_Copy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Copy != nil { - { - size, err := m.Copy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - return len(dAtA) - i, nil -} -func (m *FileAction_Mkfile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileAction_Mkfile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Mkfile != nil { - { - size, err := m.Mkfile.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - return len(dAtA) - i, nil -} -func (m *FileAction_Mkdir) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileAction_Mkdir) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Mkdir != nil { - { - size, err := m.Mkdir.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - return len(dAtA) - i, nil -} -func (m *FileAction_Rm) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileAction_Rm) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.Rm != nil { - { - size, err := m.Rm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - return len(dAtA) - i, nil -} -func (m *FileActionCopy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileActionCopy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ExcludePatterns) > 0 { - for iNdEx := len(m.ExcludePatterns) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExcludePatterns[iNdEx]) - copy(dAtA[i:], m.ExcludePatterns[iNdEx]) - i = encodeVarintOps(dAtA, i, uint64(len(m.ExcludePatterns[iNdEx]))) - i-- - dAtA[i] = 0x6a - } - } - if len(m.IncludePatterns) > 0 { - for iNdEx := len(m.IncludePatterns) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.IncludePatterns[iNdEx]) - copy(dAtA[i:], m.IncludePatterns[iNdEx]) - i = 
encodeVarintOps(dAtA, i, uint64(len(m.IncludePatterns[iNdEx]))) - i-- - dAtA[i] = 0x62 - } - } - if m.Timestamp != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x58 - } - if m.AllowEmptyWildcard { - i-- - if m.AllowEmptyWildcard { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x50 - } - if m.AllowWildcard { - i-- - if m.AllowWildcard { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - } - if m.CreateDestPath { - i-- - if m.CreateDestPath { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - } - if m.AttemptUnpackDockerCompatibility { - i-- - if m.AttemptUnpackDockerCompatibility { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.DirCopyContents { - i-- - if m.DirCopyContents { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.FollowSymlink { - i-- - if m.FollowSymlink { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.Mode != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) - i-- - dAtA[i] = 0x20 - } - if m.Owner != nil { - { - size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.Dest) > 0 { - i -= len(m.Dest) - copy(dAtA[i:], m.Dest) - i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) - i-- - dAtA[i] = 0x12 - } - if len(m.Src) > 0 { - i -= len(m.Src) - copy(dAtA[i:], m.Src) - i = encodeVarintOps(dAtA, i, uint64(len(m.Src))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FileActionMkFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileActionMkFile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileActionMkFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Timestamp != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x28 - } - if m.Owner != nil { - { - size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintOps(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x1a - } - if m.Mode != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) - i-- - dAtA[i] = 0x10 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FileActionMkDir) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileActionMkDir) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileActionMkDir) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Timestamp != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) - i-- - dAtA[i] = 0x28 - } - if m.Owner != nil { - { - size, err := m.Owner.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.MakeParents { - i-- - if m.MakeParents { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.Mode != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) - i-- - dAtA[i] = 0x10 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *FileActionRm) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FileActionRm) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FileActionRm) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.AllowWildcard { - i-- - if m.AllowWildcard { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.AllowNotFound { - i-- - if m.AllowNotFound { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ChownOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ChownOpt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ChownOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Group != nil { - { - size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.User != nil { - { - size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *UserOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserOpt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.User != nil { - { - size := m.User.Size() - i -= size - if _, err := m.User.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - } - } - return len(dAtA) - i, nil -} - -func (m *UserOpt_ByName) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserOpt_ByName) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - if m.ByName != nil { - { - size, err := m.ByName.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOps(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} -func (m *UserOpt_ByID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserOpt_ByID) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - i = encodeVarintOps(dAtA, i, uint64(m.ByID)) - i-- - dAtA[i] = 0x10 - return len(dAtA) - i, nil -} -func (m *NamedUserOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamedUserOpt) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamedUserOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Input != 0 { - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - i-- - dAtA[i] = 0x10 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintOps(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintOps(dAtA []byte, offset int, v uint64) int { - offset -= sovOps(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Op) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Inputs) > 0 { - for _, e := range m.Inputs { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - if m.Op != nil { - n += m.Op.Size() - } - if m.Platform != nil { - l = m.Platform.Size() - n += 1 + l + sovOps(uint64(l)) - } - if m.Constraints != nil { - l = m.Constraints.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *Op_Exec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *Op_Source) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Source != nil { - l = m.Source.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *Op_File) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.File != nil { - l = m.File.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *Op_Build) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Build != nil { - l = m.Build.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *Platform) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Architecture) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.OS) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.Variant) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.OSVersion) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if len(m.OSFeatures) > 0 { - for _, s := range m.OSFeatures { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *Input) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Index != 0 { - n += 1 + sovOps(uint64(m.Index)) - } - return n -} - -func (m *ExecOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Meta != nil { - l = m.Meta.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - if m.Network != 0 { - n += 1 + sovOps(uint64(m.Network)) - } - if m.Security != 0 { - n += 1 + sovOps(uint64(m.Security)) - } - return n -} - -func (m *Meta) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - l = 
len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - l = len(m.Cwd) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.User) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.ProxyEnv != nil { - l = m.ProxyEnv.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.ExtraHosts) > 0 { - for _, e := range m.ExtraHosts { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - l = len(m.Hostname) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *Mount) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - } - l = len(m.Selector) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.Dest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Output != 0 { - n += 1 + sovOps(uint64(m.Output)) - } - if m.Readonly { - n += 2 - } - if m.MountType != 0 { - n += 1 + sovOps(uint64(m.MountType)) - } - if m.CacheOpt != nil { - l = m.CacheOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - if m.SecretOpt != nil { - l = m.SecretOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - if m.SSHOpt != nil { - l = m.SSHOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - l = len(m.ResultID) - if l > 0 { - n += 2 + l + sovOps(uint64(l)) - } - return n -} - -func (m *CacheOpt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Sharing != 0 { - n += 1 + sovOps(uint64(m.Sharing)) - } - return n -} - -func (m *SecretOpt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Uid != 0 { - n += 1 + sovOps(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovOps(uint64(m.Gid)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - if m.Optional { - n += 2 - } - return n -} - -func (m *SSHOpt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Uid != 0 { - n += 1 + sovOps(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovOps(uint64(m.Gid)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - if m.Optional { - n += 2 - } - return n -} - -func (m *SourceOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Identifier) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - return n -} - -func (m *BuildOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Builder != 0 { - n += 1 + sovOps(uint64(m.Builder)) - } - if len(m.Inputs) > 0 { - for k, v := range m.Inputs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovOps(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - if m.Def != nil { - l = m.Def.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - return n -} - -func (m *BuildInput) Size() (n int) { - if m == nil { - return 0 - } - var l int - 
_ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - } - return n -} - -func (m *OpMetadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.IgnoreCache { - n += 2 - } - if len(m.Description) > 0 { - for k, v := range m.Description { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - if m.ExportCache != nil { - l = m.ExportCache.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Caps) > 0 { - for k, v := range m.Caps { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - return n -} - -func (m *Source) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Locations) > 0 { - for k, v := range m.Locations { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovOps(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - if len(m.Infos) > 0 { - for _, e := range m.Infos { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *Locations) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Locations) > 0 { - for _, e := range m.Locations { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *SourceInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Filename) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Definition != nil { - l = m.Definition.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *Location) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SourceIndex != 0 { - n += 1 + sovOps(uint64(m.SourceIndex)) - } - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *Range) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Start.Size() - n += 1 + l + sovOps(uint64(l)) - l = m.End.Size() - n += 1 + l + sovOps(uint64(l)) - return n -} - -func (m *Position) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Line != 0 { - n += 1 + sovOps(uint64(m.Line)) - } - if m.Character != 0 { - n += 1 + sovOps(uint64(m.Character)) - } - return n -} - -func (m *ExportCache) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Value { - n += 2 - } - return n -} - -func (m *ProxyEnv) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.HttpProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.HttpsProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.FtpProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.NoProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.AllProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *WorkerConstraints) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *Definition) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Def) > 0 { - for _, b := range m.Def { - l = len(b) - n += 1 + l + sovOps(uint64(l)) - } - } - if len(m.Metadata) > 0 { - 
for k, v := range m.Metadata { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + l + sovOps(uint64(l)) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } - } - if m.Source != nil { - l = m.Source.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *HostIP) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.IP) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *FileOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Actions) > 0 { - for _, e := range m.Actions { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *FileAction) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - } - if m.SecondaryInput != 0 { - n += 1 + sovOps(uint64(m.SecondaryInput)) - } - if m.Output != 0 { - n += 1 + sovOps(uint64(m.Output)) - } - if m.Action != nil { - n += m.Action.Size() - } - return n -} - -func (m *FileAction_Copy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Copy != nil { - l = m.Copy.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *FileAction_Mkfile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Mkfile != nil { - l = m.Mkfile.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *FileAction_Mkdir) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Mkdir != nil { - l = m.Mkdir.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *FileAction_Rm) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Rm != nil { - l = m.Rm.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *FileActionCopy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Src) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.Dest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Owner != nil { - l = m.Owner.Size() - n += 1 + l + sovOps(uint64(l)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - if m.FollowSymlink { - n += 2 - } - if m.DirCopyContents { - n += 2 - } - if m.AttemptUnpackDockerCompatibility { - n += 2 - } - if m.CreateDestPath { - n += 2 - } - if m.AllowWildcard { - n += 2 - } - if m.AllowEmptyWildcard { - n += 2 - } - if m.Timestamp != 0 { - n += 1 + sovOps(uint64(m.Timestamp)) - } - if len(m.IncludePatterns) > 0 { - for _, s := range m.IncludePatterns { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - if len(m.ExcludePatterns) > 0 { - for _, s := range m.ExcludePatterns { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - return n -} - -func (m *FileActionMkFile) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Owner != nil { - l = m.Owner.Size() - n += 1 + l + sovOps(uint64(l)) - } - if m.Timestamp != 0 { - n += 1 + sovOps(uint64(m.Timestamp)) - } - return n -} - -func (m *FileActionMkDir) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - if m.MakeParents { - n += 2 - } - if m.Owner != nil { - l = 
m.Owner.Size() - n += 1 + l + sovOps(uint64(l)) - } - if m.Timestamp != 0 { - n += 1 + sovOps(uint64(m.Timestamp)) - } - return n -} - -func (m *FileActionRm) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.AllowNotFound { - n += 2 - } - if m.AllowWildcard { - n += 2 - } - return n -} - -func (m *ChownOpt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.User != nil { - l = m.User.Size() - n += 1 + l + sovOps(uint64(l)) - } - if m.Group != nil { - l = m.Group.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} - -func (m *UserOpt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.User != nil { - n += m.User.Size() - } - return n -} - -func (m *UserOpt_ByName) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ByName != nil { - l = m.ByName.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n -} -func (m *UserOpt_ByID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovOps(uint64(m.ByID)) - return n -} -func (m *NamedUserOpt) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - } - return n -} - -func sovOps(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOps(x uint64) (n int) { - return sovOps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Op) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Op: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Op: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Inputs = append(m.Inputs, &Input{}) - if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &ExecOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = 
&Op_Exec{v} - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &SourceOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Source{v} - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &FileOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_File{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Build", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &BuildOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Build{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Platform == nil { - m.Platform = &Platform{} - } - if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Constraints == nil { - m.Constraints = &WorkerConstraints{} - } - if err := m.Constraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Platform) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Platform: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Architecture = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OS = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Variant = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OSVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSFeatures", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OSFeatures = append(m.OSFeatures, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Input) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Input: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Input: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Index |= OutputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ 
- wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Meta == nil { - m.Meta = &Meta{} - } - if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mounts = append(m.Mounts, &Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - m.Network = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Network |= NetMode(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Security", wireType) - } - m.Security = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Security |= SecurityMode(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Meta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Meta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Args", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cwd", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cwd = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProxyEnv", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ProxyEnv == nil { - m.ProxyEnv = &ProxyEnv{} - } - if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtraHosts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExtraHosts = append(m.ExtraHosts, &HostIP{}) - if err := m.ExtraHosts[len(m.ExtraHosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Mount) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Mount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) - } - m.Input = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Input |= InputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Dest = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) - } - m.Output = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Output |= OutputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Readonly = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MountType", wireType) - } - m.MountType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MountType |= MountType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CacheOpt == nil { - m.CacheOpt = &CacheOpt{} - } - if err := m.CacheOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretOpt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretOpt == nil { - m.SecretOpt = &SecretOpt{} - } - if err := m.SecretOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SSHOpt", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SSHOpt == nil { - m.SSHOpt = &SSHOpt{} - } - if err := 
m.SSHOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResultID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResultID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CacheOpt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CacheOpt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOpt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sharing", wireType) - } - m.Sharing = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Sharing |= CacheSharingOpt(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretOpt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - 
wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretOpt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretOpt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - m.Uid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Uid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) - } - m.Gid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Gid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Optional = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SSHOpt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SSHOpt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SSHOpt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - m.Uid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Uid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) - } - m.Gid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Gid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Optional = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SourceOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SourceOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SourceOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
m.Identifier = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Attrs == nil { - m.Attrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Attrs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BuildOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - 
return fmt.Errorf("proto: BuildOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BuildOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Builder", wireType) - } - m.Builder = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Builder |= InputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Inputs == nil { - m.Inputs = make(map[string]*BuildInput) - } - var mapkey string - var mapvalue *BuildInput - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthOps - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthOps - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &BuildInput{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Inputs[mapkey] = mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Def == nil { - m.Def = &Definition{} - } - if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Attrs == nil { - m.Attrs = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Attrs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BuildInput) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BuildInput: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BuildInput: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) - } - m.Input = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Input |= InputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OpMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OpMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OpMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreCache", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IgnoreCache = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Description == nil { - m.Description = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Description[mapkey] = mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportCache", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ExportCache == nil { - m.ExportCache = &ExportCache{} - } - if err := m.ExportCache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Caps", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Caps == nil { - m.Caps = make(map[github_com_moby_buildkit_util_apicaps.CapID]bool) - } - var mapkey github_com_moby_buildkit_util_apicaps.CapID - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := 
int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = github_com_moby_buildkit_util_apicaps.CapID(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Caps[github_com_moby_buildkit_util_apicaps.CapID(mapkey)] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Source) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Source: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Source: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Locations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Locations == nil { - m.Locations = make(map[string]*Locations) - } - var mapkey string - var mapvalue *Locations - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if 
postStringIndexmapkey < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthOps - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthOps - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &Locations{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Locations[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Infos", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Infos = append(m.Infos, &SourceInfo{}) - if err := m.Infos[len(m.Infos)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Locations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Locations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Locations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Locations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Locations = append(m.Locations, &Location{}) - if err := 
m.Locations[len(m.Locations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SourceInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SourceInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SourceInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filename", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filename = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Definition", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Definition == nil { - m.Definition = &Definition{} - } - if err := m.Definition.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Location) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Location: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Location: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SourceIndex", wireType) - } - m.SourceIndex = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SourceIndex |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, &Range{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Range) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Range: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Range: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Start.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.End.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Position) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Position: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Position: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Line", wireType) - } - m.Line = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Line |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Character", wireType) - } - m.Character = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Character |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l 
{ - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExportCache) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExportCache: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExportCache: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Value = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProxyEnv) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProxyEnv: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProxyEnv: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpProxy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HttpProxy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpsProxy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HttpsProxy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FtpProxy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FtpProxy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NoProxy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllProxy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllProxy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WorkerConstraints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WorkerConstraints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WorkerConstraints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Definition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Definition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Definition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Def = append(m.Def, make([]byte, postIndex-iNdEx)) - copy(m.Def[len(m.Def)-1], dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = make(map[github_com_opencontainers_go_digest.Digest]OpMetadata) - } - var mapkey github_com_opencontainers_go_digest.Digest - mapvalue := &OpMetadata{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthOps - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = 
github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthOps - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthOps - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &OpMetadata{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Metadata[github_com_opencontainers_go_digest.Digest(mapkey)] = *mapvalue - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Source == nil { - m.Source = &Source{} - } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostIP) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostIP: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Actions = append(m.Actions, &FileAction{}) - if err := m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) - } - m.Input = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Input |= InputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SecondaryInput", wireType) - } - m.SecondaryInput = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SecondaryInput |= InputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) - } - m.Output = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Output |= OutputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Copy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &FileActionCopy{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Action = &FileAction_Copy{v} - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mkfile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &FileActionMkFile{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Action = &FileAction_Mkfile{v} - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mkdir", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &FileActionMkDir{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Action = &FileAction_Mkdir{v} - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - v := &FileActionRm{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Action = &FileAction_Rm{v} - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileActionCopy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileActionCopy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileActionCopy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Src", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Src = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Dest = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Owner == nil { - m.Owner = &ChownOpt{} - } - if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= int32(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FollowSymlink", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.FollowSymlink = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DirCopyContents", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DirCopyContents = bool(v != 0) - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AttemptUnpackDockerCompatibility", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AttemptUnpackDockerCompatibility = bool(v != 0) - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreateDestPath", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.CreateDestPath = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowWildcard", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowWildcard = bool(v != 0) - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyWildcard", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowEmptyWildcard = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludePatterns", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludePatterns = append(m.IncludePatterns, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 13: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExcludePatterns", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExcludePatterns = append(m.ExcludePatterns, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileActionMkFile) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileActionMkFile: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileActionMkFile: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Owner == nil { - m.Owner = &ChownOpt{} - } - if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileActionMkDir) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileActionMkDir: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileActionMkDir: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MakeParents", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
m.MakeParents = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Owner == nil { - m.Owner = &ChownOpt{} - } - if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - m.Timestamp = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Timestamp |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FileActionRm) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FileActionRm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FileActionRm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowNotFound", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowNotFound = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowWildcard", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowWildcard 
= bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ChownOpt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ChownOpt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ChownOpt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.User == nil { - m.User = &UserOpt{} - } - if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Group == nil { - m.Group = &UserOpt{} - } - if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserOpt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserOpt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserOpt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ByName", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &NamedUserOpt{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.User = &UserOpt_ByName{v} - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ByID", wireType) - } - var v uint32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.User = &UserOpt_ByID{v} - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamedUserOpt) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamedUserOpt: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamedUserOpt: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) - } - m.Input = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Input |= InputIndex(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOps(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOps - } - if iNdEx >= 
l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthOps - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupOps - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthOps - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthOps = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupOps = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto deleted file mode 100644 index 39983e0eeec2..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ /dev/null @@ -1,350 +0,0 @@ -syntax = "proto3"; - -// Package pb provides the protobuf definition of LLB: low-level builder instruction. -// LLB is DAG-structured; Op represents a vertex, and Definition represents a graph. -package pb; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.stable_marshaler_all) = true; - -// Op represents a vertex of the LLB DAG. -message Op { - // inputs is a set of input edges. - repeated Input inputs = 1; - oneof op { - ExecOp exec = 2; - SourceOp source = 3; - FileOp file = 4; - BuildOp build = 5; - } - Platform platform = 10; - WorkerConstraints constraints = 11; -} - -// Platform is github.com/opencontainers/image-spec/specs-go/v1.Platform -message Platform { - string Architecture = 1; - string OS = 2; - string Variant = 3; - string OSVersion = 4; // unused - repeated string OSFeatures = 5; // unused -} - -// Input represents an input edge for an Op. -message Input { - // digest of the marshaled input Op - string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; - // output index of the input Op - int64 index = 2 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; -} - -// ExecOp executes a command in a container. -message ExecOp { - Meta meta = 1; - repeated Mount mounts = 2; - NetMode network = 3; - SecurityMode security = 4; -} - -// Meta is a set of arguments for ExecOp. -// Meta is unrelated to LLB metadata. -// FIXME: rename (ExecContext? ExecArgs?) -message Meta { - repeated string args = 1; - repeated string env = 2; - string cwd = 3; - string user = 4; - ProxyEnv proxy_env = 5; - repeated HostIP extraHosts = 6; - string hostname = 7; -} - -enum NetMode { - UNSET = 0; // sandbox - HOST = 1; - NONE = 2; -} - -enum SecurityMode { - SANDBOX = 0; - INSECURE = 1; // privileged mode -} - -// Mount specifies how to mount an input Op as a filesystem. 
-message Mount { - int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; - string selector = 2; - string dest = 3; - int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; - bool readonly = 5; - MountType mountType = 6; - CacheOpt cacheOpt = 20; - SecretOpt secretOpt = 21; - SSHOpt SSHOpt = 22; - string resultID = 23; -} - -// MountType defines a type of a mount from a supported set -enum MountType { - BIND = 0; - SECRET = 1; - SSH = 2; - CACHE = 3; - TMPFS = 4; -} - -// CacheOpt defines options specific to cache mounts -message CacheOpt { - // ID is an optional namespace for the mount - string ID = 1; - // Sharing is the sharing mode for the mount - CacheSharingOpt sharing = 2; -} - -// CacheSharingOpt defines different sharing modes for cache mount -enum CacheSharingOpt { - // SHARED cache mount can be used concurrently by multiple writers - SHARED = 0; - // PRIVATE creates a new mount if there are multiple writers - PRIVATE = 1; - // LOCKED pauses second writer until first one releases the mount - LOCKED = 2; -} - -// SecretOpt defines options describing secret mounts -message SecretOpt { - // ID of secret. Used for quering the value. - string ID = 1; - // UID of secret file - uint32 uid = 2; - // GID of secret file - uint32 gid = 3; - // Mode is the filesystem mode of secret file - uint32 mode = 4; - // Optional defines if secret value is required. Error is produced - // if value is not found and optional is false. - bool optional = 5; -} - -// SSHOpt defines options describing secret mounts -message SSHOpt { - // ID of exposed ssh rule. Used for quering the value. - string ID = 1; - // UID of agent socket - uint32 uid = 2; - // GID of agent socket - uint32 gid = 3; - // Mode is the filesystem mode of agent socket - uint32 mode = 4; - // Optional defines if ssh socket is required. Error is produced - // if client does not expose ssh. - bool optional = 5; -} - -// SourceOp specifies a source such as build contexts and images. -message SourceOp { - // TODO: use source type or any type instead of URL protocol. - // identifier e.g. local://, docker-image://, git://, https://... - string identifier = 1; - // attrs are defined in attr.go - map attrs = 2; -} - -// BuildOp is used for nested build invocation. -// BuildOp is experimental and can break without backwards compatibility -message BuildOp { - int64 builder = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; - map inputs = 2; - Definition def = 3; - map attrs = 4; - // outputs -} - -// BuildInput is used for BuildOp. -message BuildInput { - int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; -} - -// OpMetadata is a per-vertex metadata entry, which can be defined for arbitrary Op vertex and overridable on the run time. -message OpMetadata { - // ignore_cache specifies to ignore the cache for this Op. - bool ignore_cache = 1; - // Description can be used for keeping any text fields that builder doesn't parse - map description = 2; - // index 3 reserved for WorkerConstraint in previous versions - // WorkerConstraint worker_constraint = 3; - ExportCache export_cache = 4; - - map caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false]; -} - -// Source is a source mapping description for a file -message Source { - map locations = 1; - repeated SourceInfo infos = 2; -} - -// Locations is a list of ranges with a index to its source map. 
-message Locations { - repeated Location locations = 1; -} - -// Source info contains the shared metadata of a source mapping -message SourceInfo { - string filename = 1; - bytes data = 2; - Definition definition = 3; -} - -// Location defines list of areas in to source file -message Location { - int32 sourceIndex = 1; - repeated Range ranges = 2; -} - -// Range is an area in the source file -message Range { - Position start = 1 [(gogoproto.nullable) = false]; - Position end = 2 [(gogoproto.nullable) = false]; -} - -// Position is single location in a source file -message Position { - int32 Line = 1; - int32 Character = 2; -} - -message ExportCache { - bool Value = 1; -} - -message ProxyEnv { - string http_proxy = 1; - string https_proxy = 2; - string ftp_proxy = 3; - string no_proxy = 4; - string all_proxy = 5; -} - -// WorkerConstraints defines conditions for the worker -message WorkerConstraints { - repeated string filter = 1; // containerd-style filter -} - -// Definition is the LLB definition structure with per-vertex metadata entries -message Definition { - // def is a list of marshaled Op messages - repeated bytes def = 1; - // metadata contains metadata for the each of the Op messages. - // A key must be an LLB op digest string. Currently, empty string is not expected as a key, but it may change in the future. - map metadata = 2 [(gogoproto.castkey) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; - // Source contains the source mapping information for the vertexes in the definition - Source Source = 3; -} - -message HostIP { - string Host = 1; - string IP = 2; -} - -message FileOp { - repeated FileAction actions = 2; -} - -message FileAction { - int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index) - int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//-- - int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; - oneof action { - // FileActionCopy copies files from secondaryInput on top of input - FileActionCopy copy = 4; - // FileActionMkFile creates a new file - FileActionMkFile mkfile = 5; - // FileActionMkDir creates a new directory - FileActionMkDir mkdir = 6; - // FileActionRm removes a file - FileActionRm rm = 7; - } -} - -message FileActionCopy { - // src is the source path - string src = 1; - // dest path - string dest = 2; - // optional owner override - ChownOpt owner = 3; - // optional permission bits override - int32 mode = 4; - // followSymlink resolves symlinks in src - bool followSymlink = 5; - // dirCopyContents only copies contents if src is a directory - bool dirCopyContents = 6; - // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead - bool attemptUnpackDockerCompatibility = 7; - // createDestPath creates dest path directories if needed - bool createDestPath = 8; - // allowWildcard allows filepath.Match wildcards in src path - bool allowWildcard = 9; - // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files - bool allowEmptyWildcard = 10; - // optional created time override - int64 timestamp = 11; - // include only files/dirs matching at least one of these patterns - repeated string include_patterns = 12; - // exclude files/dir matching any of these patterns (even if they match an include pattern) - repeated string exclude_patterns = 13; -} - -message FileActionMkFile { - // path for the new 
file - string path = 1; - // permission bits - int32 mode = 2; - // data is the new file contents - bytes data = 3; - // optional owner for the new file - ChownOpt owner = 4; - // optional created time override - int64 timestamp = 5; -} - -message FileActionMkDir { - // path for the new directory - string path = 1; - // permission bits - int32 mode = 2; - // makeParents creates parent directories as well if needed - bool makeParents = 3; - // optional owner for the new directory - ChownOpt owner = 4; - // optional created time override - int64 timestamp = 5; -} - -message FileActionRm { - // path to remove - string path = 1; - // allowNotFound doesn't fail the rm if file is not found - bool allowNotFound = 2; - // allowWildcard allows filepath.Match wildcards in path - bool allowWildcard = 3; -} - -message ChownOpt { - UserOpt user = 1; - UserOpt group = 2; -} - -message UserOpt { - oneof user { - NamedUserOpt byName = 1; - uint32 byID = 2; - } -} - -message NamedUserOpt { - string name = 1; - int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; -} diff --git a/vendor/github.com/moby/buildkit/solver/pb/platform.go b/vendor/github.com/moby/buildkit/solver/pb/platform.go deleted file mode 100644 index a434aa716882..000000000000 --- a/vendor/github.com/moby/buildkit/solver/pb/platform.go +++ /dev/null @@ -1,41 +0,0 @@ -package pb - -import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" -) - -func (p *Platform) Spec() specs.Platform { - return specs.Platform{ - OS: p.OS, - Architecture: p.Architecture, - Variant: p.Variant, - OSVersion: p.OSVersion, - OSFeatures: p.OSFeatures, - } -} - -func PlatformFromSpec(p specs.Platform) Platform { - return Platform{ - OS: p.OS, - Architecture: p.Architecture, - Variant: p.Variant, - OSVersion: p.OSVersion, - OSFeatures: p.OSFeatures, - } -} - -func ToSpecPlatforms(p []Platform) []specs.Platform { - out := make([]specs.Platform, 0, len(p)) - for _, pp := range p { - out = append(out, pp.Spec()) - } - return out -} - -func PlatformsFromSpec(p []specs.Platform) []Platform { - out := make([]Platform, 0, len(p)) - for _, pp := range p { - out = append(out, PlatformFromSpec(pp)) - } - return out -} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/caps.go b/vendor/github.com/moby/buildkit/util/apicaps/caps.go deleted file mode 100644 index 84a76a81f0b2..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/caps.go +++ /dev/null @@ -1,169 +0,0 @@ -package apicaps - -import ( - "fmt" - "sort" - "strings" - - pb "github.com/moby/buildkit/util/apicaps/pb" - "github.com/pkg/errors" -) - -type PBCap = pb.APICap - -// ExportedProduct is the name of the product using this package. -// Users vendoring this library may override it to provide better versioning hints -// for their users (or set it with a flag to buildkitd). -var ExportedProduct string - -// CapStatus defines the stability properties of a capability -type CapStatus int - -const ( - // CapStatusStable refers to a capability that should never be changed in - // backwards incompatible manner unless there is a serious security issue. - CapStatusStable CapStatus = iota - // CapStatusExperimental refers to a capability that may be removed in the future. - // If incompatible changes are made the previous ID is disabled and new is added. - CapStatusExperimental - // CapStatusPrerelease is same as CapStatusExperimental that can be used for new - // features before they move to stable. 
- CapStatusPrerelease -) - -// CapID is type for capability identifier -type CapID string - -// Cap describes an API feature -type Cap struct { - ID CapID - Name string // readable name, may contain spaces but keep in one sentence - Status CapStatus - Enabled bool - Deprecated bool - SupportedHint map[string]string - DisabledReason string - DisabledReasonMsg string - DisabledAlternative string -} - -// CapList is a collection of capability definitions -type CapList struct { - m map[CapID]Cap -} - -// Init initializes definition for a new capability. -// Not safe to be called concurrently with other methods. -func (l *CapList) Init(cc ...Cap) { - if l.m == nil { - l.m = make(map[CapID]Cap, len(cc)) - } - for _, c := range cc { - l.m[c.ID] = c - } -} - -// All reports the configuration of all known capabilities -func (l *CapList) All() []pb.APICap { - out := make([]pb.APICap, 0, len(l.m)) - for _, c := range l.m { - out = append(out, pb.APICap{ - ID: string(c.ID), - Enabled: c.Enabled, - Deprecated: c.Deprecated, - DisabledReason: c.DisabledReason, - DisabledReasonMsg: c.DisabledReasonMsg, - DisabledAlternative: c.DisabledAlternative, - }) - } - sort.Slice(out, func(i, j int) bool { - return out[i].ID < out[j].ID - }) - return out -} - -// CapSet returns a CapSet for an capability configuration -func (l *CapList) CapSet(caps []pb.APICap) CapSet { - m := make(map[string]*pb.APICap, len(caps)) - for _, c := range caps { - if c.ID != "" { - c := c // capture loop iterator - m[c.ID] = &c - } - } - return CapSet{ - list: l, - set: m, - } -} - -// CapSet is a configuration for detecting supported capabilities -type CapSet struct { - list *CapList - set map[string]*pb.APICap -} - -// Supports returns an error if capability is not supported -func (s *CapSet) Supports(id CapID) error { - err := &CapError{ID: id} - c, ok := s.list.m[id] - if !ok { - return errors.WithStack(err) - } - err.Definition = &c - state, ok := s.set[string(id)] - if !ok { - return errors.WithStack(err) - } - err.State = state - if !state.Enabled { - return errors.WithStack(err) - } - return nil -} - -// Contains checks if cap set contains cap. Note that unlike Supports() this -// function only checks capability existence in remote set, not if cap has been initialized. 
-func (s *CapSet) Contains(id CapID) bool { - _, ok := s.set[string(id)] - return ok -} - -// CapError is an error for unsupported capability -type CapError struct { - ID CapID - Definition *Cap - State *pb.APICap -} - -func (e CapError) Error() string { - if e.Definition == nil { - return fmt.Sprintf("unknown API capability %s", e.ID) - } - typ := "" - if e.Definition.Status == CapStatusExperimental { - typ = "experimental " - } - if e.Definition.Status == CapStatusPrerelease { - typ = "prerelease " - } - name := "" - if e.Definition.Name != "" { - name = "(" + e.Definition.Name + ")" - } - b := &strings.Builder{} - fmt.Fprintf(b, "requested %sfeature %s %s", typ, e.ID, name) - if e.State == nil { - fmt.Fprint(b, " is not supported by build server") - if hint, ok := e.Definition.SupportedHint[ExportedProduct]; ok { - fmt.Fprintf(b, " (added in %s)", hint) - } - fmt.Fprintf(b, ", please update %s", ExportedProduct) - } else { - fmt.Fprint(b, " has been disabled on the build server") - if e.State.DisabledReasonMsg != "" { - fmt.Fprintf(b, ": %s", e.State.DisabledReasonMsg) - } - } - return b.String() -} diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go deleted file mode 100644 index e5768d3751f7..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.pb.go +++ /dev/null @@ -1,567 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: caps.proto - -package moby_buildkit_v1_apicaps - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// APICap defines a capability supported by the service -type APICap struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Enabled bool `protobuf:"varint,2,opt,name=Enabled,proto3" json:"Enabled,omitempty"` - Deprecated bool `protobuf:"varint,3,opt,name=Deprecated,proto3" json:"Deprecated,omitempty"` - DisabledReason string `protobuf:"bytes,4,opt,name=DisabledReason,proto3" json:"DisabledReason,omitempty"` - DisabledReasonMsg string `protobuf:"bytes,5,opt,name=DisabledReasonMsg,proto3" json:"DisabledReasonMsg,omitempty"` - DisabledAlternative string `protobuf:"bytes,6,opt,name=DisabledAlternative,proto3" json:"DisabledAlternative,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *APICap) Reset() { *m = APICap{} } -func (m *APICap) String() string { return proto.CompactTextString(m) } -func (*APICap) ProtoMessage() {} -func (*APICap) Descriptor() ([]byte, []int) { - return fileDescriptor_e19c39d9fcb89b83, []int{0} -} -func (m *APICap) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *APICap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_APICap.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *APICap) XXX_Merge(src proto.Message) { - xxx_messageInfo_APICap.Merge(m, src) -} -func (m *APICap) XXX_Size() int { - return m.Size() -} -func (m *APICap) XXX_DiscardUnknown() { - xxx_messageInfo_APICap.DiscardUnknown(m) -} - -var xxx_messageInfo_APICap proto.InternalMessageInfo - -func (m *APICap) GetID() string { - if m != nil { - return m.ID - } - return "" -} - -func (m *APICap) GetEnabled() bool { - if m != nil { - return m.Enabled - } - return false -} - -func (m *APICap) GetDeprecated() bool { - if m != nil { - return m.Deprecated - } - return false -} - -func (m *APICap) GetDisabledReason() string { - if m != nil { - return m.DisabledReason - } - return "" -} - -func (m *APICap) GetDisabledReasonMsg() string { - if m != nil { - return m.DisabledReasonMsg - } - return "" -} - -func (m *APICap) GetDisabledAlternative() string { - if m != nil { - return m.DisabledAlternative - } - return "" -} - -func init() { - proto.RegisterType((*APICap)(nil), "moby.buildkit.v1.apicaps.APICap") -} - -func init() { proto.RegisterFile("caps.proto", fileDescriptor_e19c39d9fcb89b83) } - -var fileDescriptor_e19c39d9fcb89b83 = []byte{ - // 236 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4a, 0x4e, 0x2c, 0x28, - 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0xc8, 0xcd, 0x4f, 0xaa, 0xd4, 0x4b, 0x2a, 0xcd, - 0xcc, 0x49, 0xc9, 0xce, 0x2c, 0xd1, 0x2b, 0x33, 0xd4, 0x4b, 0x2c, 0xc8, 0x04, 0xc9, 0x4b, 0xe9, - 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0xa7, 0xe7, 0xa7, 0xe7, 0xeb, - 0x83, 0x35, 0x24, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0x31, 0x48, 0xe9, 0x16, 0x23, - 0x17, 0x9b, 0x63, 0x80, 0xa7, 0x73, 0x62, 0x81, 0x10, 0x1f, 0x17, 0x93, 0xa7, 0x8b, 0x04, 0xa3, - 0x02, 0xa3, 0x06, 0x67, 0x10, 0x93, 0xa7, 0x8b, 0x90, 0x04, 0x17, 0xbb, 0x6b, 0x5e, 0x62, 0x52, - 0x4e, 0x6a, 0x8a, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x8c, 0x2b, 0x24, 0xc7, 0xc5, 0xe5, - 0x92, 0x5a, 0x50, 0x94, 0x9a, 0x9c, 0x58, 0x92, 0x9a, 0x22, 0xc1, 0x0c, 
0x96, 0x44, 0x12, 0x11, - 0x52, 0xe3, 0xe2, 0x73, 0xc9, 0x2c, 0x06, 0xab, 0x0d, 0x4a, 0x4d, 0x2c, 0xce, 0xcf, 0x93, 0x60, - 0x01, 0x9b, 0x8a, 0x26, 0x2a, 0xa4, 0xc3, 0x25, 0x88, 0x2a, 0xe2, 0x5b, 0x9c, 0x2e, 0xc1, 0x0a, - 0x56, 0x8a, 0x29, 0x21, 0x64, 0xc0, 0x25, 0x0c, 0x13, 0x74, 0xcc, 0x29, 0x49, 0x2d, 0xca, 0x4b, - 0x2c, 0xc9, 0x2c, 0x4b, 0x95, 0x60, 0x03, 0xab, 0xc7, 0x26, 0xe5, 0xc4, 0x73, 0xe2, 0x91, 0x1c, - 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x26, 0xb1, 0x81, 0x7d, 0x6c, 0x0c, 0x08, - 0x00, 0x00, 0xff, 0xff, 0x02, 0x2d, 0x9e, 0x91, 0x48, 0x01, 0x00, 0x00, -} - -func (m *APICap) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APICap) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *APICap) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.XXX_unrecognized != nil { - i -= len(m.XXX_unrecognized) - copy(dAtA[i:], m.XXX_unrecognized) - } - if len(m.DisabledAlternative) > 0 { - i -= len(m.DisabledAlternative) - copy(dAtA[i:], m.DisabledAlternative) - i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledAlternative))) - i-- - dAtA[i] = 0x32 - } - if len(m.DisabledReasonMsg) > 0 { - i -= len(m.DisabledReasonMsg) - copy(dAtA[i:], m.DisabledReasonMsg) - i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReasonMsg))) - i-- - dAtA[i] = 0x2a - } - if len(m.DisabledReason) > 0 { - i -= len(m.DisabledReason) - copy(dAtA[i:], m.DisabledReason) - i = encodeVarintCaps(dAtA, i, uint64(len(m.DisabledReason))) - i-- - dAtA[i] = 0x22 - } - if m.Deprecated { - i-- - if m.Deprecated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.Enabled { - i-- - if m.Enabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.ID) > 0 { - i -= len(m.ID) - copy(dAtA[i:], m.ID) - i = encodeVarintCaps(dAtA, i, uint64(len(m.ID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintCaps(dAtA []byte, offset int, v uint64) int { - offset -= sovCaps(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *APICap) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - if m.Enabled { - n += 2 - } - if m.Deprecated { - n += 2 - } - l = len(m.DisabledReason) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - l = len(m.DisabledReasonMsg) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - l = len(m.DisabledAlternative) - if l > 0 { - n += 1 + l + sovCaps(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovCaps(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCaps(x uint64) (n int) { - return sovCaps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *APICap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: APICap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: APICap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCaps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Enabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Enabled = bool(v != 0) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Deprecated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Deprecated = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisabledReason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCaps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DisabledReason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisabledReasonMsg", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCaps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DisabledReasonMsg = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisabledAlternative", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCaps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCaps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 
{ - return ErrInvalidLengthCaps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DisabledAlternative = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCaps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCaps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCaps(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCaps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCaps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCaps - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCaps - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCaps - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCaps - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCaps = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCaps = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCaps = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto b/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto deleted file mode 100644 index 1e8c06517c51..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/caps.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package moby.buildkit.v1.apicaps; - -import "github.com/gogo/protobuf/gogoproto/gogo.proto"; - -option (gogoproto.sizer_all) = true; -option (gogoproto.marshaler_all) = true; -option (gogoproto.unmarshaler_all) = true; - -// APICap defines a capability supported by the service -message APICap { - string ID = 1; - bool Enabled = 2; - bool Deprecated = 3; // Unused. May be used for warnings in the future - string DisabledReason = 4; // Reason key for detection code - string DisabledReasonMsg = 5; // Message to the user - string DisabledAlternative = 6; // Identifier that updated client could catch. -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go deleted file mode 100644 index addfccfade01..000000000000 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package moby_buildkit_v1_apicaps //nolint:golint - -//go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. 
caps.proto diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go deleted file mode 100644 index 6252147e0d6c..000000000000 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build !windows - -package appdefaults - -import ( - "os" - "path/filepath" - "strings" -) - -const ( - Address = "unix:///run/buildkit/buildkitd.sock" - Root = "/var/lib/buildkit" - ConfigDir = "/etc/buildkit" -) - -// UserAddress typically returns /run/user/$UID/buildkit/buildkitd.sock -func UserAddress() string { - // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. - xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") - if xdgRuntimeDir != "" { - dirs := strings.Split(xdgRuntimeDir, ":") - return "unix://" + filepath.Join(dirs[0], "buildkit", "buildkitd.sock") - } - return Address -} - -// EnsureUserAddressDir sets sticky bit on XDG_RUNTIME_DIR if XDG_RUNTIME_DIR is set. -// See https://github.com/opencontainers/runc/issues/1694 -func EnsureUserAddressDir() error { - xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR") - if xdgRuntimeDir != "" { - dirs := strings.Split(xdgRuntimeDir, ":") - dir := filepath.Join(dirs[0], "buildkit") - if err := os.MkdirAll(dir, 0700); err != nil { - return err - } - return os.Chmod(dir, 0700|os.ModeSticky) - } - return nil -} - -// UserRoot typically returns /home/$USER/.local/share/buildkit -func UserRoot() string { - // pam_systemd sets XDG_RUNTIME_DIR but not other dirs. - xdgDataHome := os.Getenv("XDG_DATA_HOME") - if xdgDataHome != "" { - dirs := strings.Split(xdgDataHome, ":") - return filepath.Join(dirs[0], "buildkit") - } - home := os.Getenv("HOME") - if home != "" { - return filepath.Join(home, ".local", "share", "buildkit") - } - return Root -} - -// UserConfigDir returns dir for storing config. 
/home/$USER/.config/buildkit/ -func UserConfigDir() string { - xdgConfigHome := os.Getenv("XDG_CONFIG_HOME") - if xdgConfigHome != "" { - return filepath.Join(xdgConfigHome, "buildkit") - } - home := os.Getenv("HOME") - if home != "" { - return filepath.Join(home, ".config", "buildkit") - } - return ConfigDir -} diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go deleted file mode 100644 index d5d0ca1fb99f..000000000000 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_windows.go +++ /dev/null @@ -1,31 +0,0 @@ -package appdefaults - -import ( - "os" - "path/filepath" -) - -const ( - Address = "npipe:////./pipe/buildkitd" -) - -var ( - Root = filepath.Join(os.Getenv("ProgramData"), "buildkitd", ".buildstate") - ConfigDir = filepath.Join(os.Getenv("ProgramData"), "buildkitd") -) - -func UserAddress() string { - return Address -} - -func EnsureUserAddressDir() error { - return nil -} - -func UserRoot() string { - return Root -} - -func UserConfigDir() string { - return ConfigDir -} diff --git a/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go b/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go deleted file mode 100644 index f65b426bb201..000000000000 --- a/vendor/github.com/moby/buildkit/util/entitlements/entitlements.go +++ /dev/null @@ -1,60 +0,0 @@ -package entitlements - -import ( - "github.com/pkg/errors" -) - -type Entitlement string - -const ( - EntitlementSecurityInsecure Entitlement = "security.insecure" - EntitlementNetworkHost Entitlement = "network.host" -) - -var all = map[Entitlement]struct{}{ - EntitlementSecurityInsecure: {}, - EntitlementNetworkHost: {}, -} - -func Parse(s string) (Entitlement, error) { - _, ok := all[Entitlement(s)] - if !ok { - return "", errors.Errorf("unknown entitlement %s", s) - } - return Entitlement(s), nil -} - -func WhiteList(allowed, supported []Entitlement) (Set, error) { - m := map[Entitlement]struct{}{} - - var supm Set - if supported != nil { - var err error - supm, err = WhiteList(supported, nil) - if err != nil { // should not happen - return nil, err - } - } - - for _, e := range allowed { - e, err := Parse(string(e)) - if err != nil { - return nil, err - } - if supported != nil { - if !supm.Allowed(e) { - return nil, errors.Errorf("granting entitlement %s is not allowed by build daemon configuration", e) - } - } - m[e] = struct{}{} - } - - return Set(m), nil -} - -type Set map[Entitlement]struct{} - -func (s Set) Allowed(e Entitlement) bool { - _, ok := s[e] - return ok -} diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go deleted file mode 100644 index fc9f7272a42b..000000000000 --- a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go +++ /dev/null @@ -1,349 +0,0 @@ -package flightcontrol - -import ( - "context" - "io" - "runtime" - "sort" - "sync" - "time" - - "github.com/moby/buildkit/util/progress" - "github.com/pkg/errors" -) - -// flightcontrol is like singleflight but with support for cancellation and -// nested progress reporting - -var ( - errRetry = errors.Errorf("retry") - errRetryTimeout = errors.Errorf("exceeded retry timeout") -) - -type contextKeyT string - -var contextKey = contextKeyT("buildkit/util/flightcontrol.progress") - -// Group is a flightcontrol synchronization group -type Group struct { - mu sync.Mutex // protects m - m map[string]*call // 
lazily initialized -} - -// Do executes a context function syncronized by the key -func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (v interface{}, err error) { - var backoff time.Duration - for { - v, err = g.do(ctx, key, fn) - if err == nil || !errors.Is(err, errRetry) { - return v, err - } - // backoff logic - if backoff >= 3*time.Second { - err = errors.Wrapf(errRetryTimeout, "flightcontrol") - return v, err - } - runtime.Gosched() - if backoff > 0 { - time.Sleep(backoff) - backoff *= 2 - } else { - backoff = time.Millisecond - } - } -} - -func (g *Group) do(ctx context.Context, key string, fn func(ctx context.Context) (interface{}, error)) (interface{}, error) { - g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - - if c, ok := g.m[key]; ok { // register 2nd waiter - g.mu.Unlock() - return c.wait(ctx) - } - - c := newCall(fn) - g.m[key] = c - go func() { - // cleanup after a caller has returned - <-c.ready - g.mu.Lock() - delete(g.m, key) - g.mu.Unlock() - close(c.cleaned) - }() - g.mu.Unlock() - return c.wait(ctx) -} - -type call struct { - mu sync.Mutex - result interface{} - err error - ready chan struct{} - cleaned chan struct{} - - ctx *sharedContext - ctxs []context.Context - fn func(ctx context.Context) (interface{}, error) - once sync.Once - - closeProgressWriter func() - progressState *progressState - progressCtx context.Context -} - -func newCall(fn func(ctx context.Context) (interface{}, error)) *call { - c := &call{ - fn: fn, - ready: make(chan struct{}), - cleaned: make(chan struct{}), - progressState: newProgressState(), - } - ctx := newContext(c) // newSharedContext - pr, pctx, closeProgressWriter := progress.NewContext(context.Background()) - - c.progressCtx = pctx - c.ctx = ctx - c.closeProgressWriter = closeProgressWriter - - go c.progressState.run(pr) // TODO: remove this, wrap writer instead - - return c -} - -func (c *call) run() { - defer c.closeProgressWriter() - ctx, cancel := context.WithCancel(c.ctx) - defer cancel() - v, err := c.fn(ctx) - c.mu.Lock() - c.result = v - c.err = err - c.mu.Unlock() - close(c.ready) -} - -func (c *call) wait(ctx context.Context) (v interface{}, err error) { - c.mu.Lock() - // detect case where caller has just returned, let it clean up before - select { - case <-c.ready: - c.mu.Unlock() - <-c.cleaned - return nil, errRetry - case <-c.ctx.done: // could return if no error - c.mu.Unlock() - <-c.cleaned - return nil, errRetry - default: - } - - pw, ok, ctx := progress.FromContext(ctx) - if ok { - c.progressState.add(pw) - } - - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - c.ctxs = append(c.ctxs, ctx) - - c.mu.Unlock() - - go c.once.Do(c.run) - - select { - case <-ctx.Done(): - if c.ctx.checkDone() { - // if this cancelled the last context, then wait for function to shut down - // and don't accept any more callers - <-c.ready - return c.result, c.err - } - if ok { - c.progressState.close(pw) - } - return nil, ctx.Err() - case <-c.ready: - return c.result, c.err // shared not implemented yet - } -} - -func (c *call) Deadline() (deadline time.Time, ok bool) { - c.mu.Lock() - defer c.mu.Unlock() - for _, ctx := range c.ctxs { - select { - case <-ctx.Done(): - default: - dl, ok := ctx.Deadline() - if ok { - return dl, ok - } - } - } - return time.Time{}, false -} - -func (c *call) Done() <-chan struct{} { - return c.ctx.done -} - -func (c *call) Err() error { - select { - case <-c.ctx.Done(): - return c.ctx.err - default: - return nil - } -} - -func (c 
*call) Value(key interface{}) interface{} { - if key == contextKey { - return c.progressState - } - c.mu.Lock() - defer c.mu.Unlock() - - ctx := c.progressCtx - select { - case <-ctx.Done(): - default: - if v := ctx.Value(key); v != nil { - return v - } - } - - if len(c.ctxs) > 0 { - ctx = c.ctxs[0] - select { - case <-ctx.Done(): - default: - if v := ctx.Value(key); v != nil { - return v - } - } - } - - return nil -} - -type sharedContext struct { - *call - done chan struct{} - err error -} - -func newContext(c *call) *sharedContext { - return &sharedContext{call: c, done: make(chan struct{})} -} - -func (sc *sharedContext) checkDone() bool { - sc.mu.Lock() - select { - case <-sc.done: - sc.mu.Unlock() - return true - default: - } - var err error - for _, ctx := range sc.ctxs { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - sc.mu.Unlock() - return false - } - } - sc.err = err - close(sc.done) - sc.mu.Unlock() - return true -} - -type rawProgressWriter interface { - WriteRawProgress(*progress.Progress) error - Close() error -} - -type progressState struct { - mu sync.Mutex - items map[string]*progress.Progress - writers []rawProgressWriter - done bool -} - -func newProgressState() *progressState { - return &progressState{ - items: make(map[string]*progress.Progress), - } -} - -func (ps *progressState) run(pr progress.Reader) { - for { - p, err := pr.Read(context.TODO()) - if err != nil { - if err == io.EOF { - ps.mu.Lock() - ps.done = true - ps.mu.Unlock() - for _, w := range ps.writers { - w.Close() - } - } - return - } - ps.mu.Lock() - for _, p := range p { - for _, w := range ps.writers { - w.WriteRawProgress(p) - } - ps.items[p.ID] = p - } - ps.mu.Unlock() - } -} - -func (ps *progressState) add(pw progress.Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - ps.mu.Lock() - plist := make([]*progress.Progress, 0, len(ps.items)) - for _, p := range ps.items { - plist = append(plist, p) - } - sort.Slice(plist, func(i, j int) bool { - return plist[i].Timestamp.Before(plist[j].Timestamp) - }) - for _, p := range plist { - rw.WriteRawProgress(p) - } - if ps.done { - rw.Close() - } else { - ps.writers = append(ps.writers, rw) - } - ps.mu.Unlock() -} - -func (ps *progressState) close(pw progress.Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - ps.mu.Lock() - for i, w := range ps.writers { - if w == rw { - w.Close() - ps.writers = append(ps.writers[:i], ps.writers[i+1:]...) 
- break - } - } - ps.mu.Unlock() -} diff --git a/vendor/github.com/moby/buildkit/util/gitutil/git_protocol.go b/vendor/github.com/moby/buildkit/util/gitutil/git_protocol.go deleted file mode 100644 index 3b9df83920b7..000000000000 --- a/vendor/github.com/moby/buildkit/util/gitutil/git_protocol.go +++ /dev/null @@ -1,46 +0,0 @@ -package gitutil - -import ( - "strings" - - "github.com/moby/buildkit/util/sshutil" -) - -const ( - HTTPProtocol = iota + 1 - HTTPSProtocol - SSHProtocol - GitProtocol - UnknownProtocol -) - -// ParseProtocol parses a git URL and returns the remote url and protocol type -func ParseProtocol(remote string) (string, int) { - prefixes := map[string]int{ - "http://": HTTPProtocol, - "https://": HTTPSProtocol, - "git://": GitProtocol, - "ssh://": SSHProtocol, - } - protocolType := UnknownProtocol - for prefix, potentialType := range prefixes { - if strings.HasPrefix(remote, prefix) { - remote = strings.TrimPrefix(remote, prefix) - protocolType = potentialType - } - } - - if protocolType == UnknownProtocol && sshutil.IsImplicitSSHTransport(remote) { - protocolType = SSHProtocol - } - - // remove name from ssh - if protocolType == SSHProtocol { - parts := strings.SplitN(remote, "@", 2) - if len(parts) == 2 { - remote = parts[1] - } - } - - return remote, protocolType -} diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go deleted file mode 100644 index f12f10bc8a10..000000000000 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ /dev/null @@ -1,218 +0,0 @@ -package grpcerrors - -import ( - "encoding/json" - "errors" - - "github.com/containerd/typeurl" - gogotypes "github.com/gogo/protobuf/types" - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - "github.com/moby/buildkit/util/stack" - "github.com/sirupsen/logrus" - spb "google.golang.org/genproto/googleapis/rpc/status" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type TypedError interface { - ToProto() TypedErrorProto -} - -type TypedErrorProto interface { - proto.Message - WrapError(error) error -} - -func ToGRPC(err error) error { - if err == nil { - return nil - } - st, ok := AsGRPCStatus(err) - if !ok || st == nil { - st = status.New(Code(err), err.Error()) - } - if st.Code() != Code(err) { - code := Code(err) - if code == codes.OK { - code = codes.Unknown - } - pb := st.Proto() - pb.Code = int32(code) - st = status.FromProto(pb) - } - - var details []proto.Message - - for _, st := range stack.Traces(err) { - details = append(details, st) - } - - each(err, func(err error) { - if te, ok := err.(TypedError); ok { - details = append(details, te.ToProto()) - } - }) - - if len(details) > 0 { - if st2, err := withDetails(st, details...); err == nil { - st = st2 - } - } - - return st.Err() -} - -func withDetails(s *status.Status, details ...proto.Message) (*status.Status, error) { - if s.Code() == codes.OK { - return nil, errors.New("no error details for status with code OK") - } - p := s.Proto() - for _, detail := range details { - url, err := typeurl.TypeURL(detail) - if err != nil { - logrus.Warnf("ignoring typed error %T: not registered", detail) - continue - } - dt, err := json.Marshal(detail) - if err != nil { - return nil, err - } - p.Details = append(p.Details, &any.Any{TypeUrl: url, Value: dt}) - } - return status.FromProto(p), nil -} - -func Code(err error) codes.Code { - if se, ok := err.(interface { - Code() codes.Code - }); ok { - return se.Code() - } 
- - if se, ok := err.(interface { - GRPCStatus() *status.Status - }); ok { - return se.GRPCStatus().Code() - } - - wrapped, ok := err.(interface { - Unwrap() error - }) - if ok { - if err := wrapped.Unwrap(); err != nil { - return Code(err) - } - } - return status.FromContextError(err).Code() -} - -func WrapCode(err error, code codes.Code) error { - return &withCode{error: err, code: code} -} - -func AsGRPCStatus(err error) (*status.Status, bool) { - if err == nil { - return nil, true - } - if se, ok := err.(interface { - GRPCStatus() *status.Status - }); ok { - return se.GRPCStatus(), true - } - - wrapped, ok := err.(interface { - Unwrap() error - }) - if ok { - if err := wrapped.Unwrap(); err != nil { - return AsGRPCStatus(err) - } - } - - return nil, false -} - -func FromGRPC(err error) error { - if err == nil { - return nil - } - st, ok := status.FromError(err) - if !ok { - return err - } - - pb := st.Proto() - - n := &spb.Status{ - Code: pb.Code, - Message: pb.Message, - } - - details := make([]TypedErrorProto, 0, len(pb.Details)) - stacks := make([]*stack.Stack, 0, len(pb.Details)) - - // details that we don't understand are copied as proto - for _, d := range pb.Details { - m, err := typeurl.UnmarshalAny(gogoAny(d)) - if err != nil { - continue - } - - switch v := m.(type) { - case *stack.Stack: - stacks = append(stacks, v) - case TypedErrorProto: - details = append(details, v) - default: - n.Details = append(n.Details, d) - } - } - - err = status.FromProto(n).Err() - - for _, s := range stacks { - if s != nil { - err = stack.Wrap(err, *s) - } - } - - for _, d := range details { - err = d.WrapError(err) - } - - if err != nil { - stack.Helper() - } - - return stack.Enable(err) -} - -type withCode struct { - code codes.Code - error -} - -func (e *withCode) Code() codes.Code { - return e.code -} - -func (e *withCode) Unwrap() error { - return e.error -} - -func each(err error, fn func(error)) { - fn(err) - if wrapped, ok := err.(interface { - Unwrap() error - }); ok { - each(wrapped.Unwrap(), fn) - } -} - -func gogoAny(in *any.Any) *gogotypes.Any { - return &gogotypes.Any{ - TypeUrl: in.TypeUrl, - Value: in.Value, - } -} diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go deleted file mode 100644 index 1c17e4c67d27..000000000000 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go +++ /dev/null @@ -1,54 +0,0 @@ -package grpcerrors - -import ( - "context" - "log" - "os" - - "github.com/moby/buildkit/util/stack" - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { - resp, err = handler(ctx, req) - oldErr := err - if err != nil { - stack.Helper() - err = ToGRPC(err) - } - if oldErr != nil && err == nil { - logErr := errors.Wrap(err, "invalid grpc error conversion") - if os.Getenv("BUILDKIT_DEBUG_PANIC_ON_ERROR") == "1" { - panic(logErr) - } - log.Printf("%v", logErr) - err = oldErr - } - - return resp, err -} - -func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - err := ToGRPC(handler(srv, ss)) - if err != nil { - stack.Helper() - } - return err -} - -func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { - err := FromGRPC(invoker(ctx, 
method, req, reply, cc, opts...)) - if err != nil { - stack.Helper() - } - return err -} - -func StreamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { - s, err := streamer(ctx, desc, cc, method, opts...) - if err != nil { - stack.Helper() - } - return s, ToGRPC(err) -} diff --git a/vendor/github.com/moby/buildkit/util/progress/multireader.go b/vendor/github.com/moby/buildkit/util/progress/multireader.go deleted file mode 100644 index 2bd3f2ca8616..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/multireader.go +++ /dev/null @@ -1,77 +0,0 @@ -package progress - -import ( - "context" - "io" - "sync" -) - -type MultiReader struct { - mu sync.Mutex - main Reader - initialized bool - done chan struct{} - writers map[*progressWriter]func() -} - -func NewMultiReader(pr Reader) *MultiReader { - mr := &MultiReader{ - main: pr, - writers: make(map[*progressWriter]func()), - done: make(chan struct{}), - } - return mr -} - -func (mr *MultiReader) Reader(ctx context.Context) Reader { - mr.mu.Lock() - defer mr.mu.Unlock() - - pr, ctx, closeWriter := NewContext(ctx) - pw, _, ctx := FromContext(ctx) - - w := pw.(*progressWriter) - mr.writers[w] = closeWriter - - go func() { - select { - case <-ctx.Done(): - case <-mr.done: - } - mr.mu.Lock() - defer mr.mu.Unlock() - delete(mr.writers, w) - }() - - if !mr.initialized { - go mr.handle() - mr.initialized = true - } - - return pr -} - -func (mr *MultiReader) handle() error { - for { - p, err := mr.main.Read(context.TODO()) - if err != nil { - if err == io.EOF { - mr.mu.Lock() - for w, c := range mr.writers { - w.Close() - c() - } - mr.mu.Unlock() - return nil - } - return err - } - mr.mu.Lock() - for _, p := range p { - for w := range mr.writers { - w.writeRawProgress(p) - } - } - mr.mu.Unlock() - } -} diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go deleted file mode 100644 index 1ce37ea210b3..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go +++ /dev/null @@ -1,104 +0,0 @@ -package progress - -import ( - "sort" - "sync" - "time" -) - -type rawProgressWriter interface { - WriteRawProgress(*Progress) error - Close() error -} - -type MultiWriter struct { - mu sync.Mutex - items []*Progress - writers map[rawProgressWriter]struct{} - meta map[string]interface{} -} - -func NewMultiWriter(opts ...WriterOption) *MultiWriter { - mw := &MultiWriter{ - writers: map[rawProgressWriter]struct{}{}, - meta: map[string]interface{}{}, - } - for _, o := range opts { - o(mw) - } - return mw -} - -func (ps *MultiWriter) Add(pw Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - ps.mu.Lock() - plist := make([]*Progress, 0, len(ps.items)) - for _, p := range ps.items { - plist = append(plist, p) - } - sort.Slice(plist, func(i, j int) bool { - return plist[i].Timestamp.Before(plist[j].Timestamp) - }) - for _, p := range plist { - rw.WriteRawProgress(p) - } - ps.writers[rw] = struct{}{} - ps.mu.Unlock() -} - -func (ps *MultiWriter) Delete(pw Writer) { - rw, ok := pw.(rawProgressWriter) - if !ok { - return - } - - ps.mu.Lock() - delete(ps.writers, rw) - ps.mu.Unlock() -} - -func (ps *MultiWriter) Write(id string, v interface{}) error { - p := &Progress{ - ID: id, - Timestamp: time.Now(), - Sys: v, - meta: ps.meta, - } - return ps.WriteRawProgress(p) -} - -func (ps *MultiWriter) WriteRawProgress(p *Progress) 
error { - meta := p.meta - if len(ps.meta) > 0 { - meta = map[string]interface{}{} - for k, v := range p.meta { - meta[k] = v - } - for k, v := range ps.meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - } - p.meta = meta - return ps.writeRawProgress(p) -} - -func (ps *MultiWriter) writeRawProgress(p *Progress) error { - ps.mu.Lock() - defer ps.mu.Unlock() - ps.items = append(ps.items, p) - for w := range ps.writers { - if err := w.WriteRawProgress(p); err != nil { - return err - } - } - return nil -} - -func (ps *MultiWriter) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go deleted file mode 100644 index 3ce212948c2d..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progress.go +++ /dev/null @@ -1,261 +0,0 @@ -package progress - -import ( - "context" - "io" - "sort" - "sync" - "time" - - "github.com/pkg/errors" -) - -// Progress package provides utility functions for using the context to capture -// progress of a running function. All progress items written contain an ID -// that is used to collapse unread messages. - -type contextKeyT string - -var contextKey = contextKeyT("buildkit/util/progress") - -// FromContext returns a progress writer from a context. -func FromContext(ctx context.Context, opts ...WriterOption) (Writer, bool, context.Context) { - v := ctx.Value(contextKey) - pw, ok := v.(*progressWriter) - if !ok { - if pw, ok := v.(*MultiWriter); ok { - return pw, true, ctx - } - return &noOpWriter{}, false, ctx - } - pw = newWriter(pw) - for _, o := range opts { - o(pw) - } - ctx = context.WithValue(ctx, contextKey, pw) - return pw, true, ctx -} - -type WriterOption func(Writer) - -// NewContext returns a new context and a progress reader that captures all -// progress items writtern to this context. Last returned parameter is a closer -// function to signal that no new writes will happen to this context. 
-func NewContext(ctx context.Context) (Reader, context.Context, func()) { - pr, pw, cancel := pipe() - ctx = WithProgress(ctx, pw) - return pr, ctx, cancel -} - -func WithProgress(ctx context.Context, pw Writer) context.Context { - return context.WithValue(ctx, contextKey, pw) -} - -func WithMetadata(key string, val interface{}) WriterOption { - return func(w Writer) { - if pw, ok := w.(*progressWriter); ok { - pw.meta[key] = val - } - if pw, ok := w.(*MultiWriter); ok { - pw.meta[key] = val - } - } -} - -type Controller interface { - Start(context.Context) (context.Context, func(error)) - Status(id string, action string) func() -} - -type Writer interface { - Write(id string, value interface{}) error - Close() error -} - -type Reader interface { - Read(context.Context) ([]*Progress, error) -} - -type Progress struct { - ID string - Timestamp time.Time - Sys interface{} - meta map[string]interface{} -} - -type Status struct { - Action string - Current int - Total int - Started *time.Time - Completed *time.Time -} - -type progressReader struct { - ctx context.Context - cond *sync.Cond - mu sync.Mutex - writers map[*progressWriter]struct{} - dirty map[string]*Progress -} - -func (pr *progressReader) Read(ctx context.Context) ([]*Progress, error) { - done := make(chan struct{}) - defer close(done) - go func() { - select { - case <-done: - case <-ctx.Done(): - pr.mu.Lock() - pr.cond.Broadcast() - pr.mu.Unlock() - } - }() - pr.mu.Lock() - for { - select { - case <-ctx.Done(): - pr.mu.Unlock() - return nil, ctx.Err() - default: - } - dmap := pr.dirty - if len(dmap) == 0 { - select { - case <-pr.ctx.Done(): - if len(pr.writers) == 0 { - pr.mu.Unlock() - return nil, io.EOF - } - default: - } - pr.cond.Wait() - continue - } - pr.dirty = make(map[string]*Progress) - pr.mu.Unlock() - - out := make([]*Progress, 0, len(dmap)) - for _, p := range dmap { - out = append(out, p) - } - - sort.Slice(out, func(i, j int) bool { - return out[i].Timestamp.Before(out[j].Timestamp) - }) - - return out, nil - } -} - -func (pr *progressReader) append(pw *progressWriter) { - pr.mu.Lock() - defer pr.mu.Unlock() - - select { - case <-pr.ctx.Done(): - return - default: - pr.writers[pw] = struct{}{} - } -} - -func pipe() (*progressReader, *progressWriter, func()) { - ctx, cancel := context.WithCancel(context.Background()) - pr := &progressReader{ - ctx: ctx, - writers: make(map[*progressWriter]struct{}), - dirty: make(map[string]*Progress), - } - pr.cond = sync.NewCond(&pr.mu) - go func() { - <-ctx.Done() - pr.mu.Lock() - pr.cond.Broadcast() - pr.mu.Unlock() - }() - pw := &progressWriter{ - reader: pr, - } - return pr, pw, cancel -} - -func newWriter(pw *progressWriter) *progressWriter { - meta := make(map[string]interface{}) - for k, v := range pw.meta { - meta[k] = v - } - pw = &progressWriter{ - reader: pw.reader, - meta: meta, - } - pw.reader.append(pw) - return pw -} - -type progressWriter struct { - done bool - reader *progressReader - meta map[string]interface{} -} - -func (pw *progressWriter) Write(id string, v interface{}) error { - if pw.done { - return errors.Errorf("writing %s to closed progress writer", id) - } - return pw.writeRawProgress(&Progress{ - ID: id, - Timestamp: time.Now(), - Sys: v, - meta: pw.meta, - }) -} - -func (pw *progressWriter) WriteRawProgress(p *Progress) error { - meta := p.meta - if len(pw.meta) > 0 { - meta = map[string]interface{}{} - for k, v := range p.meta { - meta[k] = v - } - for k, v := range pw.meta { - if _, ok := meta[k]; !ok { - meta[k] = v - } - } - } - p.meta = meta - 
return pw.writeRawProgress(p) -} - -func (pw *progressWriter) writeRawProgress(p *Progress) error { - pw.reader.mu.Lock() - pw.reader.dirty[p.ID] = p - pw.reader.cond.Broadcast() - pw.reader.mu.Unlock() - return nil -} - -func (pw *progressWriter) Close() error { - pw.reader.mu.Lock() - delete(pw.reader.writers, pw) - pw.reader.mu.Unlock() - pw.reader.cond.Broadcast() - pw.done = true - return nil -} - -func (p *Progress) Meta(key string) (interface{}, bool) { - v, ok := p.meta[key] - return v, ok -} - -type noOpWriter struct{} - -func (pw *noOpWriter) Write(_ string, _ interface{}) error { - return nil -} - -func (pw *noOpWriter) Close() error { - return nil -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go deleted file mode 100644 index 885bcc41c35b..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go +++ /dev/null @@ -1,587 +0,0 @@ -package progressui - -import ( - "bytes" - "context" - "fmt" - "io" - "os" - "sort" - "strconv" - "strings" - "time" - - "github.com/containerd/console" - "github.com/jaguilar/vt100" - "github.com/moby/buildkit/client" - "github.com/morikuni/aec" - digest "github.com/opencontainers/go-digest" - "github.com/tonistiigi/units" - "golang.org/x/time/rate" -) - -func DisplaySolveStatus(ctx context.Context, phase string, c console.Console, w io.Writer, ch chan *client.SolveStatus) error { - - modeConsole := c != nil - - disp := &display{c: c, phase: phase} - printer := &textMux{w: w} - - if disp.phase == "" { - disp.phase = "Building" - } - - t := newTrace(w, modeConsole) - - tickerTimeout := 150 * time.Millisecond - displayTimeout := 100 * time.Millisecond - - if v := os.Getenv("TTY_DISPLAY_RATE"); v != "" { - if r, err := strconv.ParseInt(v, 10, 64); err == nil { - tickerTimeout = time.Duration(r) * time.Millisecond - displayTimeout = time.Duration(r) * time.Millisecond - } - } - - var done bool - ticker := time.NewTicker(tickerTimeout) - defer ticker.Stop() - - displayLimiter := rate.NewLimiter(rate.Every(displayTimeout), 1) - - var height int - width, _ := disp.getSize() - for { - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - case ss, ok := <-ch: - if ok { - t.update(ss, width) - } else { - done = true - } - } - - if modeConsole { - width, height = disp.getSize() - if done { - disp.print(t.displayInfo(), width, height, true) - t.printErrorLogs(c) - return nil - } else if displayLimiter.Allow() { - ticker.Stop() - ticker = time.NewTicker(tickerTimeout) - disp.print(t.displayInfo(), width, height, false) - } - } else { - if done || displayLimiter.Allow() { - printer.print(t) - if done { - t.printErrorLogs(w) - return nil - } - ticker.Stop() - ticker = time.NewTicker(tickerTimeout) - } - } - } -} - -const termHeight = 6 -const termPad = 10 - -type displayInfo struct { - startTime time.Time - jobs []*job - countTotal int - countCompleted int -} - -type job struct { - startTime *time.Time - completedTime *time.Time - name string - status string - hasError bool - isCanceled bool - vertex *vertex - showTerm bool -} - -type trace struct { - w io.Writer - localTimeDiff time.Duration - vertexes []*vertex - byDigest map[digest.Digest]*vertex - nextIndex int - updates map[digest.Digest]struct{} - modeConsole bool -} - -type vertex struct { - *client.Vertex - statuses []*status - byID map[string]*status - indent string - index int - - logs [][]byte - logsPartial bool - logsOffset int - prev *client.Vertex - events 
[]string - lastBlockTime *time.Time - count int - statusUpdates map[string]struct{} - - jobs []*job - jobCached bool - - term *vt100.VT100 - termBytes int - termCount int -} - -func (v *vertex) update(c int) { - if v.count == 0 { - now := time.Now() - v.lastBlockTime = &now - } - v.count += c -} - -type status struct { - *client.VertexStatus -} - -func newTrace(w io.Writer, modeConsole bool) *trace { - return &trace{ - byDigest: make(map[digest.Digest]*vertex), - updates: make(map[digest.Digest]struct{}), - w: w, - modeConsole: modeConsole, - } -} - -func (t *trace) triggerVertexEvent(v *client.Vertex) { - if v.Started == nil { - return - } - - var old client.Vertex - vtx := t.byDigest[v.Digest] - if v := vtx.prev; v != nil { - old = *v - } - - changed := false - if v.Digest != old.Digest { - changed = true - } - if v.Name != old.Name { - changed = true - } - if v.Started != old.Started { - if v.Started != nil && old.Started == nil || !v.Started.Equal(*old.Started) { - changed = true - } - } - if v.Completed != old.Completed && v.Completed != nil { - changed = true - } - if v.Cached != old.Cached { - changed = true - } - if v.Error != old.Error { - changed = true - } - - if changed { - vtx.update(1) - t.updates[v.Digest] = struct{}{} - } - - t.byDigest[v.Digest].prev = v -} - -func (t *trace) update(s *client.SolveStatus, termWidth int) { - for _, v := range s.Vertexes { - prev, ok := t.byDigest[v.Digest] - if !ok { - t.nextIndex++ - t.byDigest[v.Digest] = &vertex{ - byID: make(map[string]*status), - statusUpdates: make(map[string]struct{}), - index: t.nextIndex, - } - if t.modeConsole { - t.byDigest[v.Digest].term = vt100.NewVT100(termHeight, termWidth-termPad) - } - } - t.triggerVertexEvent(v) - if v.Started != nil && (prev == nil || prev.Started == nil) { - if t.localTimeDiff == 0 { - t.localTimeDiff = time.Since(*v.Started) - } - t.vertexes = append(t.vertexes, t.byDigest[v.Digest]) - } - // allow a duplicate initial vertex that shouldn't reset state - if !(prev != nil && prev.Started != nil && v.Started == nil) { - t.byDigest[v.Digest].Vertex = v - } - t.byDigest[v.Digest].jobCached = false - } - for _, s := range s.Statuses { - v, ok := t.byDigest[s.Vertex] - if !ok { - continue // shouldn't happen - } - v.jobCached = false - prev, ok := v.byID[s.ID] - if !ok { - v.byID[s.ID] = &status{VertexStatus: s} - } - if s.Started != nil && (prev == nil || prev.Started == nil) { - v.statuses = append(v.statuses, v.byID[s.ID]) - } - v.byID[s.ID].VertexStatus = s - v.statusUpdates[s.ID] = struct{}{} - t.updates[v.Digest] = struct{}{} - v.update(1) - } - for _, l := range s.Logs { - v, ok := t.byDigest[l.Vertex] - if !ok { - continue // shouldn't happen - } - v.jobCached = false - if v.term != nil { - if v.term.Width != termWidth { - v.term.Resize(termHeight, termWidth-termPad) - } - v.termBytes += len(l.Data) - v.term.Write(l.Data) // error unhandled on purpose. don't trust vt100 - } - i := 0 - complete := split(l.Data, byte('\n'), func(dt []byte) { - if v.logsPartial && len(v.logs) != 0 && i == 0 { - v.logs[len(v.logs)-1] = append(v.logs[len(v.logs)-1], dt...) 
- } else { - ts := time.Duration(0) - if v.Started != nil { - ts = l.Timestamp.Sub(*v.Started) - } - prec := 1 - sec := ts.Seconds() - if sec < 10 { - prec = 3 - } else if sec < 100 { - prec = 2 - } - v.logs = append(v.logs, []byte(fmt.Sprintf("#%d %s %s", v.index, fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))) - } - i++ - }) - v.logsPartial = !complete - t.updates[v.Digest] = struct{}{} - v.update(1) - } -} - -func (t *trace) printErrorLogs(f io.Writer) { - for _, v := range t.vertexes { - if v.Error != "" && !strings.HasSuffix(v.Error, context.Canceled.Error()) { - fmt.Fprintln(f, "------") - fmt.Fprintf(f, " > %s:\n", v.Name) - for _, l := range v.logs { - f.Write(l) - fmt.Fprintln(f) - } - fmt.Fprintln(f, "------") - } - } -} - -func (t *trace) displayInfo() (d displayInfo) { - d.startTime = time.Now() - if t.localTimeDiff != 0 { - d.startTime = (*t.vertexes[0].Started).Add(t.localTimeDiff) - } - d.countTotal = len(t.byDigest) - for _, v := range t.byDigest { - if v.Completed != nil { - d.countCompleted++ - } - } - - for _, v := range t.vertexes { - if v.jobCached { - d.jobs = append(d.jobs, v.jobs...) - continue - } - var jobs []*job - j := &job{ - startTime: addTime(v.Started, t.localTimeDiff), - completedTime: addTime(v.Completed, t.localTimeDiff), - name: strings.Replace(v.Name, "\t", " ", -1), - vertex: v, - } - if v.Error != "" { - if strings.HasSuffix(v.Error, context.Canceled.Error()) { - j.isCanceled = true - j.name = "CANCELED " + j.name - } else { - j.hasError = true - j.name = "ERROR " + j.name - } - } - if v.Cached { - j.name = "CACHED " + j.name - } - j.name = v.indent + j.name - jobs = append(jobs, j) - for _, s := range v.statuses { - j := &job{ - startTime: addTime(s.Started, t.localTimeDiff), - completedTime: addTime(s.Completed, t.localTimeDiff), - name: v.indent + "=> " + s.ID, - } - if s.Total != 0 { - j.status = fmt.Sprintf("%.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total)) - } else if s.Current != 0 { - j.status = fmt.Sprintf("%.2f", units.Bytes(s.Current)) - } - jobs = append(jobs, j) - } - d.jobs = append(d.jobs, jobs...) 
- v.jobs = jobs - v.jobCached = true - } - - return d -} - -func split(dt []byte, sep byte, fn func([]byte)) bool { - if len(dt) == 0 { - return false - } - for { - if len(dt) == 0 { - return true - } - idx := bytes.IndexByte(dt, sep) - if idx == -1 { - fn(dt) - return false - } - fn(dt[:idx]) - dt = dt[idx+1:] - } -} - -func addTime(tm *time.Time, d time.Duration) *time.Time { - if tm == nil { - return nil - } - t := (*tm).Add(d) - return &t -} - -type display struct { - c console.Console - phase string - lineCount int - repeated bool -} - -func (disp *display) getSize() (int, int) { - width := 80 - height := 10 - if disp.c != nil { - size, err := disp.c.Size() - if err == nil && size.Width > 0 && size.Height > 0 { - width = int(size.Width) - height = int(size.Height) - } - } - return width, height -} - -func setupTerminals(jobs []*job, height int, all bool) []*job { - var candidates []*job - numInUse := 0 - for _, j := range jobs { - if j.vertex != nil && j.vertex.termBytes > 0 && j.completedTime == nil { - candidates = append(candidates, j) - } - if j.completedTime == nil { - numInUse++ - } - } - sort.Slice(candidates, func(i, j int) bool { - idxI := candidates[i].vertex.termBytes + candidates[i].vertex.termCount*50 - idxJ := candidates[j].vertex.termBytes + candidates[j].vertex.termCount*50 - return idxI > idxJ - }) - - numFree := height - 2 - numInUse - numToHide := 0 - termLimit := termHeight + 3 - - for i := 0; numFree > termLimit && i < len(candidates); i++ { - candidates[i].showTerm = true - numToHide += candidates[i].vertex.term.UsedHeight() - numFree -= termLimit - } - - if !all { - jobs = wrapHeight(jobs, height-2-numToHide) - } - - return jobs -} - -func (disp *display) print(d displayInfo, width, height int, all bool) { - // this output is inspired by Buck - d.jobs = setupTerminals(d.jobs, height, all) - b := aec.EmptyBuilder - for i := 0; i <= disp.lineCount; i++ { - b = b.Up(1) - } - if !disp.repeated { - b = b.Down(1) - } - disp.repeated = true - fmt.Fprint(disp.c, b.Column(0).ANSI) - - statusStr := "" - if d.countCompleted > 0 && d.countCompleted == d.countTotal && all { - statusStr = "FINISHED" - } - - fmt.Fprint(disp.c, aec.Hide) - defer fmt.Fprint(disp.c, aec.Show) - - out := fmt.Sprintf("[+] %s %.1fs (%d/%d) %s", disp.phase, time.Since(d.startTime).Seconds(), d.countCompleted, d.countTotal, statusStr) - out = align(out, "", width) - fmt.Fprintln(disp.c, out) - lineCount := 0 - for _, j := range d.jobs { - endTime := time.Now() - if j.completedTime != nil { - endTime = *j.completedTime - } - if j.startTime == nil { - continue - } - dt := endTime.Sub(*j.startTime).Seconds() - if dt < 0.05 { - dt = 0 - } - pfx := " => " - timer := fmt.Sprintf(" %3.1fs\n", dt) - status := j.status - showStatus := false - - left := width - len(pfx) - len(timer) - 1 - if status != "" { - if left+len(status) > 20 { - showStatus = true - left -= len(status) + 1 - } - } - if left < 12 { // too small screen to show progress - continue - } - name := j.name - if len(name) > left { - name = name[:left] - } - - out := pfx + name - if showStatus { - out += " " + status - } - - out = align(out, timer, width) - if j.completedTime != nil { - color := aec.BlueF - if j.isCanceled { - color = aec.YellowF - } else if j.hasError { - color = aec.RedF - } - out = aec.Apply(out, color) - } - fmt.Fprint(disp.c, out) - lineCount++ - if j.showTerm { - term := j.vertex.term - term.Resize(termHeight, width-termPad) - for _, l := range term.Content { - if !isEmpty(l) { - out := aec.Apply(fmt.Sprintf(" => => # %s\n", 
string(l)), aec.Faint) - fmt.Fprint(disp.c, out) - lineCount++ - } - } - j.vertex.termCount++ - j.showTerm = false - } - } - // override previous content - if diff := disp.lineCount - lineCount; diff > 0 { - for i := 0; i < diff; i++ { - fmt.Fprintln(disp.c, strings.Repeat(" ", width)) - } - fmt.Fprint(disp.c, aec.EmptyBuilder.Up(uint(diff)).Column(0).ANSI) - } - disp.lineCount = lineCount -} - -func isEmpty(l []rune) bool { - for _, r := range l { - if r != ' ' { - return false - } - } - return true -} - -func align(l, r string, w int) string { - return fmt.Sprintf("%-[2]*[1]s %[3]s", l, w-len(r)-1, r) -} - -func wrapHeight(j []*job, limit int) []*job { - if limit < 0 { - return nil - } - var wrapped []*job - wrapped = append(wrapped, j...) - if len(j) > limit { - wrapped = wrapped[len(j)-limit:] - - // wrap things around if incomplete jobs were cut - var invisible []*job - for _, j := range j[:len(j)-limit] { - if j.completedTime == nil { - invisible = append(invisible, j) - } - } - - if l := len(invisible); l > 0 { - rewrapped := make([]*job, 0, len(wrapped)) - for _, j := range wrapped { - if j.completedTime == nil || l <= 0 { - rewrapped = append(rewrapped, j) - } - l-- - } - freespace := len(wrapped) - len(rewrapped) - wrapped = append(invisible[len(invisible)-freespace:], rewrapped...) - } - } - return wrapped -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go b/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go deleted file mode 100644 index 639e202d2729..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progressui/printer.go +++ /dev/null @@ -1,292 +0,0 @@ -package progressui - -import ( - "context" - "fmt" - "io" - "os" - "sort" - "strings" - "time" - - digest "github.com/opencontainers/go-digest" - "github.com/tonistiigi/units" -) - -const antiFlicker = 5 * time.Second -const maxDelay = 10 * time.Second -const minTimeDelta = 5 * time.Second -const minProgressDelta = 0.05 // % - -type lastStatus struct { - Current int64 - Timestamp time.Time -} - -type textMux struct { - w io.Writer - current digest.Digest - last map[string]lastStatus - notFirst bool -} - -func (p *textMux) printVtx(t *trace, dgst digest.Digest) { - if p.last == nil { - p.last = make(map[string]lastStatus) - } - - v, ok := t.byDigest[dgst] - if !ok { - return - } - - if dgst != p.current { - if p.current != "" { - old := t.byDigest[p.current] - if old.logsPartial { - fmt.Fprintln(p.w, "") - } - old.logsOffset = 0 - old.count = 0 - fmt.Fprintf(p.w, "#%d ...\n", old.index) - } - - if p.notFirst { - fmt.Fprintln(p.w, "") - } else { - p.notFirst = true - } - - if os.Getenv("PROGRESS_NO_TRUNC") == "0" { - fmt.Fprintf(p.w, "#%d %s\n", v.index, limitString(v.Name, 72)) - } else { - fmt.Fprintf(p.w, "#%d %s\n", v.index, v.Name) - } - - } - - if len(v.events) != 0 { - v.logsOffset = 0 - } - for _, ev := range v.events { - fmt.Fprintf(p.w, "#%d %s\n", v.index, ev) - } - v.events = v.events[:0] - - for _, s := range v.statuses { - if _, ok := v.statusUpdates[s.ID]; ok { - doPrint := true - - if last, ok := p.last[s.ID]; ok && s.Completed == nil { - var progressDelta float64 - if s.Total > 0 { - progressDelta = float64(s.Current-last.Current) / float64(s.Total) - } - timeDelta := s.Timestamp.Sub(last.Timestamp) - if progressDelta < minProgressDelta && timeDelta < minTimeDelta { - doPrint = false - } - } - - if !doPrint { - continue - } - - p.last[s.ID] = lastStatus{ - Timestamp: s.Timestamp, - Current: s.Current, - } - - var bytes string - if s.Total != 0 { - 
bytes = fmt.Sprintf(" %.2f / %.2f", units.Bytes(s.Current), units.Bytes(s.Total)) - } else if s.Current != 0 { - bytes = fmt.Sprintf(" %.2f", units.Bytes(s.Current)) - } - var tm string - endTime := s.Timestamp - if s.Completed != nil { - endTime = *s.Completed - } - if s.Started != nil { - diff := endTime.Sub(*s.Started).Seconds() - if diff > 0.01 { - tm = fmt.Sprintf(" %.1fs", diff) - } - } - if s.Completed != nil { - tm += " done" - } - fmt.Fprintf(p.w, "#%d %s%s%s\n", v.index, s.ID, bytes, tm) - } - } - v.statusUpdates = map[string]struct{}{} - - for i, l := range v.logs { - if i == 0 { - l = l[v.logsOffset:] - } - fmt.Fprintf(p.w, "%s", []byte(l)) - if i != len(v.logs)-1 || !v.logsPartial { - fmt.Fprintln(p.w, "") - } - } - - if len(v.logs) > 0 { - if v.logsPartial { - v.logs = v.logs[len(v.logs)-1:] - v.logsOffset = len(v.logs[0]) - } else { - v.logs = nil - v.logsOffset = 0 - } - } - - p.current = dgst - if v.Completed != nil { - p.current = "" - v.count = 0 - - if v.Error != "" { - if v.logsPartial { - fmt.Fprintln(p.w, "") - } - if strings.HasSuffix(v.Error, context.Canceled.Error()) { - fmt.Fprintf(p.w, "#%d CANCELED\n", v.index) - } else { - fmt.Fprintf(p.w, "#%d ERROR: %s\n", v.index, v.Error) - } - } else if v.Cached { - fmt.Fprintf(p.w, "#%d CACHED\n", v.index) - } else { - tm := "" - if v.Started != nil { - tm = fmt.Sprintf(" %.1fs", v.Completed.Sub(*v.Started).Seconds()) - } - fmt.Fprintf(p.w, "#%d DONE%s\n", v.index, tm) - } - - } - - delete(t.updates, dgst) -} - -func sortCompleted(t *trace, m map[digest.Digest]struct{}) []digest.Digest { - out := make([]digest.Digest, 0, len(m)) - for k := range m { - out = append(out, k) - } - sort.Slice(out, func(i, j int) bool { - return t.byDigest[out[i]].Completed.Before(*t.byDigest[out[j]].Completed) - }) - return out -} - -func (p *textMux) print(t *trace) { - completed := map[digest.Digest]struct{}{} - rest := map[digest.Digest]struct{}{} - - for dgst := range t.updates { - v, ok := t.byDigest[dgst] - if !ok { - continue - } - if v.Vertex.Completed != nil { - completed[dgst] = struct{}{} - } else { - rest[dgst] = struct{}{} - } - } - - current := p.current - - // items that have completed need to be printed first - if _, ok := completed[current]; ok { - p.printVtx(t, current) - } - - for _, dgst := range sortCompleted(t, completed) { - if dgst != current { - p.printVtx(t, dgst) - } - } - - if len(rest) == 0 { - if current != "" { - if v := t.byDigest[current]; v.Started != nil && v.Completed == nil { - return - } - } - // make any open vertex active - for dgst, v := range t.byDigest { - if v.Started != nil && v.Completed == nil { - p.printVtx(t, dgst) - return - } - } - return - } - - // now print the active one - if _, ok := rest[current]; ok { - p.printVtx(t, current) - } - - stats := map[digest.Digest]*vtxStat{} - now := time.Now() - sum := 0.0 - var max digest.Digest - if current != "" { - rest[current] = struct{}{} - } - for dgst := range rest { - v, ok := t.byDigest[dgst] - if !ok { - continue - } - tm := now.Sub(*v.lastBlockTime) - speed := float64(v.count) / tm.Seconds() - overLimit := tm > maxDelay && dgst != current - stats[dgst] = &vtxStat{blockTime: tm, speed: speed, overLimit: overLimit} - sum += speed - if overLimit || max == "" || stats[max].speed < speed { - max = dgst - } - } - for dgst := range stats { - stats[dgst].share = stats[dgst].speed / sum - } - - if _, ok := completed[current]; ok || current == "" { - p.printVtx(t, max) - return - } - - // show items that were hidden - for dgst := range rest { - if 
stats[dgst].overLimit { - p.printVtx(t, dgst) - return - } - } - - // fair split between vertexes - if 1.0/(1.0-stats[current].share)*antiFlicker.Seconds() < stats[current].blockTime.Seconds() { - p.printVtx(t, max) - return - } -} - -type vtxStat struct { - blockTime time.Duration - speed float64 - share float64 - overLimit bool -} - -func limitString(s string, l int) string { - if len(s) > l { - return s[:l] + "..." - } - return s -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progresswriter/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/progresswriter/multiwriter.go deleted file mode 100644 index 7d5a4afe14eb..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progresswriter/multiwriter.go +++ /dev/null @@ -1,106 +0,0 @@ -package progresswriter - -import ( - "context" - "strings" - "sync" - - "github.com/moby/buildkit/client" - "golang.org/x/sync/errgroup" -) - -type MultiWriter struct { - w Writer - eg *errgroup.Group - once sync.Once - ready chan struct{} -} - -func (mw *MultiWriter) WithPrefix(pfx string, force bool) Writer { - in := make(chan *client.SolveStatus) - out := mw.w.Status() - p := &prefixed{ - main: mw.w, - in: in, - } - mw.eg.Go(func() error { - mw.once.Do(func() { - close(mw.ready) - }) - for { - select { - case v, ok := <-in: - if ok { - if force { - for _, v := range v.Vertexes { - v.Name = addPrefix(pfx, v.Name) - } - } - out <- v - } else { - return nil - } - case <-mw.Done(): - return mw.Err() - } - } - }) - return p -} - -func (mw *MultiWriter) Done() <-chan struct{} { - return mw.w.Done() -} - -func (mw *MultiWriter) Err() error { - return mw.w.Err() -} - -func (mw *MultiWriter) Status() chan *client.SolveStatus { - return nil -} - -type prefixed struct { - main Writer - in chan *client.SolveStatus -} - -func (p *prefixed) Done() <-chan struct{} { - return p.main.Done() -} - -func (p *prefixed) Err() error { - return p.main.Err() -} - -func (p *prefixed) Status() chan *client.SolveStatus { - return p.in -} - -func NewMultiWriter(pw Writer) *MultiWriter { - if pw == nil { - return nil - } - eg, _ := errgroup.WithContext(context.TODO()) - - ready := make(chan struct{}) - - go func() { - <-ready - eg.Wait() - close(pw.Status()) - }() - - return &MultiWriter{ - w: pw, - eg: eg, - ready: ready, - } -} - -func addPrefix(pfx, name string) string { - if strings.HasPrefix(name, "[") { - return "[" + pfx + " " + name[1:] - } - return "[" + pfx + "] " + name -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progresswriter/printer.go b/vendor/github.com/moby/buildkit/util/progress/progresswriter/printer.go deleted file mode 100644 index 037123dfc1a6..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progresswriter/printer.go +++ /dev/null @@ -1,94 +0,0 @@ -package progresswriter - -import ( - "context" - "os" - - "github.com/containerd/console" - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/util/progress/progressui" - "github.com/pkg/errors" -) - -type printer struct { - status chan *client.SolveStatus - done <-chan struct{} - err error -} - -func (p *printer) Done() <-chan struct{} { - return p.done -} - -func (p *printer) Err() error { - return p.err -} - -func (p *printer) Status() chan *client.SolveStatus { - if p == nil { - return nil - } - return p.status -} - -type tee struct { - Writer - status chan *client.SolveStatus -} - -func (t *tee) Status() chan *client.SolveStatus { - return t.status -} - -func Tee(w Writer, ch chan *client.SolveStatus) Writer { - st := make(chan 
*client.SolveStatus) - t := &tee{ - status: st, - Writer: w, - } - go func() { - for v := range st { - w.Status() <- v - ch <- v - } - close(w.Status()) - close(ch) - }() - return t -} - -func NewPrinter(ctx context.Context, out console.File, mode string) (Writer, error) { - statusCh := make(chan *client.SolveStatus) - doneCh := make(chan struct{}) - - pw := &printer{ - status: statusCh, - done: doneCh, - } - - if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == "auto" { - mode = v - } - - var c console.Console - switch mode { - case "auto", "tty", "": - if cons, err := console.ConsoleFromFile(out); err == nil { - c = cons - } else { - if mode == "tty" { - return nil, errors.Wrap(err, "failed to get console") - } - } - case "plain": - default: - return nil, errors.Errorf("invalid progress mode %s", mode) - } - - go func() { - // not using shared context to not disrupt display but let is finish reporting errors - pw.err = progressui.DisplaySolveStatus(ctx, "", c, out, statusCh) - close(doneCh) - }() - return pw, nil -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progresswriter/progress.go b/vendor/github.com/moby/buildkit/util/progress/progresswriter/progress.go deleted file mode 100644 index 31e4fa45990c..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progresswriter/progress.go +++ /dev/null @@ -1,93 +0,0 @@ -package progresswriter - -import ( - "time" - - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/identity" - "github.com/opencontainers/go-digest" -) - -type Logger func(*client.SolveStatus) - -type SubLogger interface { - Wrap(name string, fn func() error) error - Log(stream int, dt []byte) -} - -func Wrap(name string, l Logger, fn func(SubLogger) error) (err error) { - if l == nil { - return nil - } - dgst := digest.FromBytes([]byte(identity.NewID())) - tm := time.Now() - l(&client.SolveStatus{ - Vertexes: []*client.Vertex{{ - Digest: dgst, - Name: name, - Started: &tm, - }}, - }) - - defer func() { - tm2 := time.Now() - errMsg := "" - if err != nil { - errMsg = err.Error() - } - l(&client.SolveStatus{ - Vertexes: []*client.Vertex{{ - Digest: dgst, - Name: name, - Started: &tm, - Completed: &tm2, - Error: errMsg, - }}, - }) - }() - - return fn(&subLogger{dgst, l}) -} - -type subLogger struct { - dgst digest.Digest - logger Logger -} - -func (sl *subLogger) Wrap(name string, fn func() error) (err error) { - tm := time.Now() - sl.logger(&client.SolveStatus{ - Statuses: []*client.VertexStatus{{ - Vertex: sl.dgst, - ID: name, - Timestamp: time.Now(), - Started: &tm, - }}, - }) - - defer func() { - tm2 := time.Now() - sl.logger(&client.SolveStatus{ - Statuses: []*client.VertexStatus{{ - Vertex: sl.dgst, - ID: name, - Timestamp: time.Now(), - Started: &tm, - Completed: &tm2, - }}, - }) - }() - - return fn() -} - -func (sl *subLogger) Log(stream int, dt []byte) { - sl.logger(&client.SolveStatus{ - Logs: []*client.VertexLog{{ - Vertex: sl.dgst, - Stream: stream, - Data: dt, - Timestamp: time.Now(), - }}, - }) -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progresswriter/reset.go b/vendor/github.com/moby/buildkit/util/progress/progresswriter/reset.go deleted file mode 100644 index a9ac9f6f6297..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progresswriter/reset.go +++ /dev/null @@ -1,71 +0,0 @@ -package progresswriter - -import ( - "time" - - "github.com/moby/buildkit/client" -) - -func ResetTime(in Writer) Writer { - w := &pw{Writer: in, status: make(chan *client.SolveStatus), tm: time.Now()} - go func() { 
- for { - select { - case <-in.Done(): - return - case st, ok := <-w.status: - if !ok { - close(in.Status()) - return - } - if w.diff == nil { - for _, v := range st.Vertexes { - if v.Started != nil { - d := v.Started.Sub(w.tm) - w.diff = &d - } - } - } - if w.diff != nil { - for _, v := range st.Vertexes { - if v.Started != nil { - d := v.Started.Add(-*w.diff) - v.Started = &d - } - if v.Completed != nil { - d := v.Completed.Add(-*w.diff) - v.Completed = &d - } - } - for _, v := range st.Statuses { - if v.Started != nil { - d := v.Started.Add(-*w.diff) - v.Started = &d - } - if v.Completed != nil { - d := v.Completed.Add(-*w.diff) - v.Completed = &d - } - v.Timestamp = v.Timestamp.Add(-*w.diff) - } - for _, v := range st.Logs { - v.Timestamp = v.Timestamp.Add(-*w.diff) - } - } - in.Status() <- st - } - } - }() - return w -} - -type pw struct { - Writer - tm time.Time - diff *time.Duration - status chan *client.SolveStatus -} - -func (p *pw) Status() chan *client.SolveStatus { - return p.status -} diff --git a/vendor/github.com/moby/buildkit/util/progress/progresswriter/writer.go b/vendor/github.com/moby/buildkit/util/progress/progresswriter/writer.go deleted file mode 100644 index f30c96644124..000000000000 --- a/vendor/github.com/moby/buildkit/util/progress/progresswriter/writer.go +++ /dev/null @@ -1,46 +0,0 @@ -package progresswriter - -import ( - "time" - - "github.com/moby/buildkit/client" - "github.com/moby/buildkit/identity" - "github.com/opencontainers/go-digest" -) - -type Writer interface { - Done() <-chan struct{} - Err() error - Status() chan *client.SolveStatus -} - -func Write(w Writer, name string, f func() error) { - status := w.Status() - dgst := digest.FromBytes([]byte(identity.NewID())) - tm := time.Now() - - vtx := client.Vertex{ - Digest: dgst, - Name: name, - Started: &tm, - } - - status <- &client.SolveStatus{ - Vertexes: []*client.Vertex{&vtx}, - } - - var err error - if f != nil { - err = f() - } - - tm2 := time.Now() - vtx2 := vtx - vtx2.Completed = &tm2 - if err != nil { - vtx2.Error = err.Error() - } - status <- &client.SolveStatus{ - Vertexes: []*client.Vertex{&vtx2}, - } -} diff --git a/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go b/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go deleted file mode 100644 index b6f23678603a..000000000000 --- a/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go +++ /dev/null @@ -1,51 +0,0 @@ -package sshutil - -import ( - "fmt" - "net" - "strconv" - "strings" - - "golang.org/x/crypto/ssh" -) - -const defaultPort = 22 - -var errCallbackDone = fmt.Errorf("callback failed on purpose") - -// addDefaultPort appends a default port if hostport doesn't contain one -func addDefaultPort(hostport string, defaultPort int) string { - _, _, err := net.SplitHostPort(hostport) - if err == nil { - return hostport - } - hostport = net.JoinHostPort(hostport, strconv.Itoa(defaultPort)) - return hostport -} - -// SshKeyScan scans a ssh server for the hostkey; server should be in the form hostname, or hostname:port -func SSHKeyScan(server string) (string, error) { - var key string - KeyScanCallback := func(hostport string, remote net.Addr, pubKey ssh.PublicKey) error { - hostname, _, err := net.SplitHostPort(hostport) - if err != nil { - return err - } - key = strings.TrimSpace(fmt.Sprintf("%s %s", hostname, string(ssh.MarshalAuthorizedKey(pubKey)))) - return errCallbackDone - } - config := &ssh.ClientConfig{ - HostKeyCallback: KeyScanCallback, - } - - server = addDefaultPort(server, defaultPort) - conn, err := 
ssh.Dial("tcp", server, config) - if key != "" { - // as long as we get the key, the function worked - err = nil - } - if conn != nil { - conn.Close() - } - return key, err -} diff --git a/vendor/github.com/moby/buildkit/util/sshutil/transport_validation.go b/vendor/github.com/moby/buildkit/util/sshutil/transport_validation.go deleted file mode 100644 index c50a2cc7f95d..000000000000 --- a/vendor/github.com/moby/buildkit/util/sshutil/transport_validation.go +++ /dev/null @@ -1,11 +0,0 @@ -package sshutil - -import ( - "regexp" -) - -var gitSSHRegex = regexp.MustCompile("^[a-zA-Z0-9-_]+@[a-zA-Z0-9-.]+:.*$") - -func IsImplicitSSHTransport(s string) bool { - return gitSSHRegex.MatchString(s) -} diff --git a/vendor/github.com/moby/buildkit/util/stack/generate.go b/vendor/github.com/moby/buildkit/util/stack/generate.go deleted file mode 100644 index 97516baa99c5..000000000000 --- a/vendor/github.com/moby/buildkit/util/stack/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package stack - -//go:generate protoc -I=. -I=../../vendor/ --go_out=. stack.proto diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.go b/vendor/github.com/moby/buildkit/util/stack/stack.go deleted file mode 100644 index 3409ac047af0..000000000000 --- a/vendor/github.com/moby/buildkit/util/stack/stack.go +++ /dev/null @@ -1,182 +0,0 @@ -package stack - -import ( - "fmt" - io "io" - "os" - "runtime" - "strconv" - "strings" - "sync" - - "github.com/containerd/typeurl" - "github.com/pkg/errors" -) - -var helpers map[string]struct{} -var helpersMu sync.RWMutex - -func init() { - typeurl.Register((*Stack)(nil), "github.com/moby/buildkit", "stack.Stack+json") - - helpers = map[string]struct{}{} -} - -var version string -var revision string - -func SetVersionInfo(v, r string) { - version = v - revision = r -} - -func Helper() { - var pc [1]uintptr - n := runtime.Callers(2, pc[:]) - if n == 0 { - return - } - frames := runtime.CallersFrames(pc[:n]) - frame, _ := frames.Next() - helpersMu.Lock() - helpers[frame.Function] = struct{}{} - helpersMu.Unlock() -} - -func Traces(err error) []*Stack { - var st []*Stack - - wrapped, ok := err.(interface { - Unwrap() error - }) - if ok { - st = Traces(wrapped.Unwrap()) - } - - if ste, ok := err.(interface { - StackTrace() errors.StackTrace - }); ok { - st = append(st, convertStack(ste.StackTrace())) - } - - if ste, ok := err.(interface { - StackTrace() *Stack - }); ok { - st = append(st, ste.StackTrace()) - } - - return st -} - -func Enable(err error) error { - if err == nil { - return nil - } - Helper() - if !hasLocalStackTrace(err) { - return errors.WithStack(err) - } - return err -} - -func Wrap(err error, s Stack) error { - return &withStack{stack: s, error: err} -} - -func hasLocalStackTrace(err error) bool { - wrapped, ok := err.(interface { - Unwrap() error - }) - if ok && hasLocalStackTrace(wrapped.Unwrap()) { - return true - } - - _, ok = err.(interface { - StackTrace() errors.StackTrace - }) - return ok -} - -func Formatter(err error) fmt.Formatter { - return &formatter{err} -} - -type formatter struct { - error -} - -func (w *formatter) Format(s fmt.State, verb rune) { - if w.error == nil { - fmt.Fprintf(s, "%v", w.error) - return - } - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%s\n", w.Error()) - for _, stack := range Traces(w.error) { - fmt.Fprintf(s, "%d %s %s\n", stack.Pid, stack.Version, strings.Join(stack.Cmdline, " ")) - for _, f := range stack.Frames { - fmt.Fprintf(s, "%s\n\t%s:%d\n", f.Name, f.File, f.Line) - } - fmt.Fprintln(s) - } - return - } - 
fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -func convertStack(s errors.StackTrace) *Stack { - var out Stack - helpersMu.RLock() - defer helpersMu.RUnlock() - for _, f := range s { - dt, err := f.MarshalText() - if err != nil { - continue - } - p := strings.SplitN(string(dt), " ", 2) - if len(p) != 2 { - continue - } - if _, ok := helpers[p[0]]; ok { - continue - } - idx := strings.LastIndexByte(p[1], ':') - if idx == -1 { - continue - } - line, err := strconv.Atoi(p[1][idx+1:]) - if err != nil { - continue - } - out.Frames = append(out.Frames, &Frame{ - Name: p[0], - File: p[1][:idx], - Line: int32(line), - }) - } - out.Cmdline = os.Args - out.Pid = int32(os.Getpid()) - out.Version = version - out.Revision = revision - return &out -} - -type withStack struct { - stack Stack - error -} - -func (e *withStack) Unwrap() error { - return e.error -} - -func (e *withStack) StackTrace() *Stack { - return &e.stack -} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go b/vendor/github.com/moby/buildkit/util/stack/stack.pb.go deleted file mode 100644 index df55582db48a..000000000000 --- a/vendor/github.com/moby/buildkit/util/stack/stack.pb.go +++ /dev/null @@ -1,172 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: stack.proto - -package stack - -import ( - fmt "fmt" - proto "github.com/golang/protobuf/proto" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package - -type Stack struct { - Frames []*Frame `protobuf:"bytes,1,rep,name=frames,proto3" json:"frames,omitempty"` - Cmdline []string `protobuf:"bytes,2,rep,name=cmdline,proto3" json:"cmdline,omitempty"` - Pid int32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"` - Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` - Revision string `protobuf:"bytes,5,opt,name=revision,proto3" json:"revision,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Stack) Reset() { *m = Stack{} } -func (m *Stack) String() string { return proto.CompactTextString(m) } -func (*Stack) ProtoMessage() {} -func (*Stack) Descriptor() ([]byte, []int) { - return fileDescriptor_b44c07feb2ca0a5a, []int{0} -} - -func (m *Stack) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Stack.Unmarshal(m, b) -} -func (m *Stack) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Stack.Marshal(b, m, deterministic) -} -func (m *Stack) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stack.Merge(m, src) -} -func (m *Stack) XXX_Size() int { - return xxx_messageInfo_Stack.Size(m) -} -func (m *Stack) XXX_DiscardUnknown() { - xxx_messageInfo_Stack.DiscardUnknown(m) -} - -var xxx_messageInfo_Stack proto.InternalMessageInfo - -func (m *Stack) GetFrames() []*Frame { - if m != nil { - return m.Frames - } - return nil -} - -func (m *Stack) GetCmdline() []string { - if m != nil { - return m.Cmdline - } - return nil -} - -func (m *Stack) GetPid() int32 { - if m != nil { - return m.Pid - } - return 0 -} - -func (m *Stack) GetVersion() string { - if m != nil { - return m.Version - } - return "" -} - -func (m *Stack) GetRevision() string { - if m != nil { - return m.Revision - } - return "" -} - -type Frame struct { - Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` - File string `protobuf:"bytes,2,opt,name=File,proto3" json:"File,omitempty"` - Line int32 `protobuf:"varint,3,opt,name=Line,proto3" json:"Line,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` -} - -func (m *Frame) Reset() { *m = Frame{} } -func (m *Frame) String() string { return proto.CompactTextString(m) } -func (*Frame) ProtoMessage() {} -func (*Frame) Descriptor() ([]byte, []int) { - return fileDescriptor_b44c07feb2ca0a5a, []int{1} -} - -func (m *Frame) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Frame.Unmarshal(m, b) -} -func (m *Frame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Frame.Marshal(b, m, deterministic) -} -func (m *Frame) XXX_Merge(src proto.Message) { - xxx_messageInfo_Frame.Merge(m, src) -} -func (m *Frame) XXX_Size() int { - return xxx_messageInfo_Frame.Size(m) -} -func (m *Frame) XXX_DiscardUnknown() { - xxx_messageInfo_Frame.DiscardUnknown(m) -} - -var xxx_messageInfo_Frame proto.InternalMessageInfo - -func (m *Frame) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Frame) GetFile() string { - if m != nil { - return m.File - } - return "" -} - -func (m *Frame) GetLine() int32 { - if m != nil { - return m.Line - } - return 0 -} - -func init() { - proto.RegisterType((*Stack)(nil), "stack.Stack") - proto.RegisterType((*Frame)(nil), "stack.Frame") -} - -func init() { - proto.RegisterFile("stack.proto", fileDescriptor_b44c07feb2ca0a5a) -} - 
-var fileDescriptor_b44c07feb2ca0a5a = []byte{ - // 185 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x3d, 0xce, 0x82, 0x40, - 0x10, 0x86, 0xb3, 0xdf, 0xb2, 0x7c, 0x3a, 0x58, 0x98, 0xa9, 0x36, 0x56, 0x1b, 0x62, 0x41, 0x45, - 0xa1, 0x47, 0x30, 0xa1, 0x32, 0x16, 0x78, 0x02, 0x84, 0x35, 0xd9, 0xc8, 0x5f, 0x76, 0x09, 0xd7, - 0xf0, 0xca, 0x66, 0x06, 0xb4, 0x7b, 0xde, 0x9f, 0xe4, 0x9d, 0x81, 0x24, 0x4c, 0x55, 0xfd, 0xca, - 0x47, 0x3f, 0x4c, 0x03, 0x2a, 0x16, 0xe9, 0x5b, 0x80, 0xba, 0x13, 0xe1, 0x11, 0xe2, 0xa7, 0xaf, - 0x3a, 0x1b, 0xb4, 0x30, 0x32, 0x4b, 0x4e, 0xbb, 0x7c, 0xa9, 0x17, 0x64, 0x96, 0x6b, 0x86, 0x1a, - 0xfe, 0xeb, 0xae, 0x69, 0x5d, 0x6f, 0xf5, 0x9f, 0x91, 0xd9, 0xb6, 0xfc, 0x4a, 0xdc, 0x83, 0x1c, - 0x5d, 0xa3, 0xa5, 0x11, 0x99, 0x2a, 0x09, 0xa9, 0x3b, 0x5b, 0x1f, 0xdc, 0xd0, 0xeb, 0xc8, 0x08, - 0xea, 0xae, 0x12, 0x0f, 0xb0, 0xf1, 0x76, 0x76, 0x1c, 0x29, 0x8e, 0x7e, 0x3a, 0xbd, 0x80, 0xe2, - 0x49, 0x44, 0x88, 0x6e, 0x55, 0x67, 0xb5, 0xe0, 0x02, 0x33, 0x79, 0x85, 0x6b, 0x69, 0x9b, 0x3d, - 0x62, 0xf2, 0xae, 0x74, 0xcf, 0xb2, 0xcc, 0xfc, 0x88, 0xf9, 0xc9, 0xf3, 0x27, 0x00, 0x00, 0xff, - 0xff, 0xfd, 0x2c, 0xbb, 0xfb, 0xf3, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/moby/buildkit/util/stack/stack.proto b/vendor/github.com/moby/buildkit/util/stack/stack.proto deleted file mode 100644 index 9c63bc3626c1..000000000000 --- a/vendor/github.com/moby/buildkit/util/stack/stack.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package stack; - -message Stack { - repeated Frame frames = 1; - repeated string cmdline = 2; - int32 pid = 3; - string version = 4; - string revision = 5; -} - -message Frame { - string Name = 1; - string File = 2; - int32 Line = 3; -} \ No newline at end of file diff --git a/vendor/github.com/moby/buildkit/util/system/path.go b/vendor/github.com/moby/buildkit/util/system/path.go deleted file mode 100644 index f6dc70dc8dd5..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/path.go +++ /dev/null @@ -1,18 +0,0 @@ -package system - -// DefaultPathEnvUnix is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnvUnix = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnvWindows is windows style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ';' character . -const DefaultPathEnvWindows = "c:\\Windows\\System32;c:\\Windows" - -func DefaultPathEnv(os string) string { - if os == "windows" { - return DefaultPathEnvWindows - } - return DefaultPathEnvUnix -} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go deleted file mode 100644 index f3762e69d36a..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package system - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. 
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go deleted file mode 100644 index 3fc47449484e..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/path_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be contatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go deleted file mode 100644 index 62afa03fef03..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build linux,seccomp - -package system - -import ( - "sync" - - "golang.org/x/sys/unix" -) - -var seccompSupported bool -var seccompOnce sync.Once - -func SeccompSupported() bool { - seccompOnce.Do(func() { - seccompSupported = getSeccompSupported() - }) - return seccompSupported -} - -func getSeccompSupported() bool { - if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { - // Make sure the kernel has CONFIG_SECCOMP_FILTER. 
- if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { - return true - } - } - return false -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go deleted file mode 100644 index e348c379a903..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,seccomp - -package system - -func SeccompSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go b/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go deleted file mode 100644 index 84cfb7fa8398..000000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !seccomp - -package system - -func SeccompSupported() bool { - return false -} diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE deleted file mode 100644 index f0027349e830..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 The OpenTracing Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md deleted file mode 100644 index 6ef1d7c9d274..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/README.md +++ /dev/null @@ -1,171 +0,0 @@ -[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) - -# OpenTracing API for Go - -This package is a Go platform API for OpenTracing. 
- -## Required Reading - -In order to understand the Go platform API, one must first be familiar with the -[OpenTracing project](https://opentracing.io) and -[terminology](https://opentracing.io/specification/) more specifically. - -## API overview for those adding instrumentation - -Everyday consumers of this `opentracing` package really only need to worry -about a couple of key abstractions: the `StartSpan` function, the `Span` -interface, and binding a `Tracer` at `main()`-time. Here are code snippets -demonstrating some important use cases. - -#### Singleton initialization - -The simplest starting point is `./default_tracer.go`. As early as possible, call - -```go - import "github.com/opentracing/opentracing-go" - import ".../some_tracing_impl" - - func main() { - opentracing.SetGlobalTracer( - // tracing impl specific: - some_tracing_impl.New(...), - ) - ... - } -``` - -#### Non-Singleton initialization - -If you prefer direct control to singletons, manage ownership of the -`opentracing.Tracer` implementation explicitly. - -#### Creating a Span given an existing Go `context.Context` - -If you use `context.Context` in your application, OpenTracing's Go library will -happily rely on it for `Span` propagation. To start a new (blocking child) -`Span`, you can use `StartSpanFromContext`. - -```go - func xyz(ctx context.Context, ...) { - ... - span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") - defer span.Finish() - span.LogFields( - log.String("event", "soft error"), - log.String("type", "cache timeout"), - log.Int("waited.millis", 1500)) - ... - } -``` - -#### Starting an empty trace by creating a "root span" - -It's always possible to create a "root" `Span` with no parent or other causal -reference. - -```go - func xyz() { - ... - sp := opentracing.StartSpan("operation_name") - defer sp.Finish() - ... - } -``` - -#### Creating a (child) Span given an existing (parent) Span - -```go - func xyz(parentSpan opentracing.Span, ...) { - ... - sp := opentracing.StartSpan( - "operation_name", - opentracing.ChildOf(parentSpan.Context())) - defer sp.Finish() - ... - } -``` - -#### Serializing to the wire - -```go - func makeSomeRequest(ctx context.Context) ... { - if span := opentracing.SpanFromContext(ctx); span != nil { - httpClient := &http.Client{} - httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) - - // Transmit the span's TraceContext as HTTP headers on our - // outbound request. - opentracing.GlobalTracer().Inject( - span.Context(), - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(httpReq.Header)) - - resp, err := httpClient.Do(httpReq) - ... - } - ... - } -``` - -#### Deserializing from the wire - -```go - http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - var serverSpan opentracing.Span - appSpecificOperationName := ... - wireContext, err := opentracing.GlobalTracer().Extract( - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(req.Header)) - if err != nil { - // Optionally record something about err here - } - - // Create the span referring to the RPC client if available. - // If wireContext == nil, a root span will be created. - serverSpan = opentracing.StartSpan( - appSpecificOperationName, - ext.RPCServerOption(wireContext)) - - defer serverSpan.Finish() - - ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) - ... - } -``` - -#### Conditionally capture a field using `log.Noop` - -In some situations, you may want to dynamically decide whether or not -to log a field. 
For example, you may want to capture additional data, -such as a customer ID, in non-production environments: - -```go - func Customer(order *Order) log.Field { - if os.Getenv("ENVIRONMENT") == "dev" { - return log.String("customer", order.Customer.ID) - } - return log.Noop() - } -``` - -#### Goroutine-safety - -The entire public API is goroutine-safe and does not require external -synchronization. - -## API pointers for those implementing a tracing system - -Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. - -## API compatibility - -For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. - -## Tracer test suite - -A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. - -## Licensing - -[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go deleted file mode 100644 index e11977ebe85d..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext.go +++ /dev/null @@ -1,24 +0,0 @@ -package opentracing - -import ( - "context" -) - -// TracerContextWithSpanExtension is an extension interface that the -// implementation of the Tracer interface may want to implement. It -// allows to have some control over the go context when the -// ContextWithSpan is invoked. -// -// The primary purpose of this extension are adapters from opentracing -// API to some other tracing API. -type TracerContextWithSpanExtension interface { - // ContextWithSpanHook gets called by the ContextWithSpan - // function, when the Tracer implementation also implements - // this interface. It allows to put extra information into the - // context and make it available to the callers of the - // ContextWithSpan. - // - // This hook is invoked before the ContextWithSpan function - // actually puts the span into the context. - ContextWithSpanHook(ctx context.Context, span Span) context.Context -} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go deleted file mode 100644 index 8282bd758467..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/field.go +++ /dev/null @@ -1,17 +0,0 @@ -package ext - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -// LogError sets the error=true tag on the Span and logs err as an "error" event. -func LogError(span opentracing.Span, err error, fields ...log.Field) { - Error.Set(span, true) - ef := []log.Field{ - log.Event("error"), - log.Error(err), - } - ef = append(ef, fields...) - span.LogFields(ef...) -} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go deleted file mode 100644 index a414b5951f03..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ /dev/null @@ -1,215 +0,0 @@ -package ext - -import "github.com/opentracing/opentracing-go" - -// These constants define common tag names recommended for better portability across -// tracing systems and languages/platforms. 
-// -// The tag names are defined as typed strings, so that in addition to the usual use -// -// span.setTag(TagName, value) -// -// they also support value type validation via this additional syntax: -// -// TagName.Set(span, value) -// -var ( - ////////////////////////////////////////////////////////////////////// - // SpanKind (client/server or producer/consumer) - ////////////////////////////////////////////////////////////////////// - - // SpanKind hints at relationship between spans, e.g. client/server - SpanKind = spanKindTagName("span.kind") - - // SpanKindRPCClient marks a span representing the client-side of an RPC - // or other remote call - SpanKindRPCClientEnum = SpanKindEnum("client") - SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} - - // SpanKindRPCServer marks a span representing the server-side of an RPC - // or other remote call - SpanKindRPCServerEnum = SpanKindEnum("server") - SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} - - // SpanKindProducer marks a span representing the producer-side of a - // message bus - SpanKindProducerEnum = SpanKindEnum("producer") - SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} - - // SpanKindConsumer marks a span representing the consumer-side of a - // message bus - SpanKindConsumerEnum = SpanKindEnum("consumer") - SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} - - ////////////////////////////////////////////////////////////////////// - // Component name - ////////////////////////////////////////////////////////////////////// - - // Component is a low-cardinality identifier of the module, library, - // or package that is generating a span. - Component = StringTagName("component") - - ////////////////////////////////////////////////////////////////////// - // Sampling hint - ////////////////////////////////////////////////////////////////////// - - // SamplingPriority determines the priority of sampling this Span. - SamplingPriority = Uint16TagName("sampling.priority") - - ////////////////////////////////////////////////////////////////////// - // Peer tags. These tags can be emitted by either client-side or - // server-side to describe the other side/service in a peer-to-peer - // communications, like an RPC call. - ////////////////////////////////////////////////////////////////////// - - // PeerService records the service name of the peer. - PeerService = StringTagName("peer.service") - - // PeerAddress records the address name of the peer. This may be a "ip:port", - // a bare "hostname", a FQDN or even a database DSN substring - // like "mysql://username@127.0.0.1:3306/dbname" - PeerAddress = StringTagName("peer.address") - - // PeerHostname records the host name of the peer - PeerHostname = StringTagName("peer.hostname") - - // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = IPv4TagName("peer.ipv4") - - // PeerHostIPv6 records IP v6 host address of the peer - PeerHostIPv6 = StringTagName("peer.ipv6") - - // PeerPort records port number of the peer - PeerPort = Uint16TagName("peer.port") - - ////////////////////////////////////////////////////////////////////// - // HTTP Tags - ////////////////////////////////////////////////////////////////////// - - // HTTPUrl should be the URL of the request being handled in this segment - // of the trace, in standard URI format. The protocol is optional. 
- HTTPUrl = StringTagName("http.url") - - // HTTPMethod is the HTTP method of the request, and is case-insensitive. - HTTPMethod = StringTagName("http.method") - - // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the - // HTTP response. - HTTPStatusCode = Uint16TagName("http.status_code") - - ////////////////////////////////////////////////////////////////////// - // DB Tags - ////////////////////////////////////////////////////////////////////// - - // DBInstance is database instance name. - DBInstance = StringTagName("db.instance") - - // DBStatement is a database statement for the given database type. - // It can be a query or a prepared statement (i.e., before substitution). - DBStatement = StringTagName("db.statement") - - // DBType is a database type. For any SQL database, "sql". - // For others, the lower-case database category, e.g. "redis" - DBType = StringTagName("db.type") - - // DBUser is a username for accessing database. - DBUser = StringTagName("db.user") - - ////////////////////////////////////////////////////////////////////// - // Message Bus Tag - ////////////////////////////////////////////////////////////////////// - - // MessageBusDestination is an address at which messages can be exchanged - MessageBusDestination = StringTagName("message_bus.destination") - - ////////////////////////////////////////////////////////////////////// - // Error Tag - ////////////////////////////////////////////////////////////////////// - - // Error indicates that operation represented by the span resulted in an error. - Error = BoolTagName("error") -) - -// --- - -// SpanKindEnum represents common span types -type SpanKindEnum string - -type spanKindTagName string - -// Set adds a string tag to the `span` -func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { - span.SetTag(string(tag), value) -} - -type rpcServerOption struct { - clientContext opentracing.SpanContext -} - -func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { - if r.clientContext != nil { - opentracing.ChildOf(r.clientContext).Apply(o) - } - SpanKindRPCServer.Apply(o) -} - -// RPCServerOption returns a StartSpanOption appropriate for an RPC server span -// with `client` representing the metadata for the remote peer Span if available. -// In case client == nil, due to the client not being instrumented, this RPC -// server span will be a root span. 
-func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { - return rpcServerOption{client} -} - -// --- - -// StringTagName is a common tag name to be set to a string value -type StringTagName string - -// Set adds a string tag to the `span` -func (tag StringTagName) Set(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint32TagName is a common tag name to be set to a uint32 value -type Uint32TagName string - -// Set adds a uint32 tag to the `span` -func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint16TagName is a common tag name to be set to a uint16 value -type Uint16TagName string - -// Set adds a uint16 tag to the `span` -func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { - span.SetTag(string(tag), value) -} - -// --- - -// BoolTagName is a common tag name to be set to a bool value -type BoolTagName string - -// Set adds a bool tag to the `span` -func (tag BoolTagName) Set(span opentracing.Span, value bool) { - span.SetTag(string(tag), value) -} - -// IPv4TagName is a common tag name to be set to an ipv4 value -type IPv4TagName string - -// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility -func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" -func (tag IPv4TagName) SetString(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go deleted file mode 100644 index 4f7066a925cd..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/globaltracer.go +++ /dev/null @@ -1,42 +0,0 @@ -package opentracing - -type registeredTracer struct { - tracer Tracer - isRegistered bool -} - -var ( - globalTracer = registeredTracer{NoopTracer{}, false} -) - -// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by -// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an -// opentracing.Tracer instance) should call SetGlobalTracer as early as -// possible in main(), prior to calling the `StartSpan` global func below. -// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` -// (etc) globals are noops. -func SetGlobalTracer(tracer Tracer) { - globalTracer = registeredTracer{tracer, true} -} - -// GlobalTracer returns the global singleton `Tracer` implementation. -// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop -// implementation that drops all data handed to it. -func GlobalTracer() Tracer { - return globalTracer.tracer -} - -// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. -func StartSpan(operationName string, opts ...StartSpanOption) Span { - return globalTracer.tracer.StartSpan(operationName, opts...) -} - -// InitGlobalTracer is deprecated. Please use SetGlobalTracer. 
-func InitGlobalTracer(tracer Tracer) { - SetGlobalTracer(tracer) -} - -// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered -func IsGlobalTracerRegistered() bool { - return globalTracer.isRegistered -} diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod deleted file mode 100644 index bf48bb5d73f7..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module github.com/opentracing/opentracing-go - -go 1.14 - -require github.com/stretchr/testify v1.3.0 diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go deleted file mode 100644 index 1831bc9b2637..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ /dev/null @@ -1,65 +0,0 @@ -package opentracing - -import "context" - -type contextKey struct{} - -var activeSpanKey = contextKey{} - -// ContextWithSpan returns a new `context.Context` that holds a reference to -// the span. If span is nil, a new context without an active span is returned. -func ContextWithSpan(ctx context.Context, span Span) context.Context { - if span != nil { - if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { - ctx = tracerWithHook.ContextWithSpanHook(ctx, span) - } - } - return context.WithValue(ctx, activeSpanKey, span) -} - -// SpanFromContext returns the `Span` previously associated with `ctx`, or -// `nil` if no such `Span` could be found. -// -// NOTE: context.Context != SpanContext: the former is Go's intra-process -// context propagation mechanism, and the latter houses OpenTracing's per-Span -// identity and baggage information. -func SpanFromContext(ctx context.Context) Span { - val := ctx.Value(activeSpanKey) - if sp, ok := val.(Span); ok { - return sp - } - return nil -} - -// StartSpanFromContext starts and returns a Span with `operationName`, using -// any Span found within `ctx` as a ChildOfRef. If no such parent could be -// found, StartSpanFromContext creates a root (parentless) Span. -// -// The second return value is a context.Context object built around the -// returned Span. -// -// Example usage: -// -// SomeFunction(ctx context.Context, ...) { -// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") -// defer sp.Finish() -// ... -// } -func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { - return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) -} - -// StartSpanFromContextWithTracer starts and returns a span with `operationName` -// using a span found within the context as a ChildOfRef. If that doesn't exist -// it creates a root span. It also returns a context.Context object built -// around the returned span. -// -// It's behavior is identical to StartSpanFromContext except that it takes an explicit -// tracer as opposed to using the global tracer. -func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { - if parentSpan := SpanFromContext(ctx); parentSpan != nil { - opts = append(opts, ChildOf(parentSpan.Context())) - } - span := tracer.StartSpan(operationName, opts...) 
- return span, ContextWithSpan(ctx, span) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go deleted file mode 100644 index f222ded797c1..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/field.go +++ /dev/null @@ -1,282 +0,0 @@ -package log - -import ( - "fmt" - "math" -) - -type fieldType int - -const ( - stringType fieldType = iota - boolType - intType - int32Type - uint32Type - int64Type - uint64Type - float32Type - float64Type - errorType - objectType - lazyLoggerType - noopType -) - -// Field instances are constructed via LogBool, LogString, and so on. -// Tracing implementations may then handle them via the Field.Marshal -// method. -// -// "heavily influenced by" (i.e., partially stolen from) -// https://github.com/uber-go/zap -type Field struct { - key string - fieldType fieldType - numericVal int64 - stringVal string - interfaceVal interface{} -} - -// String adds a string-valued key:value pair to a Span.LogFields() record -func String(key, val string) Field { - return Field{ - key: key, - fieldType: stringType, - stringVal: val, - } -} - -// Bool adds a bool-valued key:value pair to a Span.LogFields() record -func Bool(key string, val bool) Field { - var numericVal int64 - if val { - numericVal = 1 - } - return Field{ - key: key, - fieldType: boolType, - numericVal: numericVal, - } -} - -// Int adds an int-valued key:value pair to a Span.LogFields() record -func Int(key string, val int) Field { - return Field{ - key: key, - fieldType: intType, - numericVal: int64(val), - } -} - -// Int32 adds an int32-valued key:value pair to a Span.LogFields() record -func Int32(key string, val int32) Field { - return Field{ - key: key, - fieldType: int32Type, - numericVal: int64(val), - } -} - -// Int64 adds an int64-valued key:value pair to a Span.LogFields() record -func Int64(key string, val int64) Field { - return Field{ - key: key, - fieldType: int64Type, - numericVal: val, - } -} - -// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record -func Uint32(key string, val uint32) Field { - return Field{ - key: key, - fieldType: uint32Type, - numericVal: int64(val), - } -} - -// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record -func Uint64(key string, val uint64) Field { - return Field{ - key: key, - fieldType: uint64Type, - numericVal: int64(val), - } -} - -// Float32 adds a float32-valued key:value pair to a Span.LogFields() record -func Float32(key string, val float32) Field { - return Field{ - key: key, - fieldType: float32Type, - numericVal: int64(math.Float32bits(val)), - } -} - -// Float64 adds a float64-valued key:value pair to a Span.LogFields() record -func Float64(key string, val float64) Field { - return Field{ - key: key, - fieldType: float64Type, - numericVal: int64(math.Float64bits(val)), - } -} - -// Error adds an error with the key "error.object" to a Span.LogFields() record -func Error(err error) Field { - return Field{ - key: "error.object", - fieldType: errorType, - interfaceVal: err, - } -} - -// Object adds an object-valued key:value pair to a Span.LogFields() record -// Please pass in an immutable object, otherwise there may be concurrency issues. -// Such as passing in the map, log.Object may result in "fatal error: concurrent map iteration and map write". -// Because span is sent asynchronously, it is possible that this map will also be modified. 
-func Object(key string, obj interface{}) Field { - return Field{ - key: key, - fieldType: objectType, - interfaceVal: obj, - } -} - -// Event creates a string-valued Field for span logs with key="event" and value=val. -func Event(val string) Field { - return String("event", val) -} - -// Message creates a string-valued Field for span logs with key="message" and value=val. -func Message(val string) Field { - return String("message", val) -} - -// LazyLogger allows for user-defined, late-bound logging of arbitrary data -type LazyLogger func(fv Encoder) - -// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing -// implementation will call the LazyLogger function at an indefinite time in -// the future (after Lazy() returns). -func Lazy(ll LazyLogger) Field { - return Field{ - fieldType: lazyLoggerType, - interfaceVal: ll, - } -} - -// Noop creates a no-op log field that should be ignored by the tracer. -// It can be used to capture optional fields, for example those that should -// only be logged in non-production environment: -// -// func customerField(order *Order) log.Field { -// if os.Getenv("ENVIRONMENT") == "dev" { -// return log.String("customer", order.Customer.ID) -// } -// return log.Noop() -// } -// -// span.LogFields(log.String("event", "purchase"), customerField(order)) -// -func Noop() Field { - return Field{ - fieldType: noopType, - } -} - -// Encoder allows access to the contents of a Field (via a call to -// Field.Marshal). -// -// Tracer implementations typically provide an implementation of Encoder; -// OpenTracing callers typically do not need to concern themselves with it. -type Encoder interface { - EmitString(key, value string) - EmitBool(key string, value bool) - EmitInt(key string, value int) - EmitInt32(key string, value int32) - EmitInt64(key string, value int64) - EmitUint32(key string, value uint32) - EmitUint64(key string, value uint64) - EmitFloat32(key string, value float32) - EmitFloat64(key string, value float64) - EmitObject(key string, value interface{}) - EmitLazyLogger(value LazyLogger) -} - -// Marshal passes a Field instance through to the appropriate -// field-type-specific method of an Encoder. -func (lf Field) Marshal(visitor Encoder) { - switch lf.fieldType { - case stringType: - visitor.EmitString(lf.key, lf.stringVal) - case boolType: - visitor.EmitBool(lf.key, lf.numericVal != 0) - case intType: - visitor.EmitInt(lf.key, int(lf.numericVal)) - case int32Type: - visitor.EmitInt32(lf.key, int32(lf.numericVal)) - case int64Type: - visitor.EmitInt64(lf.key, int64(lf.numericVal)) - case uint32Type: - visitor.EmitUint32(lf.key, uint32(lf.numericVal)) - case uint64Type: - visitor.EmitUint64(lf.key, uint64(lf.numericVal)) - case float32Type: - visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) - case float64Type: - visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) - case errorType: - if err, ok := lf.interfaceVal.(error); ok { - visitor.EmitString(lf.key, err.Error()) - } else { - visitor.EmitString(lf.key, "") - } - case objectType: - visitor.EmitObject(lf.key, lf.interfaceVal) - case lazyLoggerType: - visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) - case noopType: - // intentionally left blank - } -} - -// Key returns the field's key. -func (lf Field) Key() string { - return lf.key -} - -// Value returns the field's value as interface{}. 
-func (lf Field) Value() interface{} { - switch lf.fieldType { - case stringType: - return lf.stringVal - case boolType: - return lf.numericVal != 0 - case intType: - return int(lf.numericVal) - case int32Type: - return int32(lf.numericVal) - case int64Type: - return int64(lf.numericVal) - case uint32Type: - return uint32(lf.numericVal) - case uint64Type: - return uint64(lf.numericVal) - case float32Type: - return math.Float32frombits(uint32(lf.numericVal)) - case float64Type: - return math.Float64frombits(uint64(lf.numericVal)) - case errorType, objectType, lazyLoggerType: - return lf.interfaceVal - case noopType: - return nil - default: - return nil - } -} - -// String returns a string representation of the key and value. -func (lf Field) String() string { - return fmt.Sprint(lf.key, ":", lf.Value()) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go deleted file mode 100644 index d57e28aa57f3..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/util.go +++ /dev/null @@ -1,61 +0,0 @@ -package log - -import ( - "fmt" - "reflect" -) - -// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice -// a la Span.LogFields(). -func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { - if len(keyValues)%2 != 0 { - return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) - } - fields := make([]Field, len(keyValues)/2) - for i := 0; i*2 < len(keyValues); i++ { - key, ok := keyValues[i*2].(string) - if !ok { - return nil, fmt.Errorf( - "non-string key (pair #%d): %T", - i, keyValues[i*2]) - } - switch typedVal := keyValues[i*2+1].(type) { - case bool: - fields[i] = Bool(key, typedVal) - case string: - fields[i] = String(key, typedVal) - case int: - fields[i] = Int(key, typedVal) - case int8: - fields[i] = Int32(key, int32(typedVal)) - case int16: - fields[i] = Int32(key, int32(typedVal)) - case int32: - fields[i] = Int32(key, typedVal) - case int64: - fields[i] = Int64(key, typedVal) - case uint: - fields[i] = Uint64(key, uint64(typedVal)) - case uint64: - fields[i] = Uint64(key, typedVal) - case uint8: - fields[i] = Uint32(key, uint32(typedVal)) - case uint16: - fields[i] = Uint32(key, uint32(typedVal)) - case uint32: - fields[i] = Uint32(key, typedVal) - case float32: - fields[i] = Float32(key, typedVal) - case float64: - fields[i] = Float64(key, typedVal) - default: - if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) { - fields[i] = String(key, "nil") - continue - } - // When in doubt, coerce to a string - fields[i] = String(key, fmt.Sprint(typedVal)) - } - } - return fields, nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go deleted file mode 100644 index f9b680a213de..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/noop.go +++ /dev/null @@ -1,64 +0,0 @@ -package opentracing - -import "github.com/opentracing/opentracing-go/log" - -// A NoopTracer is a trivial, minimum overhead implementation of Tracer -// for which all operations are no-ops. -// -// The primary use of this implementation is in libraries, such as RPC -// frameworks, that make tracing an optional feature controlled by the -// end user. A no-op implementation allows said libraries to use it -// as the default Tracer and to write instrumentation that does -// not need to keep checking if the tracer instance is nil. 
-// -// For the same reason, the NoopTracer is the default "global" tracer -// (see GlobalTracer and SetGlobalTracer functions). -// -// WARNING: NoopTracer does not support baggage propagation. -type NoopTracer struct{} - -type noopSpan struct{} -type noopSpanContext struct{} - -var ( - defaultNoopSpanContext SpanContext = noopSpanContext{} - defaultNoopSpan Span = noopSpan{} - defaultNoopTracer Tracer = NoopTracer{} -) - -const ( - emptyString = "" -) - -// noopSpanContext: -func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} - -// noopSpan: -func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } -func (n noopSpan) SetBaggageItem(key, val string) Span { return n } -func (n noopSpan) BaggageItem(key string) string { return emptyString } -func (n noopSpan) SetTag(key string, value interface{}) Span { return n } -func (n noopSpan) LogFields(fields ...log.Field) {} -func (n noopSpan) LogKV(keyVals ...interface{}) {} -func (n noopSpan) Finish() {} -func (n noopSpan) FinishWithOptions(opts FinishOptions) {} -func (n noopSpan) SetOperationName(operationName string) Span { return n } -func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } -func (n noopSpan) LogEvent(event string) {} -func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} -func (n noopSpan) Log(data LogData) {} - -// StartSpan belongs to the Tracer interface. -func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { - return defaultNoopSpan -} - -// Inject belongs to the Tracer interface. -func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { - return nil -} - -// Extract belongs to the Tracer interface. -func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { - return nil, ErrSpanContextNotFound -} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go deleted file mode 100644 index b0c275eb05e4..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/propagation.go +++ /dev/null @@ -1,176 +0,0 @@ -package opentracing - -import ( - "errors" - "net/http" -) - -/////////////////////////////////////////////////////////////////////////////// -// CORE PROPAGATION INTERFACES: -/////////////////////////////////////////////////////////////////////////////// - -var ( - // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or - // Tracer.Extract() is not recognized by the Tracer implementation. - ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") - - // ErrSpanContextNotFound occurs when the `carrier` passed to - // Tracer.Extract() is valid and uncorrupted but has insufficient - // information to extract a SpanContext. - ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") - - // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to - // operate on a SpanContext which it is not prepared to handle (for - // example, since it was created by a different tracer implementation). - ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") - - // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() - // implementations expect a different type of `carrier` than they are - // given. 
- ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") - - // ErrSpanContextCorrupted occurs when the `carrier` passed to - // Tracer.Extract() is of the expected type but is corrupted. - ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") -) - -/////////////////////////////////////////////////////////////////////////////// -// BUILTIN PROPAGATION FORMATS: -/////////////////////////////////////////////////////////////////////////////// - -// BuiltinFormat is used to demarcate the values within package `opentracing` -// that are intended for use with the Tracer.Inject() and Tracer.Extract() -// methods. -type BuiltinFormat byte - -const ( - // Binary represents SpanContexts as opaque binary data. - // - // For Tracer.Inject(): the carrier must be an `io.Writer`. - // - // For Tracer.Extract(): the carrier must be an `io.Reader`. - Binary BuiltinFormat = iota - - // TextMap represents SpanContexts as key:value string pairs. - // - // Unlike HTTPHeaders, the TextMap format does not restrict the key or - // value character sets in any way. - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - TextMap - - // HTTPHeaders represents SpanContexts as HTTP header string pairs. - // - // Unlike TextMap, the HTTPHeaders format requires that the keys and values - // be valid as HTTP headers as-is (i.e., character casing may be unstable - // and special characters are disallowed in keys, values should be - // URL-escaped, etc). - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - // - // See HTTPHeadersCarrier for an implementation of both TextMapWriter - // and TextMapReader that defers to an http.Header instance for storage. - // For example, Inject(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := span.Tracer().Inject( - // span.Context(), opentracing.HTTPHeaders, carrier) - // - // Or Extract(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract( - // opentracing.HTTPHeaders, carrier) - // - HTTPHeaders -) - -// TextMapWriter is the Inject() carrier for the TextMap builtin format. With -// it, the caller can encode a SpanContext for propagation as entries in a map -// of unicode strings. -type TextMapWriter interface { - // Set a key:value pair to the carrier. Multiple calls to Set() for the - // same key leads to undefined behavior. - // - // NOTE: The backing store for the TextMapWriter may contain data unrelated - // to SpanContext. As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. - Set(key, val string) -} - -// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, -// the caller can decode a propagated SpanContext as entries in a map of -// unicode strings. -type TextMapReader interface { - // ForeachKey returns TextMap contents via repeated calls to the `handler` - // function. If any call to `handler` returns a non-nil error, ForeachKey - // terminates and returns that error. - // - // NOTE: The backing store for the TextMapReader may contain data unrelated - // to SpanContext. 
As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. - // - // The "foreach" callback pattern reduces unnecessary copying in some cases - // and also allows implementations to hold locks while the map is read. - ForeachKey(handler func(key, val string) error) error -} - -// TextMapCarrier allows the use of regular map[string]string -// as both TextMapWriter and TextMapReader. -type TextMapCarrier map[string]string - -// ForeachKey conforms to the TextMapReader interface. -func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { - for k, v := range c { - if err := handler(k, v); err != nil { - return err - } - } - return nil -} - -// Set implements Set() of opentracing.TextMapWriter -func (c TextMapCarrier) Set(key, val string) { - c[key] = val -} - -// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. -// -// Example usage for server side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) -// -// Example usage for client side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// err := tracer.Inject( -// span.Context(), -// opentracing.HTTPHeaders, -// carrier) -// -type HTTPHeadersCarrier http.Header - -// Set conforms to the TextMapWriter interface. -func (c HTTPHeadersCarrier) Set(key, val string) { - h := http.Header(c) - h.Set(key, val) -} - -// ForeachKey conforms to the TextMapReader interface. -func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { - for k, vals := range c { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go deleted file mode 100644 index 0d3fb5341838..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/span.go +++ /dev/null @@ -1,189 +0,0 @@ -package opentracing - -import ( - "time" - - "github.com/opentracing/opentracing-go/log" -) - -// SpanContext represents Span state that must propagate to descendant Spans and across process -// boundaries (e.g., a tuple). -type SpanContext interface { - // ForeachBaggageItem grants access to all baggage items stored in the - // SpanContext. - // The handler function will be called for each baggage key/value pair. - // The ordering of items is not guaranteed. - // - // The bool return value indicates if the handler wants to continue iterating - // through the rest of the baggage items; for example if the handler is trying to - // find some baggage item by pattern matching the name, it can return false - // as soon as the item is found to stop further iterations. - ForeachBaggageItem(handler func(k, v string) bool) -} - -// Span represents an active, un-finished span in the OpenTracing system. -// -// Spans are created by the Tracer interface. -type Span interface { - // Sets the end timestamp and finalizes Span state. - // - // With the exception of calls to Context() (which are always allowed), - // Finish() must be the last call made to any span instance, and to do - // otherwise leads to undefined behavior. - Finish() - // FinishWithOptions is like Finish() but with explicit control over - // timestamps and log data. - FinishWithOptions(opts FinishOptions) - - // Context() yields the SpanContext for this Span. 
Note that the return - // value of Context() is still valid after a call to Span.Finish(), as is - // a call to Span.Context() after a call to Span.Finish(). - Context() SpanContext - - // Sets or changes the operation name. - // - // Returns a reference to this Span for chaining. - SetOperationName(operationName string) Span - - // Adds a tag to the span. - // - // If there is a pre-existing tag set for `key`, it is overwritten. - // - // Tag values can be numeric types, strings, or bools. The behavior of - // other tag value types is undefined at the OpenTracing level. If a - // tracing system does not know how to handle a particular value type, it - // may ignore the tag, but shall not panic. - // - // Returns a reference to this Span for chaining. - SetTag(key string, value interface{}) Span - - // LogFields is an efficient and type-checked way to record key:value - // logging data about a Span, though the programming interface is a little - // more verbose than LogKV(). Here's an example: - // - // span.LogFields( - // log.String("event", "soft error"), - // log.String("type", "cache timeout"), - // log.Int("waited.millis", 1500)) - // - // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. - LogFields(fields ...log.Field) - - // LogKV is a concise, readable way to record key:value logging data about - // a Span, though unfortunately this also makes it less efficient and less - // type-safe than LogFields(). Here's an example: - // - // span.LogKV( - // "event", "soft error", - // "type", "cache timeout", - // "waited.millis", 1500) - // - // For LogKV (as opposed to LogFields()), the parameters must appear as - // key-value pairs, like - // - // span.LogKV(key1, val1, key2, val2, key3, val3, ...) - // - // The keys must all be strings. The values may be strings, numeric types, - // bools, Go error instances, or arbitrary structs. - // - // (Note to implementors: consider the log.InterleavedKVToFields() helper) - LogKV(alternatingKeyValues ...interface{}) - - // SetBaggageItem sets a key:value pair on this Span and its SpanContext - // that also propagates to descendants of this Span. - // - // SetBaggageItem() enables powerful functionality given a full-stack - // opentracing integration (e.g., arbitrary application data from a mobile - // app can make it, transparently, all the way into the depths of a storage - // system), and with it some powerful costs: use this feature with care. - // - // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to - // *future* causal descendants of the associated Span. - // - // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and - // value is copied into every local *and remote* child of the associated - // Span, and that can add up to a lot of network and cpu overhead. - // - // Returns a reference to this Span for chaining. - SetBaggageItem(restrictedKey, value string) Span - - // Gets the value for a baggage item given its key. Returns the empty string - // if the value isn't found in this Span. - BaggageItem(restrictedKey string) string - - // Provides access to the Tracer that created this Span. - Tracer() Tracer - - // Deprecated: use LogFields or LogKV - LogEvent(event string) - // Deprecated: use LogFields or LogKV - LogEventWithPayload(event string, payload interface{}) - // Deprecated: use LogFields or LogKV - Log(data LogData) -} - -// LogRecord is data associated with a single Span log. Every LogRecord -// instance must specify at least one Field. 
-type LogRecord struct { - Timestamp time.Time - Fields []log.Field -} - -// FinishOptions allows Span.FinishWithOptions callers to override the finish -// timestamp and provide log data via a bulk interface. -type FinishOptions struct { - // FinishTime overrides the Span's finish time, or implicitly becomes - // time.Now() if FinishTime.IsZero(). - // - // FinishTime must resolve to a timestamp that's >= the Span's StartTime - // (per StartSpanOptions). - FinishTime time.Time - - // LogRecords allows the caller to specify the contents of many LogFields() - // calls with a single slice. May be nil. - // - // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must - // be set explicitly). Also, they must be >= the Span's start timestamp and - // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the - // behavior of FinishWithOptions() is undefined. - // - // If specified, the caller hands off ownership of LogRecords at - // FinishWithOptions() invocation time. - // - // If specified, the (deprecated) BulkLogData must be nil or empty. - LogRecords []LogRecord - - // BulkLogData is DEPRECATED. - BulkLogData []LogData -} - -// LogData is DEPRECATED -type LogData struct { - Timestamp time.Time - Event string - Payload interface{} -} - -// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord -func (ld *LogData) ToLogRecord() LogRecord { - var literalTimestamp time.Time - if ld.Timestamp.IsZero() { - literalTimestamp = time.Now() - } else { - literalTimestamp = ld.Timestamp - } - rval := LogRecord{ - Timestamp: literalTimestamp, - } - if ld.Payload == nil { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - } - } else { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - log.Object("payload", ld.Payload), - } - } - return rval -} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go deleted file mode 100644 index 715f0cedfb60..000000000000 --- a/vendor/github.com/opentracing/opentracing-go/tracer.go +++ /dev/null @@ -1,304 +0,0 @@ -package opentracing - -import "time" - -// Tracer is a simple, thin interface for Span creation and SpanContext -// propagation. -type Tracer interface { - - // Create, start, and return a new Span with the given `operationName` and - // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows - // from the "functional options" pattern, per - // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) - // - // A Span with no SpanReference options (e.g., opentracing.ChildOf() or - // opentracing.FollowsFrom()) becomes the root of its own trace. - // - // Examples: - // - // var tracer opentracing.Tracer = ... - // - // // The root-span case: - // sp := tracer.StartSpan("GetFeed") - // - // // The vanilla child span case: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context())) - // - // // All the bells and whistles: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context()), - // opentracing.Tag{"user_agent", loggedReq.UserAgent}, - // opentracing.StartTime(loggedReq.Timestamp), - // ) - // - StartSpan(operationName string, opts ...StartSpanOption) Span - - // Inject() takes the `sm` SpanContext instance and injects it for - // propagation within `carrier`. The actual type of `carrier` depends on - // the value of `format`. 
- // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. - // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see https://godoc.org/context#WithValue). - // - // Example usage (sans error handling): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := tracer.Inject( - // span.Context(), - // opentracing.HTTPHeaders, - // carrier) - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Implementations may return opentracing.ErrUnsupportedFormat if `format` - // is not supported by (or not known by) the implementation. - // - // Implementations may return opentracing.ErrInvalidCarrier or any other - // implementation-specific error if the format is supported but injection - // fails anyway. - // - // See Tracer.Extract(). - Inject(sm SpanContext, format interface{}, carrier interface{}) error - - // Extract() returns a SpanContext instance given `format` and `carrier`. - // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. - // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see - // https://godoc.org/golang.org/x/net/context#WithValue). - // - // Example usage (with StartSpan): - // - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) - // - // // ... assuming the ultimate goal here is to resume the trace with a - // // server-side Span: - // var serverSpan opentracing.Span - // if err == nil { - // span = tracer.StartSpan( - // rpcMethodName, ext.RPCServerOption(clientContext)) - // } else { - // span = tracer.StartSpan(rpcMethodName) - // } - // - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Return values: - // - A successful Extract returns a SpanContext instance and a nil error - // - If there was simply no SpanContext to extract in `carrier`, Extract() - // returns (nil, opentracing.ErrSpanContextNotFound) - // - If `format` is unsupported or unrecognized, Extract() returns (nil, - // opentracing.ErrUnsupportedFormat) - // - If there are more fundamental problems with the `carrier` object, - // Extract() may return opentracing.ErrInvalidCarrier, - // opentracing.ErrSpanContextCorrupted, or implementation-specific - // errors. - // - // See Tracer.Inject(). - Extract(format interface{}, carrier interface{}) (SpanContext, error) -} - -// StartSpanOptions allows Tracer.StartSpan() callers and implementors a -// mechanism to override the start timestamp, specify Span References, and make -// a single Tag or multiple Tags available at Span start time. -// -// StartSpan() callers should look at the StartSpanOption interface and -// implementations available in this package. -// -// Tracer implementations can convert a slice of `StartSpanOption` instances -// into a `StartSpanOptions` struct like so: -// -// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { -// sso := opentracing.StartSpanOptions{} -// for _, o := range opts { -// o.Apply(&sso) -// } -// ... -// } -// -type StartSpanOptions struct { - // Zero or more causal references to other Spans (via their SpanContext). - // If empty, start a "root" Span (i.e., start a new trace). 
- References []SpanReference - - // StartTime overrides the Span's start time, or implicitly becomes - // time.Now() if StartTime.IsZero(). - StartTime time.Time - - // Tags may have zero or more entries; the restrictions on map values are - // identical to those for Span.SetTag(). May be nil. - // - // If specified, the caller hands off ownership of Tags at - // StartSpan() invocation time. - Tags map[string]interface{} -} - -// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. -// -// StartSpanOption borrows from the "functional options" pattern, per -// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis -type StartSpanOption interface { - Apply(*StartSpanOptions) -} - -// SpanReferenceType is an enum type describing different categories of -// relationships between two Spans. If Span-2 refers to Span-1, the -// SpanReferenceType describes Span-1 from Span-2's perspective. For example, -// ChildOfRef means that Span-1 created Span-2. -// -// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for -// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, -// or Span-2 may be sitting in a distributed queue behind Span-1. -type SpanReferenceType int - -const ( - // ChildOfRef refers to a parent Span that caused *and* somehow depends - // upon the new child Span. Often (but not always), the parent Span cannot - // finish until the child Span does. - // - // An timing diagram for a ChildOfRef that's blocked on the new Span: - // - // [-Parent Span---------] - // [-Child Span----] - // - // See http://opentracing.io/spec/ - // - // See opentracing.ChildOf() - ChildOfRef SpanReferenceType = iota - - // FollowsFromRef refers to a parent Span that does not depend in any way - // on the result of the new child Span. For instance, one might use - // FollowsFromRefs to describe pipeline stages separated by queues, - // or a fire-and-forget cache insert at the tail end of a web request. - // - // A FollowsFromRef Span is part of the same logical trace as the new Span: - // i.e., the new Span is somehow caused by the work of its FollowsFromRef. - // - // All of the following could be valid timing diagrams for children that - // "FollowFrom" a parent. - // - // [-Parent Span-] [-Child Span-] - // - // - // [-Parent Span--] - // [-Child Span-] - // - // - // [-Parent Span-] - // [-Child Span-] - // - // See http://opentracing.io/spec/ - // - // See opentracing.FollowsFrom() - FollowsFromRef -) - -// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a -// referenced SpanContext. See the SpanReferenceType documentation for -// supported relationships. If SpanReference is created with -// ReferencedContext==nil, it has no effect. Thus it allows for a more concise -// syntax for starting spans: -// -// sc, _ := tracer.Extract(someFormat, someCarrier) -// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) -// -// The `ChildOf(sc)` option above will not panic if sc == nil, it will just -// not add the parent span reference to the options. -type SpanReference struct { - Type SpanReferenceType - ReferencedContext SpanContext -} - -// Apply satisfies the StartSpanOption interface. -func (r SpanReference) Apply(o *StartSpanOptions) { - if r.ReferencedContext != nil { - o.References = append(o.References, r) - } -} - -// ChildOf returns a StartSpanOption pointing to a dependent parent span. -// If sc == nil, the option has no effect. 
-// -// See ChildOfRef, SpanReference -func ChildOf(sc SpanContext) SpanReference { - return SpanReference{ - Type: ChildOfRef, - ReferencedContext: sc, - } -} - -// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused -// the child Span but does not directly depend on its result in any way. -// If sc == nil, the option has no effect. -// -// See FollowsFromRef, SpanReference -func FollowsFrom(sc SpanContext) SpanReference { - return SpanReference{ - Type: FollowsFromRef, - ReferencedContext: sc, - } -} - -// StartTime is a StartSpanOption that sets an explicit start timestamp for the -// new Span. -type StartTime time.Time - -// Apply satisfies the StartSpanOption interface. -func (t StartTime) Apply(o *StartSpanOptions) { - o.StartTime = time.Time(t) -} - -// Tags are a generic map from an arbitrary string key to an opaque value type. -// The underlying tracing system is responsible for interpreting and -// serializing the values. -type Tags map[string]interface{} - -// Apply satisfies the StartSpanOption interface. -func (t Tags) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - for k, v := range t { - o.Tags[k] = v - } -} - -// Tag may be passed as a StartSpanOption to add a tag to new spans, -// or its Set method may be used to apply the tag to an existing Span, -// for example: -// -// tracer.StartSpan("opName", Tag{"Key", value}) -// -// or -// -// Tag{"key", value}.Set(span) -type Tag struct { - Key string - Value interface{} -} - -// Apply satisfies the StartSpanOption interface. -func (t Tag) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - o.Tags[t.Key] = t.Value -} - -// Set applies the tag to an existing Span. -func (t Tag) Set(s Span) { - s.SetTag(t.Key, t.Value) -} diff --git a/vendor/github.com/tonistiigi/fsutil/LICENSE b/vendor/github.com/tonistiigi/fsutil/LICENSE deleted file mode 100644 index 7df441d93e77..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -MIT - -Copyright 2017 Tõnis Tiigi - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go deleted file mode 100644 index 74f08a15caa7..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/chtimes_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build linux - -package fsutil - -import ( - "github.com/pkg/errors" - "golang.org/x/sys/unix" -) - -func chtimes(path string, un int64) error { - var utimes [2]unix.Timespec - utimes[0] = unix.NsecToTimespec(un) - utimes[1] = utimes[0] - - if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrap(err, "failed call to UtimesNanoAt") - } - - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go b/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go deleted file mode 100644 index a3ba09881d59..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/chtimes_nolinux.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !linux - -package fsutil - -import ( - "os" - "time" - - "github.com/pkg/errors" -) - -func chtimes(path string, un int64) error { - mtime := time.Unix(0, un) - fi, err := os.Lstat(path) - if err != nil { - return errors.WithStack(err) - } - if fi.Mode()&os.ModeSymlink != 0 { - return nil - } - return errors.WithStack(os.Chtimes(path, mtime, mtime)) -} diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go deleted file mode 100644 index a7405dc5332c..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/diff.go +++ /dev/null @@ -1,51 +0,0 @@ -package fsutil - -import ( - "context" - "hash" - "os" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error - -func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error { - return nil -} - -type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error - -type ContentHasher func(*types.Stat) (hash.Hash, error) - -func getWalkerFn(root string) walkerFn { - return func(ctx context.Context, pathC chan<- *currentPath) error { - return errors.Wrap(Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - stat, ok := f.Sys().(*types.Stat) - if !ok { - return errors.Errorf("%T invalid file without stat information", f.Sys()) - } - - p := ¤tPath{ - path: path, - stat: stat, - } - - select { - case <-ctx.Done(): - return ctx.Err() - case pathC <- p: - return nil - } - }), "failed to walk") - } -} - -func emptyWalker(ctx context.Context, pathC chan<- *currentPath) error { - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd.go deleted file mode 100644 index d72f4e64fec1..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/diff_containerd.go +++ /dev/null @@ -1,240 +0,0 @@ -package fsutil - -import ( - "bytes" - "context" - "io" - "os" - "strings" - - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -// Everything below is copied from containerd/fs. TODO: remove duplication @dmcgowan - -// Const redefined because containerd/fs doesn't build on !linux - -// ChangeKind is the type of modification that -// a change is making. 
-type ChangeKind int - -const ( - // ChangeKindAdd represents an addition of - // a file - ChangeKindAdd ChangeKind = iota - - // ChangeKindModify represents a change to - // an existing file - ChangeKindModify - - // ChangeKindDelete represents a delete of - // a file - ChangeKindDelete -) - -// ChangeFunc is the type of function called for each change -// computed during a directory changes calculation. -type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error - -const compareChunkSize = 32 * 1024 - -type currentPath struct { - path string - stat *types.Stat - // fullPath string -} - -// doubleWalkDiff walks both directories to create a diff -func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b walkerFn, filter FilterFunc, differ DiffType) (err error) { - g, ctx := errgroup.WithContext(ctx) - - var ( - c1 = make(chan *currentPath, 128) - c2 = make(chan *currentPath, 128) - - f1, f2 *currentPath - rmdir string - ) - g.Go(func() error { - defer close(c1) - return a(ctx, c1) - }) - g.Go(func() error { - defer close(c2) - return b(ctx, c2) - }) - g.Go(func() error { - loop0: - for c1 != nil || c2 != nil { - if f1 == nil && c1 != nil { - f1, err = nextPath(ctx, c1) - if err != nil { - return err - } - if f1 == nil { - c1 = nil - } - } - - if f2 == nil && c2 != nil { - f2, err = nextPath(ctx, c2) - if err != nil { - return err - } - if f2 == nil { - c2 = nil - } - } - if f1 == nil && f2 == nil { - continue - } - - var f *types.Stat - var f2copy *currentPath - if f2 != nil { - statCopy := *f2.stat - if filter != nil { - filter(f2.path, &statCopy) - } - f2copy = ¤tPath{path: f2.path, stat: &statCopy} - } - k, p := pathChange(f1, f2copy) - switch k { - case ChangeKindAdd: - if rmdir != "" { - rmdir = "" - } - f = f2.stat - f2 = nil - case ChangeKindDelete: - // Check if this file is already removed by being - // under of a removed directory - if rmdir != "" && strings.HasPrefix(f1.path, rmdir) { - f1 = nil - continue - } else if rmdir == "" && f1.stat.IsDir() { - rmdir = f1.path + string(os.PathSeparator) - } else if rmdir != "" { - rmdir = "" - } - f1 = nil - case ChangeKindModify: - same, err := sameFile(f1, f2copy, differ) - if err != nil { - return err - } - if f1.stat.IsDir() && !f2copy.stat.IsDir() { - rmdir = f1.path + string(os.PathSeparator) - } else if rmdir != "" { - rmdir = "" - } - f = f2.stat - f1 = nil - f2 = nil - if same { - continue loop0 - } - } - if err := changeFn(k, p, &StatInfo{f}, nil); err != nil { - return err - } - } - return nil - }) - - return g.Wait() -} - -func pathChange(lower, upper *currentPath) (ChangeKind, string) { - if lower == nil { - if upper == nil { - panic("cannot compare nil paths") - } - return ChangeKindAdd, upper.path - } - if upper == nil { - return ChangeKindDelete, lower.path - } - - switch i := ComparePath(lower.path, upper.path); { - case i < 0: - // File in lower that is not in upper - return ChangeKindDelete, lower.path - case i > 0: - // File in upper that is not in lower - return ChangeKindAdd, upper.path - default: - return ChangeKindModify, upper.path - } -} - -func sameFile(f1, f2 *currentPath, differ DiffType) (same bool, retErr error) { - if differ == DiffNone { - return false, nil - } - // If not a directory also check size, modtime, and content - if !f1.stat.IsDir() { - if f1.stat.Size_ != f2.stat.Size_ { - return false, nil - } - - if f1.stat.ModTime != f2.stat.ModTime { - return false, nil - } - } - - same, err := compareStat(f1.stat, f2.stat) - if err != nil || !same || differ == DiffMetadata { - return 
same, err - } - return compareFileContent(f1.path, f2.path) -} - -func compareFileContent(p1, p2 string) (bool, error) { - f1, err := os.Open(p1) - if err != nil { - return false, err - } - defer f1.Close() - f2, err := os.Open(p2) - if err != nil { - return false, err - } - defer f2.Close() - - b1 := make([]byte, compareChunkSize) - b2 := make([]byte, compareChunkSize) - for { - n1, err1 := f1.Read(b1) - if err1 != nil && err1 != io.EOF { - return false, err1 - } - n2, err2 := f2.Read(b2) - if err2 != nil && err2 != io.EOF { - return false, err2 - } - if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) { - return false, nil - } - if err1 == io.EOF && err2 == io.EOF { - return true, nil - } - } -} - -// compareStat returns whether the stats are equivalent, -// whether the files are considered the same file, and -// an error -func compareStat(ls1, ls2 *types.Stat) (bool, error) { - return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Devmajor == ls2.Devmajor && ls1.Devminor == ls2.Devminor && ls1.Linkname == ls2.Linkname, nil -} - -func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case p := <-pathC: - return p, nil - } -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go deleted file mode 100644 index 786432264f0f..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go +++ /dev/null @@ -1,351 +0,0 @@ -package fsutil - -import ( - "context" - "hash" - "io" - "os" - "path/filepath" - "strconv" - "sync" - "syscall" - "time" - - "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -type WriteToFunc func(context.Context, string, io.WriteCloser) error - -type DiskWriterOpt struct { - AsyncDataCb WriteToFunc - SyncDataCb WriteToFunc - NotifyCb func(ChangeKind, string, os.FileInfo, error) error - ContentHasher ContentHasher - Filter FilterFunc -} - -type FilterFunc func(string, *types.Stat) bool - -type DiskWriter struct { - opt DiskWriterOpt - dest string - - ctx context.Context - cancel func() - eg *errgroup.Group - filter FilterFunc -} - -func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) { - if opt.SyncDataCb == nil && opt.AsyncDataCb == nil { - return nil, errors.New("no data callback specified") - } - if opt.SyncDataCb != nil && opt.AsyncDataCb != nil { - return nil, errors.New("can't specify both sync and async data callbacks") - } - - ctx, cancel := context.WithCancel(ctx) - eg, ctx := errgroup.WithContext(ctx) - - return &DiskWriter{ - opt: opt, - dest: dest, - eg: eg, - ctx: ctx, - cancel: cancel, - filter: opt.Filter, - }, nil -} - -func (dw *DiskWriter) Wait(ctx context.Context) error { - return dw.eg.Wait() -} - -func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { - if err != nil { - return err - } - - select { - case <-dw.ctx.Done(): - return dw.ctx.Err() - default: - } - - defer func() { - if retErr != nil { - dw.cancel() - } - }() - - destPath := filepath.Join(dw.dest, filepath.FromSlash(p)) - - if kind == ChangeKindDelete { - if dw.filter != nil { - var empty types.Stat - if ok := dw.filter(p, &empty); !ok { - return nil - } - } - // todo: no need to validate if diff is trusted but is it always? 
- if err := os.RemoveAll(destPath); err != nil { - return errors.Wrapf(err, "failed to remove: %s", destPath) - } - if dw.opt.NotifyCb != nil { - if err := dw.opt.NotifyCb(kind, p, nil, nil); err != nil { - return err - } - } - return nil - } - - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EBADMSG, Op: "change without stat info"}) - } - - statCopy := *stat - - if dw.filter != nil { - if ok := dw.filter(p, &statCopy); !ok { - return nil - } - } - - rename := true - oldFi, err := os.Lstat(destPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - if kind != ChangeKindAdd { - return errors.Wrap(err, "modify/rm") - } - rename = false - } else { - return errors.WithStack(err) - } - } - - if oldFi != nil && fi.IsDir() && oldFi.IsDir() { - if err := rewriteMetadata(destPath, &statCopy); err != nil { - return errors.Wrapf(err, "error setting dir metadata for %s", destPath) - } - return nil - } - - newPath := destPath - if rename { - newPath = filepath.Join(filepath.Dir(destPath), ".tmp."+nextSuffix()) - } - - isRegularFile := false - - switch { - case fi.IsDir(): - if err := os.Mkdir(newPath, fi.Mode()); err != nil { - return errors.Wrapf(err, "failed to create dir %s", newPath) - } - case fi.Mode()&os.ModeDevice != 0 || fi.Mode()&os.ModeNamedPipe != 0: - if err := handleTarTypeBlockCharFifo(newPath, &statCopy); err != nil { - return errors.Wrapf(err, "failed to create device %s", newPath) - } - case fi.Mode()&os.ModeSymlink != 0: - if err := os.Symlink(statCopy.Linkname, newPath); err != nil { - return errors.Wrapf(err, "failed to symlink %s", newPath) - } - case statCopy.Linkname != "": - if err := os.Link(filepath.Join(dw.dest, statCopy.Linkname), newPath); err != nil { - return errors.Wrapf(err, "failed to link %s to %s", newPath, statCopy.Linkname) - } - default: - isRegularFile = true - file, err := os.OpenFile(newPath, os.O_CREATE|os.O_WRONLY, fi.Mode()) //todo: windows - if err != nil { - return errors.Wrapf(err, "failed to create %s", newPath) - } - if dw.opt.SyncDataCb != nil { - if err := dw.processChange(ChangeKindAdd, p, fi, file); err != nil { - file.Close() - return err - } - break - } - if err := file.Close(); err != nil { - return errors.Wrapf(err, "failed to close %s", newPath) - } - } - - if err := rewriteMetadata(newPath, &statCopy); err != nil { - return errors.Wrapf(err, "error setting metadata for %s", newPath) - } - - if rename { - if oldFi.IsDir() != fi.IsDir() { - if err := os.RemoveAll(destPath); err != nil { - return errors.Wrapf(err, "failed to remove %s", destPath) - } - } - if err := os.Rename(newPath, destPath); err != nil { - return errors.Wrapf(err, "failed to rename %s to %s", newPath, destPath) - } - } - - if isRegularFile { - if dw.opt.AsyncDataCb != nil { - dw.requestAsyncFileData(p, destPath, fi, &statCopy) - } - } else { - return dw.processChange(kind, p, fi, nil) - } - - return nil -} - -func (dw *DiskWriter) requestAsyncFileData(p, dest string, fi os.FileInfo, st *types.Stat) { - // todo: limit worker threads - dw.eg.Go(func() error { - if err := dw.processChange(ChangeKindAdd, p, fi, &lazyFileWriter{ - dest: dest, - }); err != nil { - return err - } - return chtimes(dest, st.ModTime) // TODO: parent dirs - }) -} - -func (dw *DiskWriter) processChange(kind ChangeKind, p string, fi os.FileInfo, w io.WriteCloser) error { - origw := w - var hw *hashedWriter - if dw.opt.NotifyCb != nil { - var err error - if hw, err = newHashWriter(dw.opt.ContentHasher, fi, w); err != nil { - return err - 
} - w = hw - } - if origw != nil { - fn := dw.opt.SyncDataCb - if fn == nil && dw.opt.AsyncDataCb != nil { - fn = dw.opt.AsyncDataCb - } - if err := fn(dw.ctx, p, w); err != nil { - return err - } - } else { - if hw != nil { - hw.Close() - } - } - if hw != nil { - return dw.opt.NotifyCb(kind, p, hw, nil) - } - return nil -} - -type hashedWriter struct { - os.FileInfo - io.Writer - h hash.Hash - w io.WriteCloser - dgst digest.Digest -} - -func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) { - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return nil, errors.Errorf("invalid change without stat information") - } - - h, err := ch(stat) - if err != nil { - return nil, err - } - hw := &hashedWriter{ - FileInfo: fi, - Writer: io.MultiWriter(w, h), - h: h, - w: w, - } - return hw, nil -} - -func (hw *hashedWriter) Close() error { - hw.dgst = digest.NewDigest(digest.SHA256, hw.h) - if hw.w != nil { - return hw.w.Close() - } - return nil -} - -func (hw *hashedWriter) Digest() digest.Digest { - return hw.dgst -} - -type lazyFileWriter struct { - dest string - f *os.File - fileMode *os.FileMode -} - -func (lfw *lazyFileWriter) Write(dt []byte) (int, error) { - if lfw.f == nil { - file, err := os.OpenFile(lfw.dest, os.O_WRONLY, 0) //todo: windows - if os.IsPermission(err) { - // retry after chmod - fi, er := os.Stat(lfw.dest) - if er == nil { - mode := fi.Mode() - lfw.fileMode = &mode - er = os.Chmod(lfw.dest, mode|0222) - if er == nil { - file, err = os.OpenFile(lfw.dest, os.O_WRONLY, 0) - } - } - } - if err != nil { - return 0, errors.Wrapf(err, "failed to open %s", lfw.dest) - } - lfw.f = file - } - return lfw.f.Write(dt) -} - -func (lfw *lazyFileWriter) Close() error { - var err error - if lfw.f != nil { - err = lfw.f.Close() - } - if err == nil && lfw.fileMode != nil { - err = os.Chmod(lfw.dest, *lfw.fileMode) - } - return err -} - -func mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} - -// Random number state. -// We generate random temporary file names so that there's a good -// chance the file doesn't exist yet - keeps the number of tries in -// TempFile to a minimum. 
-var rand uint32 -var randmu sync.Mutex - -func reseed() uint32 { - return uint32(time.Now().UnixNano() + int64(os.Getpid())) -} - -func nextSuffix() string { - randmu.Lock() - r := rand - if r == 0 { - r = reseed() - } - r = r*1664525 + 1013904223 // constants from Numerical Recipes - rand = r - randmu.Unlock() - return strconv.Itoa(int(1e9 + r%1e9))[1:] -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go deleted file mode 100644 index aa2d298f40ea..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build !windows - -package fsutil - -import ( - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func rewriteMetadata(p string, stat *types.Stat) error { - for key, value := range stat.Xattrs { - sysx.Setxattr(p, key, value, 0) - } - - if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil { - return errors.WithStack(err) - } - - if os.FileMode(stat.Mode)&os.ModeSymlink == 0 { - if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil { - return errors.WithStack(err) - } - } - - if err := chtimes(p, stat.ModTime); err != nil { - return err - } - - return nil -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { - mode := uint32(stat.Mode & 07777) - if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 { - mode |= syscall.S_IFCHR - } else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 { - mode |= syscall.S_IFIFO - } else { - mode |= syscall.S_IFBLK - } - - if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil { - return errors.WithStack(err) - } - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go deleted file mode 100644 index 036544f0b6fb..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package fsutil - -import ( - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func rewriteMetadata(p string, stat *types.Stat) error { - return chtimes(p, stat.ModTime) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { - return errors.New("Not implemented on windows") -} diff --git a/vendor/github.com/tonistiigi/fsutil/followlinks.go b/vendor/github.com/tonistiigi/fsutil/followlinks.go deleted file mode 100644 index a0942413e811..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/followlinks.go +++ /dev/null @@ -1,150 +0,0 @@ -package fsutil - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - strings "strings" - - "github.com/pkg/errors" -) - -func FollowLinks(root string, paths []string) ([]string, error) { - r := &symlinkResolver{root: root, resolved: map[string]struct{}{}} - for _, p := range paths { - if err := r.append(p); err != nil { - return nil, err - } - } - res := make([]string, 0, len(r.resolved)) - for r := range r.resolved { - res = append(res, r) - } - sort.Strings(res) - return dedupePaths(res), nil -} - -type symlinkResolver struct { - root string - resolved map[string]struct{} 
-} - -func (r *symlinkResolver) append(p string) error { - p = filepath.Join(".", p) - current := "." - for { - parts := strings.SplitN(p, string(filepath.Separator), 2) - current = filepath.Join(current, parts[0]) - - targets, err := r.readSymlink(current, true) - if err != nil { - return err - } - - p = "" - if len(parts) == 2 { - p = parts[1] - } - - if p == "" || targets != nil { - if _, ok := r.resolved[current]; ok { - return nil - } - } - - if targets != nil { - r.resolved[current] = struct{}{} - for _, target := range targets { - if err := r.append(filepath.Join(target, p)); err != nil { - return err - } - } - return nil - } - - if p == "" { - r.resolved[current] = struct{}{} - return nil - } - } -} - -func (r *symlinkResolver) readSymlink(p string, allowWildcard bool) ([]string, error) { - realPath := filepath.Join(r.root, p) - base := filepath.Base(p) - if allowWildcard && containsWildcards(base) { - fis, err := ioutil.ReadDir(filepath.Dir(realPath)) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return nil, nil - } - return nil, errors.Wrap(err, "readdir") - } - var out []string - for _, f := range fis { - if ok, _ := filepath.Match(base, f.Name()); ok { - res, err := r.readSymlink(filepath.Join(filepath.Dir(p), f.Name()), false) - if err != nil { - return nil, err - } - out = append(out, res...) - } - } - return out, nil - } - - fi, err := os.Lstat(realPath) - if err != nil { - if errors.Is(err, os.ErrNotExist) { - return nil, nil - } - return nil, errors.WithStack(err) - } - if fi.Mode()&os.ModeSymlink == 0 { - return nil, nil - } - link, err := os.Readlink(realPath) - if err != nil { - return nil, errors.WithStack(err) - } - link = filepath.Clean(link) - if filepath.IsAbs(link) { - return []string{link}, nil - } - return []string{ - filepath.Join(string(filepath.Separator), filepath.Join(filepath.Dir(p), link)), - }, nil -} - -func containsWildcards(name string) bool { - isWindows := runtime.GOOS == "windows" - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '\\' && !isWindows { - i++ - } else if ch == '*' || ch == '?' || ch == '[' { - return true - } - } - return false -} - -// dedupePaths expects input as a sorted list -func dedupePaths(in []string) []string { - out := make([]string, 0, len(in)) - var last string - for _, s := range in { - // if one of the paths is root there is no filter - if s == "." 
{ - return nil - } - if strings.HasPrefix(s, last+string(filepath.Separator)) { - continue - } - out = append(out, s) - last = s - } - return out -} diff --git a/vendor/github.com/tonistiigi/fsutil/fs.go b/vendor/github.com/tonistiigi/fsutil/fs.go deleted file mode 100644 index e26110b320b3..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/fs.go +++ /dev/null @@ -1,120 +0,0 @@ -package fsutil - -import ( - "context" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "sort" - "strings" - "syscall" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -type FS interface { - Walk(context.Context, filepath.WalkFunc) error - Open(string) (io.ReadCloser, error) -} - -func NewFS(root string, opt *WalkOpt) FS { - return &fs{ - root: root, - opt: opt, - } -} - -type fs struct { - root string - opt *WalkOpt -} - -func (fs *fs) Walk(ctx context.Context, fn filepath.WalkFunc) error { - return Walk(ctx, fs.root, fs.opt, fn) -} - -func (fs *fs) Open(p string) (io.ReadCloser, error) { - rc, err := os.Open(filepath.Join(fs.root, p)) - return rc, errors.WithStack(err) -} - -type Dir struct { - Stat types.Stat - FS FS -} - -func SubDirFS(dirs []Dir) (FS, error) { - sort.Slice(dirs, func(i, j int) bool { - return dirs[i].Stat.Path < dirs[j].Stat.Path - }) - m := map[string]Dir{} - for _, d := range dirs { - if path.Base(d.Stat.Path) != d.Stat.Path { - return nil, errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EISDIR, Op: "invalid path"}) - } - if _, ok := m[d.Stat.Path]; ok { - return nil, errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EEXIST, Op: "duplicate path"}) - } - m[d.Stat.Path] = d - } - return &subDirFS{m: m, dirs: dirs}, nil -} - -type subDirFS struct { - m map[string]Dir - dirs []Dir -} - -func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error { - for _, d := range fs.dirs { - fi := &StatInfo{Stat: &d.Stat} - if !fi.IsDir() { - return errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.ENOTDIR, Op: "walk subdir"}) - } - if err := fn(d.Stat.Path, fi, nil); err != nil { - return err - } - if err := d.FS.Walk(ctx, func(p string, fi os.FileInfo, err error) error { - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: d.Stat.Path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) - } - stat.Path = path.Join(d.Stat.Path, stat.Path) - if stat.Linkname != "" { - if fi.Mode()&os.ModeSymlink != 0 { - if strings.HasPrefix(stat.Linkname, "/") { - stat.Linkname = path.Join("/"+d.Stat.Path, stat.Linkname) - } - } else { - stat.Linkname = path.Join(d.Stat.Path, stat.Linkname) - } - } - return fn(filepath.Join(d.Stat.Path, p), &StatInfo{stat}, nil) - }); err != nil { - return err - } - } - return nil -} - -func (fs *subDirFS) Open(p string) (io.ReadCloser, error) { - parts := strings.SplitN(filepath.Clean(p), string(filepath.Separator), 2) - if len(parts) == 0 { - return ioutil.NopCloser(&emptyReader{}), nil - } - d, ok := fs.m[parts[0]] - if !ok { - return nil, errors.WithStack(&os.PathError{Path: parts[0], Err: syscall.ENOENT, Op: "open"}) - } - return d.FS.Open(parts[1]) -} - -type emptyReader struct { -} - -func (*emptyReader) Read([]byte) (int, error) { - return 0, io.EOF -} diff --git a/vendor/github.com/tonistiigi/fsutil/go.mod b/vendor/github.com/tonistiigi/fsutil/go.mod deleted file mode 100644 index fc4cdb0014b0..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/go.mod +++ /dev/null @@ -1,20 +0,0 @@ -module github.com/tonistiigi/fsutil - -go 1.13 - -require ( - 
github.com/containerd/continuity v0.1.0 - github.com/docker/docker v20.10.3-0.20210609071616-4c2ec79bf2a8+incompatible // master (v21.xx-dev) - github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.4.3 // indirect - github.com/google/go-cmp v0.5.2 // indirect - github.com/kr/pretty v0.2.0 // indirect - github.com/opencontainers/go-digest v1.0.0 - github.com/pkg/errors v0.9.1 - github.com/stretchr/testify v1.7.0 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20210313202042-bd2e13477e9c - google.golang.org/protobuf v1.25.0 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gotest.tools/v3 v3.0.3 // indirect -) diff --git a/vendor/github.com/tonistiigi/fsutil/hardlinks.go b/vendor/github.com/tonistiigi/fsutil/hardlinks.go deleted file mode 100644 index ef8bbfb5daff..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/hardlinks.go +++ /dev/null @@ -1,48 +0,0 @@ -package fsutil - -import ( - "os" - "syscall" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -// Hardlinks validates that all targets for links were part of the changes - -type Hardlinks struct { - seenFiles map[string]struct{} -} - -func (v *Hardlinks) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - - if v.seenFiles == nil { - v.seenFiles = make(map[string]struct{}) - } - - if kind == ChangeKindDelete { - return nil - } - - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EBADMSG, Op: "change without stat info"}) - } - - if fi.IsDir() || fi.Mode()&os.ModeSymlink != 0 { - return nil - } - - if len(stat.Linkname) > 0 { - if _, ok := v.seenFiles[stat.Linkname]; !ok { - return errors.Errorf("invalid link %s to unknown path: %q", p, stat.Linkname) - } - } else { - v.seenFiles[p] = struct{}{} - } - - return nil -} diff --git a/vendor/github.com/tonistiigi/fsutil/prefix/match.go b/vendor/github.com/tonistiigi/fsutil/prefix/match.go deleted file mode 100644 index 57e745c68173..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/prefix/match.go +++ /dev/null @@ -1,45 +0,0 @@ -package prefix - -import ( - "path" - "path/filepath" - "strings" -) - -// Match matches a path against a pattern. It returns m = true if the path -// matches the pattern, and partial = true if the pattern has more separators -// than the path and the common components match (for example, name = foo and -// pattern = foo/bar/*). slashSeparator determines whether the path and pattern -// are '/' delimited (true) or use the native path separator (false). 
-func Match(pattern, name string, slashSeparator bool) (m bool, partial bool) { - separator := filepath.Separator - if slashSeparator { - separator = '/' - } - count := strings.Count(name, string(separator)) - if strings.Count(pattern, string(separator)) > count { - pattern = trimUntilIndex(pattern, string(separator), count) - partial = true - } - if slashSeparator { - m, _ = path.Match(pattern, name) - } else { - m, _ = filepath.Match(pattern, name) - } - return m, partial -} - -func trimUntilIndex(str, sep string, count int) string { - s := str - i := 0 - c := 0 - for { - idx := strings.Index(s, sep) - s = s[idx+len(sep):] - i += idx + len(sep) - c++ - if c > count { - return str[:i-len(sep)] - } - } -} diff --git a/vendor/github.com/tonistiigi/fsutil/readme.md b/vendor/github.com/tonistiigi/fsutil/readme.md deleted file mode 100644 index 5ce685b7edc7..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/readme.md +++ /dev/null @@ -1,45 +0,0 @@ -Incremental file directory sync tools in golang. - -``` -BENCH_FILE_SIZE=10000 ./bench.test --test.bench . -BenchmarkCopyWithTar10-4 2000 995242 ns/op -BenchmarkCopyWithTar50-4 300 4710021 ns/op -BenchmarkCopyWithTar200-4 100 16627260 ns/op -BenchmarkCopyWithTar1000-4 20 60031459 ns/op -BenchmarkCPA10-4 1000 1678367 ns/op -BenchmarkCPA50-4 500 3690306 ns/op -BenchmarkCPA200-4 200 9495066 ns/op -BenchmarkCPA1000-4 50 29769289 ns/op -BenchmarkDiffCopy10-4 2000 943889 ns/op -BenchmarkDiffCopy50-4 500 3285950 ns/op -BenchmarkDiffCopy200-4 200 8563792 ns/op -BenchmarkDiffCopy1000-4 50 29511340 ns/op -BenchmarkDiffCopyProto10-4 2000 944615 ns/op -BenchmarkDiffCopyProto50-4 500 3334940 ns/op -BenchmarkDiffCopyProto200-4 200 9420038 ns/op -BenchmarkDiffCopyProto1000-4 50 30632429 ns/op -BenchmarkIncrementalDiffCopy10-4 2000 691993 ns/op -BenchmarkIncrementalDiffCopy50-4 1000 1304253 ns/op -BenchmarkIncrementalDiffCopy200-4 500 3306519 ns/op -BenchmarkIncrementalDiffCopy1000-4 200 10211343 ns/op -BenchmarkIncrementalDiffCopy5000-4 20 55194427 ns/op -BenchmarkIncrementalDiffCopy10000-4 20 91759289 ns/op -BenchmarkIncrementalCopyWithTar10-4 2000 1020258 ns/op -BenchmarkIncrementalCopyWithTar50-4 300 5348786 ns/op -BenchmarkIncrementalCopyWithTar200-4 100 19495000 ns/op -BenchmarkIncrementalCopyWithTar1000-4 20 70338507 ns/op -BenchmarkIncrementalRsync10-4 30 45215754 ns/op -BenchmarkIncrementalRsync50-4 30 45837260 ns/op -BenchmarkIncrementalRsync200-4 30 48780614 ns/op -BenchmarkIncrementalRsync1000-4 20 54801892 ns/op -BenchmarkIncrementalRsync5000-4 20 84782542 ns/op -BenchmarkIncrementalRsync10000-4 10 103355108 ns/op -BenchmarkRsync10-4 30 46776470 ns/op -BenchmarkRsync50-4 30 48601555 ns/op -BenchmarkRsync200-4 20 59642691 ns/op -BenchmarkRsync1000-4 20 101343010 ns/op -BenchmarkGnuTar10-4 500 3171448 ns/op -BenchmarkGnuTar50-4 300 5030296 ns/op -BenchmarkGnuTar200-4 100 10464313 ns/op -BenchmarkGnuTar1000-4 50 30375257 ns/op -``` \ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go deleted file mode 100644 index 209d1d2fafa2..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/receive.go +++ /dev/null @@ -1,286 +0,0 @@ -package fsutil - -import ( - "context" - "io" - "os" - "sync" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -type DiffType int - -const ( - DiffMetadata DiffType = iota - DiffNone - DiffContent -) - -type ReceiveOpt struct { - NotifyHashed ChangeFunc - ContentHasher ContentHasher - 
ProgressCb func(int, bool) - Merge bool - Filter FilterFunc - Differ DiffType -} - -func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - r := &receiver{ - conn: &syncStream{Stream: conn}, - dest: dest, - files: make(map[string]uint32), - pipes: make(map[uint32]io.WriteCloser), - notifyHashed: opt.NotifyHashed, - contentHasher: opt.ContentHasher, - progressCb: opt.ProgressCb, - merge: opt.Merge, - filter: opt.Filter, - differ: opt.Differ, - } - return r.run(ctx) -} - -type receiver struct { - dest string - conn Stream - files map[string]uint32 - pipes map[uint32]io.WriteCloser - mu sync.RWMutex - muPipes sync.RWMutex - progressCb func(int, bool) - merge bool - filter FilterFunc - differ DiffType - - notifyHashed ChangeFunc - contentHasher ContentHasher - orderValidator Validator - hlValidator Hardlinks -} - -type dynamicWalker struct { - walkChan chan *currentPath - err error - closeCh chan struct{} -} - -func newDynamicWalker() *dynamicWalker { - return &dynamicWalker{ - walkChan: make(chan *currentPath, 128), - closeCh: make(chan struct{}), - } -} - -func (w *dynamicWalker) update(p *currentPath) error { - select { - case <-w.closeCh: - return errors.Wrap(w.err, "walker is closed") - default: - } - if p == nil { - close(w.walkChan) - return nil - } - select { - case w.walkChan <- p: - return nil - case <-w.closeCh: - return errors.Wrap(w.err, "walker is closed") - } -} - -func (w *dynamicWalker) fill(ctx context.Context, pathC chan<- *currentPath) error { - for { - select { - case p, ok := <-w.walkChan: - if !ok { - return nil - } - select { - case pathC <- p: - case <-ctx.Done(): - w.err = ctx.Err() - close(w.closeCh) - return ctx.Err() - } - case <-ctx.Done(): - w.err = ctx.Err() - close(w.closeCh) - return ctx.Err() - } - } -} - -func (r *receiver) run(ctx context.Context) error { - g, ctx := errgroup.WithContext(ctx) - - dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{ - AsyncDataCb: r.asyncDataFunc, - NotifyCb: r.notifyHashed, - ContentHasher: r.contentHasher, - Filter: r.filter, - }) - if err != nil { - return err - } - - w := newDynamicWalker() - - g.Go(func() (retErr error) { - defer func() { - if retErr != nil { - r.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(retErr.Error())}) - } - }() - destWalker := emptyWalker - if !r.merge { - destWalker = getWalkerFn(r.dest) - } - err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill, r.filter, r.differ) - if err != nil { - return err - } - if err := dw.Wait(ctx); err != nil { - return err - } - r.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN}) - return nil - }) - - g.Go(func() error { - var i uint32 = 0 - - size := 0 - if r.progressCb != nil { - defer func() { - r.progressCb(size, true) - }() - } - var p types.Packet - for { - p = types.Packet{Data: p.Data[:0]} - if err := r.conn.RecvMsg(&p); err != nil { - return err - } - if r.progressCb != nil { - size += p.Size() - r.progressCb(size, false) - } - - switch p.Type { - case types.PACKET_ERR: - return errors.Errorf("error from sender: %s", p.Data) - case types.PACKET_STAT: - if p.Stat == nil { - if err := w.update(nil); err != nil { - return err - } - break - } - if fileCanRequestData(os.FileMode(p.Stat.Mode)) { - r.mu.Lock() - r.files[p.Stat.Path] = i - r.mu.Unlock() - } - i++ - cp := ¤tPath{path: p.Stat.Path, stat: p.Stat} - if err := r.orderValidator.HandleChange(ChangeKindAdd, cp.path, &StatInfo{cp.stat}, nil); err != nil { - return err - } - if err := 
r.hlValidator.HandleChange(ChangeKindAdd, cp.path, &StatInfo{cp.stat}, nil); err != nil { - return err - } - if err := w.update(cp); err != nil { - return err - } - case types.PACKET_DATA: - r.muPipes.Lock() - pw, ok := r.pipes[p.ID] - r.muPipes.Unlock() - if !ok { - return errors.Errorf("invalid file request %d", p.ID) - } - if len(p.Data) == 0 { - if err := pw.Close(); err != nil { - return err - } - } else { - if _, err := pw.Write(p.Data); err != nil { - return err - } - } - case types.PACKET_FIN: - for { - var p types.Packet - if err := r.conn.RecvMsg(&p); err != nil { - if err == io.EOF { - return nil - } - return err - } - } - } - } - }) - return g.Wait() -} - -func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteCloser) error { - r.mu.Lock() - id, ok := r.files[p] - if !ok { - r.mu.Unlock() - return errors.Errorf("invalid file request %s", p) - } - delete(r.files, p) - r.mu.Unlock() - - wwc := newWrappedWriteCloser(wc) - r.muPipes.Lock() - r.pipes[id] = wwc - r.muPipes.Unlock() - if err := r.conn.SendMsg(&types.Packet{Type: types.PACKET_REQ, ID: id}); err != nil { - return err - } - err := wwc.Wait(ctx) - if err != nil { - return err - } - r.muPipes.Lock() - delete(r.pipes, id) - r.muPipes.Unlock() - return nil -} - -type wrappedWriteCloser struct { - io.WriteCloser - err error - once sync.Once - done chan struct{} -} - -func newWrappedWriteCloser(wc io.WriteCloser) *wrappedWriteCloser { - return &wrappedWriteCloser{WriteCloser: wc, done: make(chan struct{})} -} - -func (w *wrappedWriteCloser) Close() error { - w.err = w.WriteCloser.Close() - w.once.Do(func() { close(w.done) }) - return w.err -} - -func (w *wrappedWriteCloser) Wait(ctx context.Context) error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-w.done: - return w.err - } -} diff --git a/vendor/github.com/tonistiigi/fsutil/send.go b/vendor/github.com/tonistiigi/fsutil/send.go deleted file mode 100644 index 2c1a3801d58a..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/send.go +++ /dev/null @@ -1,208 +0,0 @@ -package fsutil - -import ( - "context" - "io" - "os" - "sync" - "syscall" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" - "golang.org/x/sync/errgroup" -) - -var bufPool = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 32*1<<10) - return &buf - }, -} - -type Stream interface { - RecvMsg(interface{}) error - SendMsg(m interface{}) error - Context() context.Context -} - -func Send(ctx context.Context, conn Stream, fs FS, progressCb func(int, bool)) error { - s := &sender{ - conn: &syncStream{Stream: conn}, - fs: fs, - files: make(map[uint32]string), - progressCb: progressCb, - sendpipeline: make(chan *sendHandle, 128), - } - return s.run(ctx) -} - -type sendHandle struct { - id uint32 - path string -} - -type sender struct { - conn Stream - fs FS - files map[uint32]string - mu sync.RWMutex - progressCb func(int, bool) - progressCurrent int - sendpipeline chan *sendHandle -} - -func (s *sender) run(ctx context.Context) error { - g, ctx := errgroup.WithContext(ctx) - - defer s.updateProgress(0, true) - - g.Go(func() error { - err := s.walk(ctx) - if err != nil { - s.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(err.Error())}) - } - return err - }) - - for i := 0; i < 4; i++ { - g.Go(func() error { - for h := range s.sendpipeline { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - if err := s.sendFile(h); err != nil { - return err - } - } - return nil - }) - } - - g.Go(func() error { - defer close(s.sendpipeline) 
- - for { - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - var p types.Packet - if err := s.conn.RecvMsg(&p); err != nil { - return err - } - switch p.Type { - case types.PACKET_ERR: - return errors.Errorf("error from receiver: %s", p.Data) - case types.PACKET_REQ: - if err := s.queue(p.ID); err != nil { - return err - } - case types.PACKET_FIN: - return s.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN}) - } - } - }) - - return g.Wait() -} - -func (s *sender) updateProgress(size int, last bool) { - if s.progressCb != nil { - s.progressCurrent += size - s.progressCb(s.progressCurrent, last) - } -} - -func (s *sender) queue(id uint32) error { - s.mu.Lock() - p, ok := s.files[id] - if !ok { - s.mu.Unlock() - return errors.Errorf("invalid file id %d", id) - } - delete(s.files, id) - s.mu.Unlock() - s.sendpipeline <- &sendHandle{id, p} - return nil -} - -func (s *sender) sendFile(h *sendHandle) error { - f, err := s.fs.Open(h.path) - if err == nil { - defer f.Close() - buf := bufPool.Get().(*[]byte) - defer bufPool.Put(buf) - if _, err := io.CopyBuffer(&fileSender{sender: s, id: h.id}, f, *buf); err != nil { - return err - } - } - return s.conn.SendMsg(&types.Packet{ID: h.id, Type: types.PACKET_DATA}) -} - -func (s *sender) walk(ctx context.Context) error { - var i uint32 = 0 - err := s.fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return err - } - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) - } - - p := &types.Packet{ - Type: types.PACKET_STAT, - Stat: stat, - } - if fileCanRequestData(os.FileMode(stat.Mode)) { - s.mu.Lock() - s.files[i] = stat.Path - s.mu.Unlock() - } - i++ - s.updateProgress(p.Size(), false) - return errors.Wrapf(s.conn.SendMsg(p), "failed to send stat %s", path) - }) - if err != nil { - return err - } - return errors.Wrapf(s.conn.SendMsg(&types.Packet{Type: types.PACKET_STAT}), "failed to send last stat") -} - -func fileCanRequestData(m os.FileMode) bool { - // avoid updating this function as it needs to match between sender/receiver. - // version if needed - return m&os.ModeType == 0 -} - -type fileSender struct { - sender *sender - id uint32 -} - -func (fs *fileSender) Write(dt []byte) (int, error) { - if len(dt) == 0 { - return 0, nil - } - p := &types.Packet{Type: types.PACKET_DATA, ID: fs.id, Data: dt} - if err := fs.sender.conn.SendMsg(p); err != nil { - return 0, err - } - fs.sender.updateProgress(p.Size(), false) - return len(dt), nil -} - -type syncStream struct { - Stream - mu sync.Mutex -} - -func (ss *syncStream) SendMsg(m interface{}) error { - ss.mu.Lock() - err := ss.Stream.SendMsg(m) - ss.mu.Unlock() - return err -} diff --git a/vendor/github.com/tonistiigi/fsutil/stat.go b/vendor/github.com/tonistiigi/fsutil/stat.go deleted file mode 100644 index 2ab8da118e2c..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/stat.go +++ /dev/null @@ -1,64 +0,0 @@ -package fsutil - -import ( - "os" - "path/filepath" - "runtime" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -// constructs a Stat object. path is where the path can be found right -// now, relpath is the desired path to be recorded in the stat (so -// relative to whatever base dir is relevant). fi is the os.Stat -// info. inodemap is used to calculate hardlinks over a series of -// mkstat calls and maps inode to the canonical (aka "first") path for -// a set of hardlinks to that inode. 
-func mkstat(path, relpath string, fi os.FileInfo, inodemap map[uint64]string) (*types.Stat, error) { - relpath = filepath.ToSlash(relpath) - - stat := &types.Stat{ - Path: relpath, - Mode: uint32(fi.Mode()), - ModTime: fi.ModTime().UnixNano(), - } - - setUnixOpt(fi, stat, relpath, inodemap) - - if !fi.IsDir() { - stat.Size_ = fi.Size() - if fi.Mode()&os.ModeSymlink != 0 { - link, err := os.Readlink(path) - if err != nil { - return nil, errors.WithStack(err) - } - stat.Linkname = link - } - } - if err := loadXattr(path, stat); err != nil { - return nil, err - } - - if runtime.GOOS == "windows" { - permPart := stat.Mode & uint32(os.ModePerm) - noPermPart := stat.Mode &^ uint32(os.ModePerm) - // Add the x bit: make everything +x from windows - permPart |= 0111 - permPart &= 0755 - stat.Mode = noPermPart | permPart - } - - // Clear the socket bit since archive/tar.FileInfoHeader does not handle it - stat.Mode &^= uint32(os.ModeSocket) - - return stat, nil -} - -func Stat(path string) (*types.Stat, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, errors.WithStack(err) - } - return mkstat(path, filepath.Base(path), fi, nil) -} diff --git a/vendor/github.com/tonistiigi/fsutil/stat_unix.go b/vendor/github.com/tonistiigi/fsutil/stat_unix.go deleted file mode 100644 index dd0ed455166f..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/stat_unix.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build !windows - -package fsutil - -import ( - "os" - "syscall" - - "github.com/containerd/continuity/sysx" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func loadXattr(origpath string, stat *types.Stat) error { - xattrs, err := sysx.LListxattr(origpath) - if err != nil { - if errors.Is(err, syscall.ENOTSUP) { - return nil - } - return errors.Wrapf(err, "failed to xattr %s", origpath) - } - if len(xattrs) > 0 { - m := make(map[string][]byte) - for _, key := range xattrs { - v, err := sysx.LGetxattr(origpath, key) - if err == nil { - m[key] = v - } - } - stat.Xattrs = m - } - return nil -} - -func setUnixOpt(fi os.FileInfo, stat *types.Stat, path string, seenFiles map[uint64]string) { - s := fi.Sys().(*syscall.Stat_t) - - stat.Uid = s.Uid - stat.Gid = s.Gid - - if !fi.IsDir() { - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - stat.Devmajor = int64(major(uint64(s.Rdev))) - stat.Devminor = int64(minor(uint64(s.Rdev))) - } - - ino := s.Ino - linked := false - if seenFiles != nil { - if s.Nlink > 1 { - if oldpath, ok := seenFiles[ino]; ok { - stat.Linkname = oldpath - stat.Size_ = 0 - linked = true - } - } - if !linked { - seenFiles[ino] = path - } - } - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} diff --git a/vendor/github.com/tonistiigi/fsutil/stat_windows.go b/vendor/github.com/tonistiigi/fsutil/stat_windows.go deleted file mode 100644 index 66379bd84c7b..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/stat_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build windows - -package fsutil - -import ( - "os" - - "github.com/tonistiigi/fsutil/types" -) - -func loadXattr(_ string, _ *types.Stat) error { - return nil -} - -func setUnixOpt(_ os.FileInfo, _ *types.Stat, _ string, _ map[uint64]string) { -} diff --git a/vendor/github.com/tonistiigi/fsutil/tarwriter.go b/vendor/github.com/tonistiigi/fsutil/tarwriter.go deleted file mode 100644 index bd46a2250ff8..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/tarwriter.go 
+++ /dev/null @@ -1,80 +0,0 @@ -package fsutil - -import ( - "archive/tar" - "context" - "io" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/types" -) - -func WriteTar(ctx context.Context, fs FS, w io.Writer) error { - tw := tar.NewWriter(w) - err := fs.Walk(ctx, func(path string, fi os.FileInfo, err error) error { - if err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - stat, ok := fi.Sys().(*types.Stat) - if !ok { - return errors.WithStack(&os.PathError{Path: path, Err: syscall.EBADMSG, Op: "fileinfo without stat info"}) - } - hdr, err := tar.FileInfoHeader(fi, stat.Linkname) - if err != nil { - return err - } - - name := filepath.ToSlash(path) - if fi.IsDir() && !strings.HasSuffix(name, "/") { - name += "/" - } - hdr.Name = name - - hdr.Uid = int(stat.Uid) - hdr.Gid = int(stat.Gid) - hdr.Devmajor = stat.Devmajor - hdr.Devminor = stat.Devminor - hdr.Linkname = stat.Linkname - if hdr.Linkname != "" { - hdr.Size = 0 - if fi.Mode()&os.ModeSymlink != 0 { - hdr.Typeflag = tar.TypeSymlink - } else { - hdr.Typeflag = tar.TypeLink - } - } - - if len(stat.Xattrs) > 0 { - hdr.PAXRecords = map[string]string{} - } - for k, v := range stat.Xattrs { - hdr.PAXRecords["SCHILY.xattr."+k] = string(v) - } - - if err := tw.WriteHeader(hdr); err != nil { - return errors.Wrapf(err, "failed to write file header %s", name) - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 && hdr.Linkname == "" { - rc, err := fs.Open(path) - if err != nil { - return err - } - if _, err := io.Copy(tw, rc); err != nil { - return errors.WithStack(err) - } - if err := rc.Close(); err != nil { - return errors.WithStack(err) - } - } - return nil - }) - if err != nil { - return err - } - return tw.Close() -} diff --git a/vendor/github.com/tonistiigi/fsutil/types/generate.go b/vendor/github.com/tonistiigi/fsutil/types/generate.go deleted file mode 100644 index 5c03178f3681..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package types - -//go:generate protoc --gogoslick_out=. stat.proto wire.proto diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.go b/vendor/github.com/tonistiigi/fsutil/types/stat.go deleted file mode 100644 index b79fd2bd76d4..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/stat.go +++ /dev/null @@ -1,7 +0,0 @@ -package types - -import "os" - -func (s Stat) IsDir() bool { - return os.FileMode(s.Mode).IsDir() -} diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go b/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go deleted file mode 100644 index 91200fb7790d..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/stat.pb.go +++ /dev/null @@ -1,929 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: stat.proto - -package types - -import ( - bytes "bytes" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Stat struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Mode uint32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` - Uid uint32 `protobuf:"varint,3,opt,name=uid,proto3" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,4,opt,name=gid,proto3" json:"gid,omitempty"` - Size_ int64 `protobuf:"varint,5,opt,name=size,proto3" json:"size,omitempty"` - ModTime int64 `protobuf:"varint,6,opt,name=modTime,proto3" json:"modTime,omitempty"` - // int32 typeflag = 7; - Linkname string `protobuf:"bytes,7,opt,name=linkname,proto3" json:"linkname,omitempty"` - Devmajor int64 `protobuf:"varint,8,opt,name=devmajor,proto3" json:"devmajor,omitempty"` - Devminor int64 `protobuf:"varint,9,opt,name=devminor,proto3" json:"devminor,omitempty"` - Xattrs map[string][]byte `protobuf:"bytes,10,rep,name=xattrs,proto3" json:"xattrs,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *Stat) Reset() { *m = Stat{} } -func (*Stat) ProtoMessage() {} -func (*Stat) Descriptor() ([]byte, []int) { - return fileDescriptor_01fabdc1b78bd68b, []int{0} -} -func (m *Stat) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Stat) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Stat.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Stat) XXX_Merge(src proto.Message) { - xxx_messageInfo_Stat.Merge(m, src) -} -func (m *Stat) XXX_Size() int { - return m.Size() -} -func (m *Stat) XXX_DiscardUnknown() { - xxx_messageInfo_Stat.DiscardUnknown(m) -} - -var xxx_messageInfo_Stat proto.InternalMessageInfo - -func (m *Stat) GetPath() string { - if m != nil { - return m.Path - } - return "" -} - -func (m *Stat) GetMode() uint32 { - if m != nil { - return m.Mode - } - return 0 -} - -func (m *Stat) GetUid() uint32 { - if m != nil { - return m.Uid - } - return 0 -} - -func (m *Stat) GetGid() uint32 { - if m != nil { - return m.Gid - } - return 0 -} - -func (m *Stat) GetSize_() int64 { - if m != nil { - return m.Size_ - } - return 0 -} - -func (m *Stat) GetModTime() int64 { - if m != nil { - return m.ModTime - } - return 0 -} - -func (m *Stat) GetLinkname() string { - if m != nil { - return m.Linkname - } - return "" -} - -func (m *Stat) GetDevmajor() int64 { - if m != nil { - return m.Devmajor - } - return 0 -} - -func (m *Stat) GetDevminor() int64 { - if m != nil { - return m.Devminor - } - return 0 -} - -func (m *Stat) GetXattrs() map[string][]byte { - if m != nil { - return m.Xattrs - } - return nil -} - -func init() { - proto.RegisterType((*Stat)(nil), "fsutil.types.Stat") - proto.RegisterMapType((map[string][]byte)(nil), "fsutil.types.Stat.XattrsEntry") -} - -func init() { proto.RegisterFile("stat.proto", fileDescriptor_01fabdc1b78bd68b) } - -var fileDescriptor_01fabdc1b78bd68b = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x91, 0xbd, 0x4e, 0xc3, 0x30, - 0x14, 0x85, 0x73, 0x9b, 0xfe, 0xba, 0x45, 0x42, 0x16, 0x83, 0xd5, 0xe1, 0x2a, 0x62, 0xca, 0x94, - 0x01, 0x24, 0xc4, 0xcf, 0x86, 0xc4, 0x0b, 0x04, 0x06, 0xc4, 0x66, 0x64, 0x53, 0x4c, 0x9b, 0xb8, - 0x4a, 0xdc, 0x8a, 0x32, 0xf1, 0x08, 0x3c, 0x06, 0x6f, 0x02, 0x63, 0xc7, 0x8e, 0xd4, 0x5d, 0x18, - 0xfb, 0x08, 0xc8, 0x4e, 0x5b, 
0xba, 0x9d, 0xf3, 0x9d, 0x7b, 0x95, 0x9c, 0x6b, 0x42, 0x4a, 0xc3, - 0x4d, 0x32, 0x2e, 0xb4, 0xd1, 0xb4, 0xf7, 0x54, 0x4e, 0x8c, 0x1a, 0x25, 0x66, 0x36, 0x96, 0xe5, - 0xf1, 0x57, 0x8d, 0xd4, 0x6f, 0x0d, 0x37, 0x94, 0x92, 0xfa, 0x98, 0x9b, 0x67, 0x06, 0x11, 0xc4, - 0x9d, 0xd4, 0x6b, 0xc7, 0x32, 0x2d, 0x24, 0xab, 0x45, 0x10, 0x1f, 0xa4, 0x5e, 0xd3, 0x43, 0x12, - 0x4e, 0x94, 0x60, 0xa1, 0x47, 0x4e, 0x3a, 0x32, 0x50, 0x82, 0xd5, 0x2b, 0x32, 0x50, 0xc2, 0xed, - 0x95, 0xea, 0x4d, 0xb2, 0x46, 0x04, 0x71, 0x98, 0x7a, 0x4d, 0x19, 0x69, 0x65, 0x5a, 0xdc, 0xa9, - 0x4c, 0xb2, 0xa6, 0xc7, 0x5b, 0x4b, 0xfb, 0xa4, 0x3d, 0x52, 0xf9, 0x30, 0xe7, 0x99, 0x64, 0x2d, - 0xff, 0xf5, 0x9d, 0x77, 0x99, 0x90, 0xd3, 0x8c, 0xbf, 0xe8, 0x82, 0xb5, 0xfd, 0xda, 0xce, 0x6f, - 0x33, 0x95, 0xeb, 0x82, 0x75, 0xfe, 0x33, 0xe7, 0xe9, 0x19, 0x69, 0xbe, 0x72, 0x63, 0x8a, 0x92, - 0x91, 0x28, 0x8c, 0xbb, 0x27, 0x98, 0xec, 0xb7, 0x4e, 0x5c, 0xe3, 0xe4, 0xde, 0x0f, 0xdc, 0xe4, - 0xa6, 0x98, 0xa5, 0x9b, 0xe9, 0xfe, 0x05, 0xe9, 0xee, 0x61, 0x57, 0x6d, 0x28, 0x67, 0x9b, 0x9b, - 0x38, 0x49, 0x8f, 0x48, 0x63, 0xca, 0x47, 0x93, 0xea, 0x26, 0xbd, 0xb4, 0x32, 0x97, 0xb5, 0x73, - 0xb8, 0xbe, 0x9a, 0x2f, 0x31, 0x58, 0x2c, 0x31, 0x58, 0x2f, 0x11, 0xde, 0x2d, 0xc2, 0xa7, 0x45, - 0xf8, 0xb6, 0x08, 0x73, 0x8b, 0xf0, 0x63, 0x11, 0x7e, 0x2d, 0x06, 0x6b, 0x8b, 0xf0, 0xb1, 0xc2, - 0x60, 0xbe, 0xc2, 0x60, 0xb1, 0xc2, 0xe0, 0xa1, 0xe1, 0x7f, 0xe8, 0xb1, 0xe9, 0xdf, 0xe6, 0xf4, - 0x2f, 0x00, 0x00, 0xff, 0xff, 0x06, 0x97, 0xf3, 0xd7, 0xa9, 0x01, 0x00, 0x00, -} - -func (this *Stat) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Stat) - if !ok { - that2, ok := that.(Stat) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Path != that1.Path { - return false - } - if this.Mode != that1.Mode { - return false - } - if this.Uid != that1.Uid { - return false - } - if this.Gid != that1.Gid { - return false - } - if this.Size_ != that1.Size_ { - return false - } - if this.ModTime != that1.ModTime { - return false - } - if this.Linkname != that1.Linkname { - return false - } - if this.Devmajor != that1.Devmajor { - return false - } - if this.Devminor != that1.Devminor { - return false - } - if len(this.Xattrs) != len(that1.Xattrs) { - return false - } - for i := range this.Xattrs { - if !bytes.Equal(this.Xattrs[i], that1.Xattrs[i]) { - return false - } - } - return true -} -func (this *Stat) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 14) - s = append(s, "&types.Stat{") - s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") - s = append(s, "Mode: "+fmt.Sprintf("%#v", this.Mode)+",\n") - s = append(s, "Uid: "+fmt.Sprintf("%#v", this.Uid)+",\n") - s = append(s, "Gid: "+fmt.Sprintf("%#v", this.Gid)+",\n") - s = append(s, "Size_: "+fmt.Sprintf("%#v", this.Size_)+",\n") - s = append(s, "ModTime: "+fmt.Sprintf("%#v", this.ModTime)+",\n") - s = append(s, "Linkname: "+fmt.Sprintf("%#v", this.Linkname)+",\n") - s = append(s, "Devmajor: "+fmt.Sprintf("%#v", this.Devmajor)+",\n") - s = append(s, "Devminor: "+fmt.Sprintf("%#v", this.Devminor)+",\n") - keysForXattrs := make([]string, 0, len(this.Xattrs)) - for k, _ := range this.Xattrs { - keysForXattrs = append(keysForXattrs, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) - mapStringForXattrs := "map[string][]byte{" - for _, k := range keysForXattrs { - mapStringForXattrs += fmt.Sprintf("%#v: %#v,", k, 
this.Xattrs[k]) - } - mapStringForXattrs += "}" - if this.Xattrs != nil { - s = append(s, "Xattrs: "+mapStringForXattrs+",\n") - } - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringStat(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Stat) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Stat) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Stat) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Xattrs) > 0 { - for k := range m.Xattrs { - v := m.Xattrs[k] - baseI := i - if len(v) > 0 { - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintStat(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - } - i -= len(k) - copy(dAtA[i:], k) - i = encodeVarintStat(dAtA, i, uint64(len(k))) - i-- - dAtA[i] = 0xa - i = encodeVarintStat(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x52 - } - } - if m.Devminor != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Devminor)) - i-- - dAtA[i] = 0x48 - } - if m.Devmajor != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Devmajor)) - i-- - dAtA[i] = 0x40 - } - if len(m.Linkname) > 0 { - i -= len(m.Linkname) - copy(dAtA[i:], m.Linkname) - i = encodeVarintStat(dAtA, i, uint64(len(m.Linkname))) - i-- - dAtA[i] = 0x3a - } - if m.ModTime != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.ModTime)) - i-- - dAtA[i] = 0x30 - } - if m.Size_ != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Size_)) - i-- - dAtA[i] = 0x28 - } - if m.Gid != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Gid)) - i-- - dAtA[i] = 0x20 - } - if m.Uid != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Uid)) - i-- - dAtA[i] = 0x18 - } - if m.Mode != 0 { - i = encodeVarintStat(dAtA, i, uint64(m.Mode)) - i-- - dAtA[i] = 0x10 - } - if len(m.Path) > 0 { - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintStat(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintStat(dAtA []byte, offset int, v uint64) int { - offset -= sovStat(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Stat) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - if l > 0 { - n += 1 + l + sovStat(uint64(l)) - } - if m.Mode != 0 { - n += 1 + sovStat(uint64(m.Mode)) - } - if m.Uid != 0 { - n += 1 + sovStat(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovStat(uint64(m.Gid)) - } - if m.Size_ != 0 { - n += 1 + sovStat(uint64(m.Size_)) - } - if m.ModTime != 0 { - n += 1 + sovStat(uint64(m.ModTime)) - } - l = len(m.Linkname) - if l > 0 { - n += 1 + l + sovStat(uint64(l)) - } - if m.Devmajor != 0 { - n += 1 + sovStat(uint64(m.Devmajor)) - } - if m.Devminor != 0 { - n += 1 + sovStat(uint64(m.Devminor)) - } - if len(m.Xattrs) > 0 { - for k, v := range m.Xattrs { - _ = k - _ = v - l = 0 - if len(v) > 0 { - l = 1 + len(v) + sovStat(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovStat(uint64(len(k))) + l - n += mapEntrySize + 1 + sovStat(uint64(mapEntrySize)) - } - } - return n -} - -func sovStat(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStat(x 
uint64) (n int) { - return sovStat(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Stat) String() string { - if this == nil { - return "nil" - } - keysForXattrs := make([]string, 0, len(this.Xattrs)) - for k, _ := range this.Xattrs { - keysForXattrs = append(keysForXattrs, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForXattrs) - mapStringForXattrs := "map[string][]byte{" - for _, k := range keysForXattrs { - mapStringForXattrs += fmt.Sprintf("%v: %v,", k, this.Xattrs[k]) - } - mapStringForXattrs += "}" - s := strings.Join([]string{`&Stat{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, - `Uid:` + fmt.Sprintf("%v", this.Uid) + `,`, - `Gid:` + fmt.Sprintf("%v", this.Gid) + `,`, - `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, - `ModTime:` + fmt.Sprintf("%v", this.ModTime) + `,`, - `Linkname:` + fmt.Sprintf("%v", this.Linkname) + `,`, - `Devmajor:` + fmt.Sprintf("%v", this.Devmajor) + `,`, - `Devminor:` + fmt.Sprintf("%v", this.Devminor) + `,`, - `Xattrs:` + mapStringForXattrs + `,`, - `}`, - }, "") - return s -} -func valueToStringStat(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Stat) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Stat: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Stat: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStat - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - m.Uid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Uid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) - } - m.Gid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= 
l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Gid |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) - } - m.Size_ = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Size_ |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ModTime", wireType) - } - m.ModTime = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ModTime |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStat - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Linkname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Devmajor", wireType) - } - m.Devmajor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Devmajor |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Devminor", wireType) - } - m.Devminor = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Devminor |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Xattrs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStat - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStat - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Xattrs == nil { - m.Xattrs = make(map[string][]byte) - } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthStat - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthStat - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStat - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthStat - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex < 0 { - return ErrInvalidLengthStat - } - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipStat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStat - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Xattrs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStat(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthStat - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthStat - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStat(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStat - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStat - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupStat - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthStat - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthStat = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStat = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupStat = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/github.com/tonistiigi/fsutil/types/stat.proto b/vendor/github.com/tonistiigi/fsutil/types/stat.proto deleted file mode 100644 index 4138be6945be..000000000000 --- 
a/vendor/github.com/tonistiigi/fsutil/types/stat.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package fsutil.types; - -option go_package = "types"; - -message Stat { - string path = 1; - uint32 mode = 2; - uint32 uid = 3; - uint32 gid = 4; - int64 size = 5; - int64 modTime = 6; - // int32 typeflag = 7; - string linkname = 7; - int64 devmajor = 8; - int64 devminor = 9; - map xattrs = 10; -} \ No newline at end of file diff --git a/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go b/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go deleted file mode 100644 index 9e22269e95c1..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/wire.pb.go +++ /dev/null @@ -1,575 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: wire.proto - -package types - -import ( - bytes "bytes" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strconv "strconv" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type Packet_PacketType int32 - -const ( - PACKET_STAT Packet_PacketType = 0 - PACKET_REQ Packet_PacketType = 1 - PACKET_DATA Packet_PacketType = 2 - PACKET_FIN Packet_PacketType = 3 - PACKET_ERR Packet_PacketType = 4 -) - -var Packet_PacketType_name = map[int32]string{ - 0: "PACKET_STAT", - 1: "PACKET_REQ", - 2: "PACKET_DATA", - 3: "PACKET_FIN", - 4: "PACKET_ERR", -} - -var Packet_PacketType_value = map[string]int32{ - "PACKET_STAT": 0, - "PACKET_REQ": 1, - "PACKET_DATA": 2, - "PACKET_FIN": 3, - "PACKET_ERR": 4, -} - -func (Packet_PacketType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f2dcdddcdf68d8e0, []int{0, 0} -} - -type Packet struct { - Type Packet_PacketType `protobuf:"varint,1,opt,name=type,proto3,enum=fsutil.types.Packet_PacketType" json:"type,omitempty"` - Stat *Stat `protobuf:"bytes,2,opt,name=stat,proto3" json:"stat,omitempty"` - ID uint32 `protobuf:"varint,3,opt,name=ID,proto3" json:"ID,omitempty"` - Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` -} - -func (m *Packet) Reset() { *m = Packet{} } -func (*Packet) ProtoMessage() {} -func (*Packet) Descriptor() ([]byte, []int) { - return fileDescriptor_f2dcdddcdf68d8e0, []int{0} -} -func (m *Packet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Packet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Packet.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Packet) XXX_Merge(src proto.Message) { - xxx_messageInfo_Packet.Merge(m, src) -} -func (m *Packet) XXX_Size() int { - return m.Size() -} -func (m *Packet) XXX_DiscardUnknown() { - xxx_messageInfo_Packet.DiscardUnknown(m) -} - -var xxx_messageInfo_Packet proto.InternalMessageInfo - -func (m *Packet) GetType() Packet_PacketType { - if m != nil { - return m.Type - } - return PACKET_STAT -} - -func (m *Packet) GetStat() *Stat { - if m != nil { - return m.Stat - } - return nil -} - -func (m *Packet) GetID() 
uint32 { - if m != nil { - return m.ID - } - return 0 -} - -func (m *Packet) GetData() []byte { - if m != nil { - return m.Data - } - return nil -} - -func init() { - proto.RegisterEnum("fsutil.types.Packet_PacketType", Packet_PacketType_name, Packet_PacketType_value) - proto.RegisterType((*Packet)(nil), "fsutil.types.Packet") -} - -func init() { proto.RegisterFile("wire.proto", fileDescriptor_f2dcdddcdf68d8e0) } - -var fileDescriptor_f2dcdddcdf68d8e0 = []byte{ - // 276 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a, - 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x49, 0x2b, 0x2e, 0x2d, 0xc9, 0xcc, 0xd1, 0x2b, - 0xa9, 0x2c, 0x48, 0x2d, 0x96, 0xe2, 0x2a, 0x2e, 0x49, 0x2c, 0x81, 0xc8, 0x28, 0xbd, 0x64, 0xe4, - 0x62, 0x0b, 0x48, 0x4c, 0xce, 0x4e, 0x2d, 0x11, 0x32, 0xe6, 0x62, 0x01, 0xc9, 0x4b, 0x30, 0x2a, - 0x30, 0x6a, 0xf0, 0x19, 0xc9, 0xeb, 0x21, 0xeb, 0xd1, 0x83, 0xa8, 0x81, 0x52, 0x21, 0x95, 0x05, - 0xa9, 0x41, 0x60, 0xc5, 0x42, 0x6a, 0x5c, 0x2c, 0x20, 0xd3, 0x24, 0x98, 0x14, 0x18, 0x35, 0xb8, - 0x8d, 0x84, 0x50, 0x35, 0x05, 0x97, 0x24, 0x96, 0x04, 0x81, 0xe5, 0x85, 0xf8, 0xb8, 0x98, 0x3c, - 0x5d, 0x24, 0x98, 0x15, 0x18, 0x35, 0x78, 0x83, 0x98, 0x3c, 0x5d, 0x84, 0x84, 0xb8, 0x58, 0x52, - 0x12, 0x4b, 0x12, 0x25, 0x58, 0x14, 0x18, 0x35, 0x78, 0x82, 0xc0, 0x6c, 0xa5, 0x38, 0x2e, 0x2e, - 0x84, 0xf9, 0x42, 0xfc, 0x5c, 0xdc, 0x01, 0x8e, 0xce, 0xde, 0xae, 0x21, 0xf1, 0xc1, 0x21, 0x8e, - 0x21, 0x02, 0x0c, 0x42, 0x7c, 0x5c, 0x5c, 0x50, 0x81, 0x20, 0xd7, 0x40, 0x01, 0x46, 0x24, 0x05, - 0x2e, 0x8e, 0x21, 0x8e, 0x02, 0x4c, 0x48, 0x0a, 0xdc, 0x3c, 0xfd, 0x04, 0x98, 0x91, 0xf8, 0xae, - 0x41, 0x41, 0x02, 0x2c, 0x4e, 0xd6, 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, - 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, - 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x3c, 0x92, 0x63, - 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x58, 0xc1, - 0x7e, 0x49, 0x62, 0x03, 0x87, 0x97, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x3f, 0x9d, 0xe3, 0x51, - 0x57, 0x01, 0x00, 0x00, -} - -func (x Packet_PacketType) String() string { - s, ok := Packet_PacketType_name[int32(x)] - if ok { - return s - } - return strconv.Itoa(int(x)) -} -func (this *Packet) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Packet) - if !ok { - that2, ok := that.(Packet) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Type != that1.Type { - return false - } - if !this.Stat.Equal(that1.Stat) { - return false - } - if this.ID != that1.ID { - return false - } - if !bytes.Equal(this.Data, that1.Data) { - return false - } - return true -} -func (this *Packet) GoString() string { - if this == nil { - return "nil" - } - s := make([]string, 0, 8) - s = append(s, "&types.Packet{") - s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") - if this.Stat != nil { - s = append(s, "Stat: "+fmt.Sprintf("%#v", this.Stat)+",\n") - } - s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n") - s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n") - s = append(s, "}") - return strings.Join(s, "") -} -func valueToGoStringWire(v interface{}, typ string) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return 
fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) -} -func (m *Packet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Packet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Packet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Data) > 0 { - i -= len(m.Data) - copy(dAtA[i:], m.Data) - i = encodeVarintWire(dAtA, i, uint64(len(m.Data))) - i-- - dAtA[i] = 0x22 - } - if m.ID != 0 { - i = encodeVarintWire(dAtA, i, uint64(m.ID)) - i-- - dAtA[i] = 0x18 - } - if m.Stat != nil { - { - size, err := m.Stat.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintWire(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Type != 0 { - i = encodeVarintWire(dAtA, i, uint64(m.Type)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintWire(dAtA []byte, offset int, v uint64) int { - offset -= sovWire(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Packet) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Type != 0 { - n += 1 + sovWire(uint64(m.Type)) - } - if m.Stat != nil { - l = m.Stat.Size() - n += 1 + l + sovWire(uint64(l)) - } - if m.ID != 0 { - n += 1 + sovWire(uint64(m.ID)) - } - l = len(m.Data) - if l > 0 { - n += 1 + l + sovWire(uint64(l)) - } - return n -} - -func sovWire(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozWire(x uint64) (n int) { - return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Packet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Packet{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Stat:` + strings.Replace(fmt.Sprintf("%v", this.Stat), "Stat", "Stat", 1) + `,`, - `ID:` + fmt.Sprintf("%v", this.ID) + `,`, - `Data:` + fmt.Sprintf("%v", this.Data) + `,`, - `}`, - }, "") - return s -} -func valueToStringWire(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Packet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Packet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Packet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Type |= Packet_PacketType(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType) - 
} - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWire - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWire - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Stat == nil { - m.Stat = &Stat{} - } - if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ID |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthWire - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthWire - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWire(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWire - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthWire - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipWire(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthWire - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupWire - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthWire - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupWire = fmt.Errorf("proto: 
unexpected end of group") -) diff --git a/vendor/github.com/tonistiigi/fsutil/types/wire.proto b/vendor/github.com/tonistiigi/fsutil/types/wire.proto deleted file mode 100644 index 3e85000c58c0..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/types/wire.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package fsutil.types; - -option go_package = "types"; - -import "stat.proto"; - -message Packet { - enum PacketType { - PACKET_STAT = 0; - PACKET_REQ = 1; - PACKET_DATA = 2; - PACKET_FIN = 3; - PACKET_ERR = 4; - } - PacketType type = 1; - Stat stat = 2; - uint32 ID = 3; - bytes data = 4; -} diff --git a/vendor/github.com/tonistiigi/fsutil/validator.go b/vendor/github.com/tonistiigi/fsutil/validator.go deleted file mode 100644 index 9bd7d94d3643..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/validator.go +++ /dev/null @@ -1,93 +0,0 @@ -package fsutil - -import ( - "os" - "path" - "runtime" - "sort" - "strings" - "syscall" - - "github.com/pkg/errors" -) - -type parent struct { - dir string - last string -} - -type Validator struct { - parentDirs []parent -} - -func (v *Validator) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err error) (retErr error) { - if err != nil { - return err - } - // test that all paths are in order and all parent dirs were present - if v.parentDirs == nil { - v.parentDirs = make([]parent, 1, 10) - } - if runtime.GOOS == "windows" { - p = strings.Replace(p, "\\", "", -1) - } - if p != path.Clean(p) { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "unclean path"}) - } - if path.IsAbs(p) { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "absolute path"}) - } - dir := path.Dir(p) - base := path.Base(p) - if dir == "." { - dir = "" - } - if dir == ".." 
|| strings.HasPrefix(p, "../") { - return errors.WithStack(&os.PathError{Path: p, Err: syscall.EINVAL, Op: "escape check"}) - } - - // find a parent dir from saved records - i := sort.Search(len(v.parentDirs), func(i int) bool { - return ComparePath(v.parentDirs[len(v.parentDirs)-1-i].dir, dir) <= 0 - }) - i = len(v.parentDirs) - 1 - i - if i != len(v.parentDirs)-1 { // skipping back to grandparent - v.parentDirs = v.parentDirs[:i+1] - } - - if dir != v.parentDirs[len(v.parentDirs)-1].dir || v.parentDirs[i].last >= base { - return errors.Errorf("changes out of order: %q %q", p, path.Join(v.parentDirs[i].dir, v.parentDirs[i].last)) - } - v.parentDirs[i].last = base - if kind != ChangeKindDelete && fi.IsDir() { - v.parentDirs = append(v.parentDirs, parent{ - dir: path.Join(dir, base), - last: "", - }) - } - // todo: validate invalid mode combinations - return err -} - -func ComparePath(p1, p2 string) int { - // byte-by-byte comparison to be compatible with str<>str - min := min(len(p1), len(p2)) - for i := 0; i < min; i++ { - switch { - case p1[i] == p2[i]: - continue - case p2[i] != '/' && p1[i] < p2[i] || p1[i] == '/': - return -1 - default: - return 1 - } - } - return len(p1) - len(p2) -} - -func min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go deleted file mode 100644 index cc6143db49c7..000000000000 --- a/vendor/github.com/tonistiigi/fsutil/walker.go +++ /dev/null @@ -1,196 +0,0 @@ -package fsutil - -import ( - "context" - "os" - "path/filepath" - "strings" - "syscall" - "time" - - "github.com/docker/docker/pkg/fileutils" - "github.com/pkg/errors" - "github.com/tonistiigi/fsutil/prefix" - "github.com/tonistiigi/fsutil/types" -) - -type WalkOpt struct { - IncludePatterns []string - ExcludePatterns []string - // FollowPaths contains symlinks that are resolved into include patterns - // before performing the fs walk - FollowPaths []string - Map FilterFunc -} - -func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error { - root, err := filepath.EvalSymlinks(p) - if err != nil { - return errors.WithStack(&os.PathError{Op: "resolve", Path: root, Err: err}) - } - fi, err := os.Stat(root) - if err != nil { - return errors.WithStack(err) - } - if !fi.IsDir() { - return errors.WithStack(&os.PathError{Op: "walk", Path: root, Err: syscall.ENOTDIR}) - } - - var pm *fileutils.PatternMatcher - if opt != nil && opt.ExcludePatterns != nil { - pm, err = fileutils.NewPatternMatcher(opt.ExcludePatterns) - if err != nil { - return errors.Wrapf(err, "invalid excludepatterns: %s", opt.ExcludePatterns) - } - } - - var includePatterns []string - if opt != nil && opt.IncludePatterns != nil { - includePatterns = make([]string, len(opt.IncludePatterns)) - for k := range opt.IncludePatterns { - includePatterns[k] = filepath.Clean(opt.IncludePatterns[k]) - } - } - if opt != nil && opt.FollowPaths != nil { - targets, err := FollowLinks(p, opt.FollowPaths) - if err != nil { - return err - } - if targets != nil { - includePatterns = append(includePatterns, targets...) 
- includePatterns = dedupePaths(includePatterns) - } - } - - var lastIncludedDir string - - seenFiles := make(map[uint64]string) - return filepath.Walk(root, func(path string, fi os.FileInfo, err error) (retErr error) { - defer func() { - if retErr != nil && isNotExist(retErr) { - retErr = filepath.SkipDir - } - }() - if err != nil { - return err - } - - origpath := path - path, err = filepath.Rel(root, path) - if err != nil { - return err - } - // Skip root - if path == "." { - return nil - } - - if opt != nil { - if includePatterns != nil { - skip := false - if lastIncludedDir != "" { - if strings.HasPrefix(path, lastIncludedDir+string(filepath.Separator)) { - skip = true - } - } - - if !skip { - matched := false - partial := true - for _, pattern := range includePatterns { - if ok, p := prefix.Match(pattern, path, false); ok { - matched = true - if !p { - partial = false - break - } - } - } - if !matched { - if fi.IsDir() { - return filepath.SkipDir - } - return nil - } - if !partial && fi.IsDir() { - lastIncludedDir = path - } - } - } - if pm != nil { - m, err := pm.Matches(path) - if err != nil { - return errors.Wrap(err, "failed to match excludepatterns") - } - - if m { - if fi.IsDir() { - if !pm.Exclusions() { - return filepath.SkipDir - } - dirSlash := path + string(filepath.Separator) - for _, pat := range pm.Patterns() { - if !pat.Exclusion() { - continue - } - patStr := pat.String() + string(filepath.Separator) - if strings.HasPrefix(patStr, dirSlash) { - goto passedFilter - } - } - return filepath.SkipDir - } - return nil - } - } - } - - passedFilter: - stat, err := mkstat(origpath, path, fi, seenFiles) - if err != nil { - return err - } - - select { - case <-ctx.Done(): - return ctx.Err() - default: - if opt != nil && opt.Map != nil { - if allowed := opt.Map(stat.Path, stat); !allowed { - return nil - } - } - if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil { - return err - } - } - return nil - }) -} - -type StatInfo struct { - *types.Stat -} - -func (s *StatInfo) Name() string { - return filepath.Base(s.Stat.Path) -} -func (s *StatInfo) Size() int64 { - return s.Stat.Size_ -} -func (s *StatInfo) Mode() os.FileMode { - return os.FileMode(s.Stat.Mode) -} -func (s *StatInfo) ModTime() time.Time { - return time.Unix(s.Stat.ModTime/1e9, s.Stat.ModTime%1e9) -} -func (s *StatInfo) IsDir() bool { - return s.Mode().IsDir() -} -func (s *StatInfo) Sys() interface{} { - return s.Stat -} - -func isNotExist(err error) bool { - return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) -} diff --git a/vendor/github.com/tonistiigi/units/LICENSE b/vendor/github.com/tonistiigi/units/LICENSE deleted file mode 100644 index 5c1095df0d9d..000000000000 --- a/vendor/github.com/tonistiigi/units/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2017 Tõnis Tiigi - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/vendor/github.com/tonistiigi/units/bytes.go b/vendor/github.com/tonistiigi/units/bytes.go deleted file mode 100644 index 5a82fc1b349b..000000000000 --- a/vendor/github.com/tonistiigi/units/bytes.go +++ /dev/null @@ -1,125 +0,0 @@ -/* - Simple byte size formatting. - - This package implements types that can be used in stdlib formatting functions - like `fmt.Printf` to control the output of the expected printed string. - - - Floating point flags %f and %g print the value in using the correct unit - suffix. Decimal units are default, # switches to binary units. If a value is - best represented as full bytes, integer bytes are printed instead. - - Examples: - fmt.Printf("%.2f", 123 * B) => "123B" - fmt.Printf("%.2f", 1234 * B) => "1.23kB" - fmt.Printf("%g", 1200 * B) => "1.2kB" - fmt.Printf("%#g", 1024 * B) => "1KiB" - - - Integer flag %d always prints the value in bytes. # flag adds an unit prefix. - - Examples: - fmt.Printf("%d", 1234 * B) => "1234" - fmt.Printf("%#d", 1234 * B) => "1234B" - - %v is equal to %g - -*/ -package units - -import ( - "fmt" - "io" - "math" - "math/big" -) - -type Bytes int64 - -const ( - B Bytes = 1 << (10 * iota) - KiB - MiB - GiB - TiB - PiB - EiB - - KB = 1e3 * B - MB = 1e3 * KB - GB = 1e3 * MB - TB = 1e3 * GB - PB = 1e3 * TB - EB = 1e3 * PB -) - -var units = map[bool][]string{ - false: []string{ - "B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB", - }, - true: []string{ - "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB", - }, -} - -func (b Bytes) Format(f fmt.State, c rune) { - switch c { - case 'f', 'g': - fv, unit, ok := b.floatValue(f.Flag('#')) - if !ok { - b.formatInt(&noPrecision{f}, 'd', true) - return - } - big.NewFloat(fv).Format(f, c) - io.WriteString(f, unit) - case 'd': - b.formatInt(f, c, f.Flag('#')) - default: - if f.Flag('#') { - fmt.Fprintf(f, "bytes(%d)", int64(b)) - } else { - fmt.Fprintf(f, "%g", b) - } - } -} - -func (b Bytes) formatInt(f fmt.State, c rune, withUnit bool) { - big.NewInt(int64(b)).Format(f, c) - if withUnit { - io.WriteString(f, "B") - } -} - -func (b Bytes) floatValue(binary bool) (float64, string, bool) { - i := 0 - var baseUnit Bytes = 1 - if b < 0 { - baseUnit *= -1 - } - for { - next := baseUnit - if binary { - next *= 1 << 10 - } else { - next *= 1e3 - } - if (baseUnit > 0 && b >= next) || (baseUnit < 0 && b <= next) { - i++ - baseUnit = next - continue - } - if i == 0 { - return 0, "", false - } - - return float64(b) / math.Abs(float64(baseUnit)), units[binary][i], true - } -} - -type noPrecision struct { - fmt.State -} - -func (*noPrecision) Precision() (prec int, ok bool) { - return 0, false -} diff --git a/vendor/github.com/tonistiigi/units/readme.md b/vendor/github.com/tonistiigi/units/readme.md deleted file mode 100644 index 5c67d30d43d1..000000000000 --- a/vendor/github.com/tonistiigi/units/readme.md +++ /dev/null @@ -1,29 +0,0 @@ -#### Simple byte size formatting. - -This package implements types that can be used in stdlib formatting functions -like `fmt.Printf` to control the output of the expected printed string. 
- -Floating point flags `%f` and %g print the value in using the correct unit -suffix. Decimal units are default, `#` switches to binary units. If a value is -best represented as full bytes, integer bytes are printed instead. - -##### Examples: - -``` -fmt.Printf("%.2f", 123 * B) => "123B" -fmt.Printf("%.2f", 1234 * B) => "1.23kB" -fmt.Printf("%g", 1200 * B) => "1.2kB" -fmt.Printf("%#g", 1024 * B) => "1KiB" -``` - - -Integer flag `%d` always prints the value in bytes. `#` flag adds an unit prefix. - -##### Examples: - -``` -fmt.Printf("%d", 1234 * B) => "1234" -fmt.Printf("%#d", 1234 * B) => "1234B" -``` - -`%v` is equal to `%g` \ No newline at end of file diff --git a/vendor/golang.org/x/crypto/blowfish/block.go b/vendor/golang.org/x/crypto/blowfish/block.go deleted file mode 100644 index 9d80f19521b4..000000000000 --- a/vendor/golang.org/x/crypto/blowfish/block.go +++ /dev/null @@ -1,159 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package blowfish - -// getNextWord returns the next big-endian uint32 value from the byte slice -// at the given position in a circular manner, updating the position. -func getNextWord(b []byte, pos *int) uint32 { - var w uint32 - j := *pos - for i := 0; i < 4; i++ { - w = w<<8 | uint32(b[j]) - j++ - if j >= len(b) { - j = 0 - } - } - *pos = j - return w -} - -// ExpandKey performs a key expansion on the given *Cipher. Specifically, it -// performs the Blowfish algorithm's key schedule which sets up the *Cipher's -// pi and substitution tables for calls to Encrypt. This is used, primarily, -// by the bcrypt package to reuse the Blowfish key schedule during its -// set up. It's unlikely that you need to use this directly. -func ExpandKey(key []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - // Using inlined getNextWord for performance. - var d uint32 - for k := 0; k < 4; k++ { - d = d<<8 | uint32(key[j]) - j++ - if j >= len(key) { - j = 0 - } - } - c.p[i] ^= d - } - - var l, r uint32 - for i := 0; i < 18; i += 2 { - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - for i := 0; i < 256; i += 2 { - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -// This is similar to ExpandKey, but folds the salt during the key -// schedule. While ExpandKey is essentially expandKeyWithSalt with an all-zero -// salt passed in, reusing ExpandKey turns out to be a place of inefficiency -// and specializing it here is useful. 
-func expandKeyWithSalt(key []byte, salt []byte, c *Cipher) { - j := 0 - for i := 0; i < 18; i++ { - c.p[i] ^= getNextWord(key, &j) - } - - j = 0 - var l, r uint32 - for i := 0; i < 18; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.p[i], c.p[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s0[i], c.s0[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s1[i], c.s1[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s2[i], c.s2[i+1] = l, r - } - - for i := 0; i < 256; i += 2 { - l ^= getNextWord(salt, &j) - r ^= getNextWord(salt, &j) - l, r = encryptBlock(l, r, c) - c.s3[i], c.s3[i+1] = l, r - } -} - -func encryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[0] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[1] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[2] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[3] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[4] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[5] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[6] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[7] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[8] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[9] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[10] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[11] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[12] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[13] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[14] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[15] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[16] - xr ^= c.p[17] - return xr, xl -} - -func decryptBlock(l, r uint32, c *Cipher) (uint32, uint32) { - xl, xr := l, r - xl ^= c.p[17] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[16] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[15] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[14] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[13] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[12] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[11] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[10] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[9] - xr ^= 
((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[8] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[7] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[6] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[5] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[4] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[3] - xr ^= ((c.s0[byte(xl>>24)] + c.s1[byte(xl>>16)]) ^ c.s2[byte(xl>>8)]) + c.s3[byte(xl)] ^ c.p[2] - xl ^= ((c.s0[byte(xr>>24)] + c.s1[byte(xr>>16)]) ^ c.s2[byte(xr>>8)]) + c.s3[byte(xr)] ^ c.p[1] - xr ^= c.p[0] - return xr, xl -} diff --git a/vendor/golang.org/x/crypto/blowfish/cipher.go b/vendor/golang.org/x/crypto/blowfish/cipher.go deleted file mode 100644 index 213bf204afea..000000000000 --- a/vendor/golang.org/x/crypto/blowfish/cipher.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package blowfish implements Bruce Schneier's Blowfish encryption algorithm. -// -// Blowfish is a legacy cipher and its short block size makes it vulnerable to -// birthday bound attacks (see https://sweet32.info). It should only be used -// where compatibility with legacy systems, not security, is the goal. -// -// Deprecated: any new system should use AES (from crypto/aes, if necessary in -// an AEAD mode like crypto/cipher.NewGCM) or XChaCha20-Poly1305 (from -// golang.org/x/crypto/chacha20poly1305). -package blowfish // import "golang.org/x/crypto/blowfish" - -// The code is a port of Bruce Schneier's C implementation. -// See https://www.schneier.com/blowfish.html. - -import "strconv" - -// The Blowfish block size in bytes. -const BlockSize = 8 - -// A Cipher is an instance of Blowfish encryption using a particular key. -type Cipher struct { - p [18]uint32 - s0, s1, s2, s3 [256]uint32 -} - -type KeySizeError int - -func (k KeySizeError) Error() string { - return "crypto/blowfish: invalid key size " + strconv.Itoa(int(k)) -} - -// NewCipher creates and returns a Cipher. -// The key argument should be the Blowfish key, from 1 to 56 bytes. -func NewCipher(key []byte) (*Cipher, error) { - var result Cipher - if k := len(key); k < 1 || k > 56 { - return nil, KeySizeError(k) - } - initCipher(&result) - ExpandKey(key, &result) - return &result, nil -} - -// NewSaltedCipher creates a returns a Cipher that folds a salt into its key -// schedule. For most purposes, NewCipher, instead of NewSaltedCipher, is -// sufficient and desirable. For bcrypt compatibility, the key can be over 56 -// bytes. -func NewSaltedCipher(key, salt []byte) (*Cipher, error) { - if len(salt) == 0 { - return NewCipher(key) - } - var result Cipher - if k := len(key); k < 1 { - return nil, KeySizeError(k) - } - initCipher(&result) - expandKeyWithSalt(key, salt, &result) - return &result, nil -} - -// BlockSize returns the Blowfish block size, 8 bytes. -// It is necessary to satisfy the Block interface in the -// package "crypto/cipher". -func (c *Cipher) BlockSize() int { return BlockSize } - -// Encrypt encrypts the 8-byte buffer src using the key k -// and stores the result in dst. 
-// Note that for amounts of data larger than a block, -// it is not safe to just call Encrypt on successive blocks; -// instead, use an encryption mode like CBC (see crypto/cipher/cbc.go). -func (c *Cipher) Encrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = encryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -// Decrypt decrypts the 8-byte buffer src using the key k -// and stores the result in dst. -func (c *Cipher) Decrypt(dst, src []byte) { - l := uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3]) - r := uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7]) - l, r = decryptBlock(l, r, c) - dst[0], dst[1], dst[2], dst[3] = byte(l>>24), byte(l>>16), byte(l>>8), byte(l) - dst[4], dst[5], dst[6], dst[7] = byte(r>>24), byte(r>>16), byte(r>>8), byte(r) -} - -func initCipher(c *Cipher) { - copy(c.p[0:], p[0:]) - copy(c.s0[0:], s0[0:]) - copy(c.s1[0:], s1[0:]) - copy(c.s2[0:], s2[0:]) - copy(c.s3[0:], s3[0:]) -} diff --git a/vendor/golang.org/x/crypto/blowfish/const.go b/vendor/golang.org/x/crypto/blowfish/const.go deleted file mode 100644 index d04077595abc..000000000000 --- a/vendor/golang.org/x/crypto/blowfish/const.go +++ /dev/null @@ -1,199 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// The startup permutation array and substitution boxes. -// They are the hexadecimal digits of PI; see: -// https://www.schneier.com/code/constants.txt. 
- -package blowfish - -var s0 = [256]uint32{ - 0xd1310ba6, 0x98dfb5ac, 0x2ffd72db, 0xd01adfb7, 0xb8e1afed, 0x6a267e96, - 0xba7c9045, 0xf12c7f99, 0x24a19947, 0xb3916cf7, 0x0801f2e2, 0x858efc16, - 0x636920d8, 0x71574e69, 0xa458fea3, 0xf4933d7e, 0x0d95748f, 0x728eb658, - 0x718bcd58, 0x82154aee, 0x7b54a41d, 0xc25a59b5, 0x9c30d539, 0x2af26013, - 0xc5d1b023, 0x286085f0, 0xca417918, 0xb8db38ef, 0x8e79dcb0, 0x603a180e, - 0x6c9e0e8b, 0xb01e8a3e, 0xd71577c1, 0xbd314b27, 0x78af2fda, 0x55605c60, - 0xe65525f3, 0xaa55ab94, 0x57489862, 0x63e81440, 0x55ca396a, 0x2aab10b6, - 0xb4cc5c34, 0x1141e8ce, 0xa15486af, 0x7c72e993, 0xb3ee1411, 0x636fbc2a, - 0x2ba9c55d, 0x741831f6, 0xce5c3e16, 0x9b87931e, 0xafd6ba33, 0x6c24cf5c, - 0x7a325381, 0x28958677, 0x3b8f4898, 0x6b4bb9af, 0xc4bfe81b, 0x66282193, - 0x61d809cc, 0xfb21a991, 0x487cac60, 0x5dec8032, 0xef845d5d, 0xe98575b1, - 0xdc262302, 0xeb651b88, 0x23893e81, 0xd396acc5, 0x0f6d6ff3, 0x83f44239, - 0x2e0b4482, 0xa4842004, 0x69c8f04a, 0x9e1f9b5e, 0x21c66842, 0xf6e96c9a, - 0x670c9c61, 0xabd388f0, 0x6a51a0d2, 0xd8542f68, 0x960fa728, 0xab5133a3, - 0x6eef0b6c, 0x137a3be4, 0xba3bf050, 0x7efb2a98, 0xa1f1651d, 0x39af0176, - 0x66ca593e, 0x82430e88, 0x8cee8619, 0x456f9fb4, 0x7d84a5c3, 0x3b8b5ebe, - 0xe06f75d8, 0x85c12073, 0x401a449f, 0x56c16aa6, 0x4ed3aa62, 0x363f7706, - 0x1bfedf72, 0x429b023d, 0x37d0d724, 0xd00a1248, 0xdb0fead3, 0x49f1c09b, - 0x075372c9, 0x80991b7b, 0x25d479d8, 0xf6e8def7, 0xe3fe501a, 0xb6794c3b, - 0x976ce0bd, 0x04c006ba, 0xc1a94fb6, 0x409f60c4, 0x5e5c9ec2, 0x196a2463, - 0x68fb6faf, 0x3e6c53b5, 0x1339b2eb, 0x3b52ec6f, 0x6dfc511f, 0x9b30952c, - 0xcc814544, 0xaf5ebd09, 0xbee3d004, 0xde334afd, 0x660f2807, 0x192e4bb3, - 0xc0cba857, 0x45c8740f, 0xd20b5f39, 0xb9d3fbdb, 0x5579c0bd, 0x1a60320a, - 0xd6a100c6, 0x402c7279, 0x679f25fe, 0xfb1fa3cc, 0x8ea5e9f8, 0xdb3222f8, - 0x3c7516df, 0xfd616b15, 0x2f501ec8, 0xad0552ab, 0x323db5fa, 0xfd238760, - 0x53317b48, 0x3e00df82, 0x9e5c57bb, 0xca6f8ca0, 0x1a87562e, 0xdf1769db, - 0xd542a8f6, 0x287effc3, 0xac6732c6, 0x8c4f5573, 0x695b27b0, 0xbbca58c8, - 0xe1ffa35d, 0xb8f011a0, 0x10fa3d98, 0xfd2183b8, 0x4afcb56c, 0x2dd1d35b, - 0x9a53e479, 0xb6f84565, 0xd28e49bc, 0x4bfb9790, 0xe1ddf2da, 0xa4cb7e33, - 0x62fb1341, 0xcee4c6e8, 0xef20cada, 0x36774c01, 0xd07e9efe, 0x2bf11fb4, - 0x95dbda4d, 0xae909198, 0xeaad8e71, 0x6b93d5a0, 0xd08ed1d0, 0xafc725e0, - 0x8e3c5b2f, 0x8e7594b7, 0x8ff6e2fb, 0xf2122b64, 0x8888b812, 0x900df01c, - 0x4fad5ea0, 0x688fc31c, 0xd1cff191, 0xb3a8c1ad, 0x2f2f2218, 0xbe0e1777, - 0xea752dfe, 0x8b021fa1, 0xe5a0cc0f, 0xb56f74e8, 0x18acf3d6, 0xce89e299, - 0xb4a84fe0, 0xfd13e0b7, 0x7cc43b81, 0xd2ada8d9, 0x165fa266, 0x80957705, - 0x93cc7314, 0x211a1477, 0xe6ad2065, 0x77b5fa86, 0xc75442f5, 0xfb9d35cf, - 0xebcdaf0c, 0x7b3e89a0, 0xd6411bd3, 0xae1e7e49, 0x00250e2d, 0x2071b35e, - 0x226800bb, 0x57b8e0af, 0x2464369b, 0xf009b91e, 0x5563911d, 0x59dfa6aa, - 0x78c14389, 0xd95a537f, 0x207d5ba2, 0x02e5b9c5, 0x83260376, 0x6295cfa9, - 0x11c81968, 0x4e734a41, 0xb3472dca, 0x7b14a94a, 0x1b510052, 0x9a532915, - 0xd60f573f, 0xbc9bc6e4, 0x2b60a476, 0x81e67400, 0x08ba6fb5, 0x571be91f, - 0xf296ec6b, 0x2a0dd915, 0xb6636521, 0xe7b9f9b6, 0xff34052e, 0xc5855664, - 0x53b02d5d, 0xa99f8fa1, 0x08ba4799, 0x6e85076a, -} - -var s1 = [256]uint32{ - 0x4b7a70e9, 0xb5b32944, 0xdb75092e, 0xc4192623, 0xad6ea6b0, 0x49a7df7d, - 0x9cee60b8, 0x8fedb266, 0xecaa8c71, 0x699a17ff, 0x5664526c, 0xc2b19ee1, - 0x193602a5, 0x75094c29, 0xa0591340, 0xe4183a3e, 0x3f54989a, 0x5b429d65, - 0x6b8fe4d6, 0x99f73fd6, 0xa1d29c07, 0xefe830f5, 0x4d2d38e6, 0xf0255dc1, - 0x4cdd2086, 0x8470eb26, 
0x6382e9c6, 0x021ecc5e, 0x09686b3f, 0x3ebaefc9, - 0x3c971814, 0x6b6a70a1, 0x687f3584, 0x52a0e286, 0xb79c5305, 0xaa500737, - 0x3e07841c, 0x7fdeae5c, 0x8e7d44ec, 0x5716f2b8, 0xb03ada37, 0xf0500c0d, - 0xf01c1f04, 0x0200b3ff, 0xae0cf51a, 0x3cb574b2, 0x25837a58, 0xdc0921bd, - 0xd19113f9, 0x7ca92ff6, 0x94324773, 0x22f54701, 0x3ae5e581, 0x37c2dadc, - 0xc8b57634, 0x9af3dda7, 0xa9446146, 0x0fd0030e, 0xecc8c73e, 0xa4751e41, - 0xe238cd99, 0x3bea0e2f, 0x3280bba1, 0x183eb331, 0x4e548b38, 0x4f6db908, - 0x6f420d03, 0xf60a04bf, 0x2cb81290, 0x24977c79, 0x5679b072, 0xbcaf89af, - 0xde9a771f, 0xd9930810, 0xb38bae12, 0xdccf3f2e, 0x5512721f, 0x2e6b7124, - 0x501adde6, 0x9f84cd87, 0x7a584718, 0x7408da17, 0xbc9f9abc, 0xe94b7d8c, - 0xec7aec3a, 0xdb851dfa, 0x63094366, 0xc464c3d2, 0xef1c1847, 0x3215d908, - 0xdd433b37, 0x24c2ba16, 0x12a14d43, 0x2a65c451, 0x50940002, 0x133ae4dd, - 0x71dff89e, 0x10314e55, 0x81ac77d6, 0x5f11199b, 0x043556f1, 0xd7a3c76b, - 0x3c11183b, 0x5924a509, 0xf28fe6ed, 0x97f1fbfa, 0x9ebabf2c, 0x1e153c6e, - 0x86e34570, 0xeae96fb1, 0x860e5e0a, 0x5a3e2ab3, 0x771fe71c, 0x4e3d06fa, - 0x2965dcb9, 0x99e71d0f, 0x803e89d6, 0x5266c825, 0x2e4cc978, 0x9c10b36a, - 0xc6150eba, 0x94e2ea78, 0xa5fc3c53, 0x1e0a2df4, 0xf2f74ea7, 0x361d2b3d, - 0x1939260f, 0x19c27960, 0x5223a708, 0xf71312b6, 0xebadfe6e, 0xeac31f66, - 0xe3bc4595, 0xa67bc883, 0xb17f37d1, 0x018cff28, 0xc332ddef, 0xbe6c5aa5, - 0x65582185, 0x68ab9802, 0xeecea50f, 0xdb2f953b, 0x2aef7dad, 0x5b6e2f84, - 0x1521b628, 0x29076170, 0xecdd4775, 0x619f1510, 0x13cca830, 0xeb61bd96, - 0x0334fe1e, 0xaa0363cf, 0xb5735c90, 0x4c70a239, 0xd59e9e0b, 0xcbaade14, - 0xeecc86bc, 0x60622ca7, 0x9cab5cab, 0xb2f3846e, 0x648b1eaf, 0x19bdf0ca, - 0xa02369b9, 0x655abb50, 0x40685a32, 0x3c2ab4b3, 0x319ee9d5, 0xc021b8f7, - 0x9b540b19, 0x875fa099, 0x95f7997e, 0x623d7da8, 0xf837889a, 0x97e32d77, - 0x11ed935f, 0x16681281, 0x0e358829, 0xc7e61fd6, 0x96dedfa1, 0x7858ba99, - 0x57f584a5, 0x1b227263, 0x9b83c3ff, 0x1ac24696, 0xcdb30aeb, 0x532e3054, - 0x8fd948e4, 0x6dbc3128, 0x58ebf2ef, 0x34c6ffea, 0xfe28ed61, 0xee7c3c73, - 0x5d4a14d9, 0xe864b7e3, 0x42105d14, 0x203e13e0, 0x45eee2b6, 0xa3aaabea, - 0xdb6c4f15, 0xfacb4fd0, 0xc742f442, 0xef6abbb5, 0x654f3b1d, 0x41cd2105, - 0xd81e799e, 0x86854dc7, 0xe44b476a, 0x3d816250, 0xcf62a1f2, 0x5b8d2646, - 0xfc8883a0, 0xc1c7b6a3, 0x7f1524c3, 0x69cb7492, 0x47848a0b, 0x5692b285, - 0x095bbf00, 0xad19489d, 0x1462b174, 0x23820e00, 0x58428d2a, 0x0c55f5ea, - 0x1dadf43e, 0x233f7061, 0x3372f092, 0x8d937e41, 0xd65fecf1, 0x6c223bdb, - 0x7cde3759, 0xcbee7460, 0x4085f2a7, 0xce77326e, 0xa6078084, 0x19f8509e, - 0xe8efd855, 0x61d99735, 0xa969a7aa, 0xc50c06c2, 0x5a04abfc, 0x800bcadc, - 0x9e447a2e, 0xc3453484, 0xfdd56705, 0x0e1e9ec9, 0xdb73dbd3, 0x105588cd, - 0x675fda79, 0xe3674340, 0xc5c43465, 0x713e38d8, 0x3d28f89e, 0xf16dff20, - 0x153e21e7, 0x8fb03d4a, 0xe6e39f2b, 0xdb83adf7, -} - -var s2 = [256]uint32{ - 0xe93d5a68, 0x948140f7, 0xf64c261c, 0x94692934, 0x411520f7, 0x7602d4f7, - 0xbcf46b2e, 0xd4a20068, 0xd4082471, 0x3320f46a, 0x43b7d4b7, 0x500061af, - 0x1e39f62e, 0x97244546, 0x14214f74, 0xbf8b8840, 0x4d95fc1d, 0x96b591af, - 0x70f4ddd3, 0x66a02f45, 0xbfbc09ec, 0x03bd9785, 0x7fac6dd0, 0x31cb8504, - 0x96eb27b3, 0x55fd3941, 0xda2547e6, 0xabca0a9a, 0x28507825, 0x530429f4, - 0x0a2c86da, 0xe9b66dfb, 0x68dc1462, 0xd7486900, 0x680ec0a4, 0x27a18dee, - 0x4f3ffea2, 0xe887ad8c, 0xb58ce006, 0x7af4d6b6, 0xaace1e7c, 0xd3375fec, - 0xce78a399, 0x406b2a42, 0x20fe9e35, 0xd9f385b9, 0xee39d7ab, 0x3b124e8b, - 0x1dc9faf7, 0x4b6d1856, 0x26a36631, 0xeae397b2, 0x3a6efa74, 0xdd5b4332, - 0x6841e7f7, 
0xca7820fb, 0xfb0af54e, 0xd8feb397, 0x454056ac, 0xba489527, - 0x55533a3a, 0x20838d87, 0xfe6ba9b7, 0xd096954b, 0x55a867bc, 0xa1159a58, - 0xcca92963, 0x99e1db33, 0xa62a4a56, 0x3f3125f9, 0x5ef47e1c, 0x9029317c, - 0xfdf8e802, 0x04272f70, 0x80bb155c, 0x05282ce3, 0x95c11548, 0xe4c66d22, - 0x48c1133f, 0xc70f86dc, 0x07f9c9ee, 0x41041f0f, 0x404779a4, 0x5d886e17, - 0x325f51eb, 0xd59bc0d1, 0xf2bcc18f, 0x41113564, 0x257b7834, 0x602a9c60, - 0xdff8e8a3, 0x1f636c1b, 0x0e12b4c2, 0x02e1329e, 0xaf664fd1, 0xcad18115, - 0x6b2395e0, 0x333e92e1, 0x3b240b62, 0xeebeb922, 0x85b2a20e, 0xe6ba0d99, - 0xde720c8c, 0x2da2f728, 0xd0127845, 0x95b794fd, 0x647d0862, 0xe7ccf5f0, - 0x5449a36f, 0x877d48fa, 0xc39dfd27, 0xf33e8d1e, 0x0a476341, 0x992eff74, - 0x3a6f6eab, 0xf4f8fd37, 0xa812dc60, 0xa1ebddf8, 0x991be14c, 0xdb6e6b0d, - 0xc67b5510, 0x6d672c37, 0x2765d43b, 0xdcd0e804, 0xf1290dc7, 0xcc00ffa3, - 0xb5390f92, 0x690fed0b, 0x667b9ffb, 0xcedb7d9c, 0xa091cf0b, 0xd9155ea3, - 0xbb132f88, 0x515bad24, 0x7b9479bf, 0x763bd6eb, 0x37392eb3, 0xcc115979, - 0x8026e297, 0xf42e312d, 0x6842ada7, 0xc66a2b3b, 0x12754ccc, 0x782ef11c, - 0x6a124237, 0xb79251e7, 0x06a1bbe6, 0x4bfb6350, 0x1a6b1018, 0x11caedfa, - 0x3d25bdd8, 0xe2e1c3c9, 0x44421659, 0x0a121386, 0xd90cec6e, 0xd5abea2a, - 0x64af674e, 0xda86a85f, 0xbebfe988, 0x64e4c3fe, 0x9dbc8057, 0xf0f7c086, - 0x60787bf8, 0x6003604d, 0xd1fd8346, 0xf6381fb0, 0x7745ae04, 0xd736fccc, - 0x83426b33, 0xf01eab71, 0xb0804187, 0x3c005e5f, 0x77a057be, 0xbde8ae24, - 0x55464299, 0xbf582e61, 0x4e58f48f, 0xf2ddfda2, 0xf474ef38, 0x8789bdc2, - 0x5366f9c3, 0xc8b38e74, 0xb475f255, 0x46fcd9b9, 0x7aeb2661, 0x8b1ddf84, - 0x846a0e79, 0x915f95e2, 0x466e598e, 0x20b45770, 0x8cd55591, 0xc902de4c, - 0xb90bace1, 0xbb8205d0, 0x11a86248, 0x7574a99e, 0xb77f19b6, 0xe0a9dc09, - 0x662d09a1, 0xc4324633, 0xe85a1f02, 0x09f0be8c, 0x4a99a025, 0x1d6efe10, - 0x1ab93d1d, 0x0ba5a4df, 0xa186f20f, 0x2868f169, 0xdcb7da83, 0x573906fe, - 0xa1e2ce9b, 0x4fcd7f52, 0x50115e01, 0xa70683fa, 0xa002b5c4, 0x0de6d027, - 0x9af88c27, 0x773f8641, 0xc3604c06, 0x61a806b5, 0xf0177a28, 0xc0f586e0, - 0x006058aa, 0x30dc7d62, 0x11e69ed7, 0x2338ea63, 0x53c2dd94, 0xc2c21634, - 0xbbcbee56, 0x90bcb6de, 0xebfc7da1, 0xce591d76, 0x6f05e409, 0x4b7c0188, - 0x39720a3d, 0x7c927c24, 0x86e3725f, 0x724d9db9, 0x1ac15bb4, 0xd39eb8fc, - 0xed545578, 0x08fca5b5, 0xd83d7cd3, 0x4dad0fc4, 0x1e50ef5e, 0xb161e6f8, - 0xa28514d9, 0x6c51133c, 0x6fd5c7e7, 0x56e14ec4, 0x362abfce, 0xddc6c837, - 0xd79a3234, 0x92638212, 0x670efa8e, 0x406000e0, -} - -var s3 = [256]uint32{ - 0x3a39ce37, 0xd3faf5cf, 0xabc27737, 0x5ac52d1b, 0x5cb0679e, 0x4fa33742, - 0xd3822740, 0x99bc9bbe, 0xd5118e9d, 0xbf0f7315, 0xd62d1c7e, 0xc700c47b, - 0xb78c1b6b, 0x21a19045, 0xb26eb1be, 0x6a366eb4, 0x5748ab2f, 0xbc946e79, - 0xc6a376d2, 0x6549c2c8, 0x530ff8ee, 0x468dde7d, 0xd5730a1d, 0x4cd04dc6, - 0x2939bbdb, 0xa9ba4650, 0xac9526e8, 0xbe5ee304, 0xa1fad5f0, 0x6a2d519a, - 0x63ef8ce2, 0x9a86ee22, 0xc089c2b8, 0x43242ef6, 0xa51e03aa, 0x9cf2d0a4, - 0x83c061ba, 0x9be96a4d, 0x8fe51550, 0xba645bd6, 0x2826a2f9, 0xa73a3ae1, - 0x4ba99586, 0xef5562e9, 0xc72fefd3, 0xf752f7da, 0x3f046f69, 0x77fa0a59, - 0x80e4a915, 0x87b08601, 0x9b09e6ad, 0x3b3ee593, 0xe990fd5a, 0x9e34d797, - 0x2cf0b7d9, 0x022b8b51, 0x96d5ac3a, 0x017da67d, 0xd1cf3ed6, 0x7c7d2d28, - 0x1f9f25cf, 0xadf2b89b, 0x5ad6b472, 0x5a88f54c, 0xe029ac71, 0xe019a5e6, - 0x47b0acfd, 0xed93fa9b, 0xe8d3c48d, 0x283b57cc, 0xf8d56629, 0x79132e28, - 0x785f0191, 0xed756055, 0xf7960e44, 0xe3d35e8c, 0x15056dd4, 0x88f46dba, - 0x03a16125, 0x0564f0bd, 0xc3eb9e15, 0x3c9057a2, 0x97271aec, 0xa93a072a, - 
0x1b3f6d9b, 0x1e6321f5, 0xf59c66fb, 0x26dcf319, 0x7533d928, 0xb155fdf5, - 0x03563482, 0x8aba3cbb, 0x28517711, 0xc20ad9f8, 0xabcc5167, 0xccad925f, - 0x4de81751, 0x3830dc8e, 0x379d5862, 0x9320f991, 0xea7a90c2, 0xfb3e7bce, - 0x5121ce64, 0x774fbe32, 0xa8b6e37e, 0xc3293d46, 0x48de5369, 0x6413e680, - 0xa2ae0810, 0xdd6db224, 0x69852dfd, 0x09072166, 0xb39a460a, 0x6445c0dd, - 0x586cdecf, 0x1c20c8ae, 0x5bbef7dd, 0x1b588d40, 0xccd2017f, 0x6bb4e3bb, - 0xdda26a7e, 0x3a59ff45, 0x3e350a44, 0xbcb4cdd5, 0x72eacea8, 0xfa6484bb, - 0x8d6612ae, 0xbf3c6f47, 0xd29be463, 0x542f5d9e, 0xaec2771b, 0xf64e6370, - 0x740e0d8d, 0xe75b1357, 0xf8721671, 0xaf537d5d, 0x4040cb08, 0x4eb4e2cc, - 0x34d2466a, 0x0115af84, 0xe1b00428, 0x95983a1d, 0x06b89fb4, 0xce6ea048, - 0x6f3f3b82, 0x3520ab82, 0x011a1d4b, 0x277227f8, 0x611560b1, 0xe7933fdc, - 0xbb3a792b, 0x344525bd, 0xa08839e1, 0x51ce794b, 0x2f32c9b7, 0xa01fbac9, - 0xe01cc87e, 0xbcc7d1f6, 0xcf0111c3, 0xa1e8aac7, 0x1a908749, 0xd44fbd9a, - 0xd0dadecb, 0xd50ada38, 0x0339c32a, 0xc6913667, 0x8df9317c, 0xe0b12b4f, - 0xf79e59b7, 0x43f5bb3a, 0xf2d519ff, 0x27d9459c, 0xbf97222c, 0x15e6fc2a, - 0x0f91fc71, 0x9b941525, 0xfae59361, 0xceb69ceb, 0xc2a86459, 0x12baa8d1, - 0xb6c1075e, 0xe3056a0c, 0x10d25065, 0xcb03a442, 0xe0ec6e0e, 0x1698db3b, - 0x4c98a0be, 0x3278e964, 0x9f1f9532, 0xe0d392df, 0xd3a0342b, 0x8971f21e, - 0x1b0a7441, 0x4ba3348c, 0xc5be7120, 0xc37632d8, 0xdf359f8d, 0x9b992f2e, - 0xe60b6f47, 0x0fe3f11d, 0xe54cda54, 0x1edad891, 0xce6279cf, 0xcd3e7e6f, - 0x1618b166, 0xfd2c1d05, 0x848fd2c5, 0xf6fb2299, 0xf523f357, 0xa6327623, - 0x93a83531, 0x56cccd02, 0xacf08162, 0x5a75ebb5, 0x6e163697, 0x88d273cc, - 0xde966292, 0x81b949d0, 0x4c50901b, 0x71c65614, 0xe6c6c7bd, 0x327a140a, - 0x45e1d006, 0xc3f27b9a, 0xc9aa53fd, 0x62a80f00, 0xbb25bfe2, 0x35bdd2f6, - 0x71126905, 0xb2040222, 0xb6cbcf7c, 0xcd769c2b, 0x53113ec0, 0x1640e3d3, - 0x38abbd60, 0x2547adf0, 0xba38209c, 0xf746ce76, 0x77afa1c5, 0x20756060, - 0x85cbfe4e, 0x8ae88dd8, 0x7aaaf9b0, 0x4cf9aa7e, 0x1948c25c, 0x02fb8a8c, - 0x01c36ae4, 0xd6ebe1f9, 0x90d4f869, 0xa65cdea0, 0x3f09252d, 0xc208e69f, - 0xb74e6132, 0xce77e25b, 0x578fdfe3, 0x3ac372e6, -} - -var p = [18]uint32{ - 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, - 0x082efa98, 0xec4e6c89, 0x452821e6, 0x38d01377, 0xbe5466cf, 0x34e90c6c, - 0xc0ac29b7, 0xc97c50dd, 0x3f84d5b5, 0xb5470917, 0x9216d5d9, 0x8979fb1b, -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go deleted file mode 100644 index 94c71ac1ac86..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 && gc && !purego -// +build go1.11,gc,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s b/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s deleted file mode 100644 index 8fb49a13e3bf..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_arm64.s +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.11,gc,!purego - -#include "textflag.h" - -#define NUM_ROUNDS 10 - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD dst+0(FP), R1 - MOVD src+24(FP), R2 - MOVD src_len+32(FP), R3 - MOVD key+48(FP), R4 - MOVD nonce+56(FP), R6 - MOVD counter+64(FP), R7 - - MOVD $·constants(SB), R10 - MOVD $·incRotMatrix(SB), R11 - - MOVW (R7), R20 - - AND $~255, R3, R13 - ADD R2, R13, R12 // R12 for block end - AND $255, R3, R13 -loop: - MOVD $NUM_ROUNDS, R21 - VLD1 (R11), [V30.S4, V31.S4] - - // load contants - // VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] - WORD $0x4D60E940 - - // load keys - // VLD4R 16(R4), [V4.S4, V5.S4, V6.S4, V7.S4] - WORD $0x4DFFE884 - // VLD4R 16(R4), [V8.S4, V9.S4, V10.S4, V11.S4] - WORD $0x4DFFE888 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V12.S4] - WORD $0x4D40C8EC - - // VLD3R (R6), [V13.S4, V14.S4, V15.S4] - WORD $0x4D40E8CD - - // update counter - VADD V30.S4, V12.S4, V12.S4 - -chacha: - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 16) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 12) - VADD V8.S4, V12.S4, V8.S4 - VADD V9.S4, V13.S4, V9.S4 - VADD V10.S4, V14.S4, V10.S4 - VADD V11.S4, V15.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $12, V16.S4, V4.S4 - VSHL $12, V17.S4, V5.S4 - VSHL $12, V18.S4, V6.S4 - VSHL $12, V19.S4, V7.S4 - VSRI $20, V16.S4, V4.S4 - VSRI $20, V17.S4, V5.S4 - VSRI $20, V18.S4, V6.S4 - VSRI $20, V19.S4, V7.S4 - - // V0..V3 += V4..V7 - // V12..V15 <<<= ((V12..V15 XOR V0..V3), 8) - VADD V0.S4, V4.S4, V0.S4 - VADD V1.S4, V5.S4, V1.S4 - VADD V2.S4, V6.S4, V2.S4 - VADD V3.S4, V7.S4, V3.S4 - VEOR V12.B16, V0.B16, V12.B16 - VEOR V13.B16, V1.B16, V13.B16 - VEOR V14.B16, V2.B16, V14.B16 - VEOR V15.B16, V3.B16, V15.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V8..V11 += V12..V15 - // V4..V7 <<<= ((V4..V7 XOR V8..V11), 7) - VADD V12.S4, V8.S4, V8.S4 - VADD V13.S4, V9.S4, V9.S4 - VADD V14.S4, V10.S4, V10.S4 - VADD V15.S4, V11.S4, V11.S4 - VEOR V8.B16, V4.B16, V16.B16 - VEOR V9.B16, V5.B16, V17.B16 - VEOR V10.B16, V6.B16, V18.B16 - VEOR V11.B16, V7.B16, V19.B16 - VSHL $7, V16.S4, V4.S4 - VSHL $7, V17.S4, V5.S4 - VSHL $7, V18.S4, V6.S4 - VSHL $7, V19.S4, V7.S4 - VSRI $25, V16.S4, V4.S4 - VSRI $25, V17.S4, V5.S4 - VSRI $25, V18.S4, V6.S4 - VSRI $25, V19.S4, V7.S4 - - // V0..V3 += V5..V7, V4 - // V15,V12-V14 <<<= ((V15,V12-V14 XOR V0..V3), 16) - VADD V0.S4, V5.S4, V0.S4 - VADD V1.S4, V6.S4, V1.S4 - VADD V2.S4, V7.S4, V2.S4 - VADD V3.S4, V4.S4, V3.S4 - VEOR V15.B16, V0.B16, V15.B16 - VEOR V12.B16, V1.B16, V12.B16 - VEOR V13.B16, V2.B16, V13.B16 - VEOR V14.B16, V3.B16, V14.B16 - VREV32 V12.H8, V12.H8 - VREV32 V13.H8, V13.H8 - VREV32 V14.H8, V14.H8 - VREV32 V15.H8, V15.H8 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 12) - // ... 
- VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $12, V16.S4, V5.S4 - VSHL $12, V17.S4, V6.S4 - VSHL $12, V18.S4, V7.S4 - VSHL $12, V19.S4, V4.S4 - VSRI $20, V16.S4, V5.S4 - VSRI $20, V17.S4, V6.S4 - VSRI $20, V18.S4, V7.S4 - VSRI $20, V19.S4, V4.S4 - - // V0 += V5; V15 <<<= ((V0 XOR V15), 8) - // ... - VADD V5.S4, V0.S4, V0.S4 - VADD V6.S4, V1.S4, V1.S4 - VADD V7.S4, V2.S4, V2.S4 - VADD V4.S4, V3.S4, V3.S4 - VEOR V0.B16, V15.B16, V15.B16 - VEOR V1.B16, V12.B16, V12.B16 - VEOR V2.B16, V13.B16, V13.B16 - VEOR V3.B16, V14.B16, V14.B16 - VTBL V31.B16, [V12.B16], V12.B16 - VTBL V31.B16, [V13.B16], V13.B16 - VTBL V31.B16, [V14.B16], V14.B16 - VTBL V31.B16, [V15.B16], V15.B16 - - // V10 += V15; V5 <<<= ((V10 XOR V5), 7) - // ... - VADD V15.S4, V10.S4, V10.S4 - VADD V12.S4, V11.S4, V11.S4 - VADD V13.S4, V8.S4, V8.S4 - VADD V14.S4, V9.S4, V9.S4 - VEOR V10.B16, V5.B16, V16.B16 - VEOR V11.B16, V6.B16, V17.B16 - VEOR V8.B16, V7.B16, V18.B16 - VEOR V9.B16, V4.B16, V19.B16 - VSHL $7, V16.S4, V5.S4 - VSHL $7, V17.S4, V6.S4 - VSHL $7, V18.S4, V7.S4 - VSHL $7, V19.S4, V4.S4 - VSRI $25, V16.S4, V5.S4 - VSRI $25, V17.S4, V6.S4 - VSRI $25, V18.S4, V7.S4 - VSRI $25, V19.S4, V4.S4 - - SUB $1, R21 - CBNZ R21, chacha - - // VLD4R (R10), [V16.S4, V17.S4, V18.S4, V19.S4] - WORD $0x4D60E950 - - // VLD4R 16(R4), [V20.S4, V21.S4, V22.S4, V23.S4] - WORD $0x4DFFE894 - VADD V30.S4, V12.S4, V12.S4 - VADD V16.S4, V0.S4, V0.S4 - VADD V17.S4, V1.S4, V1.S4 - VADD V18.S4, V2.S4, V2.S4 - VADD V19.S4, V3.S4, V3.S4 - // VLD4R 16(R4), [V24.S4, V25.S4, V26.S4, V27.S4] - WORD $0x4DFFE898 - // restore R4 - SUB $32, R4 - - // load counter + nonce - // VLD1R (R7), [V28.S4] - WORD $0x4D40C8FC - // VLD3R (R6), [V29.S4, V30.S4, V31.S4] - WORD $0x4D40E8DD - - VADD V20.S4, V4.S4, V4.S4 - VADD V21.S4, V5.S4, V5.S4 - VADD V22.S4, V6.S4, V6.S4 - VADD V23.S4, V7.S4, V7.S4 - VADD V24.S4, V8.S4, V8.S4 - VADD V25.S4, V9.S4, V9.S4 - VADD V26.S4, V10.S4, V10.S4 - VADD V27.S4, V11.S4, V11.S4 - VADD V28.S4, V12.S4, V12.S4 - VADD V29.S4, V13.S4, V13.S4 - VADD V30.S4, V14.S4, V14.S4 - VADD V31.S4, V15.S4, V15.S4 - - VZIP1 V1.S4, V0.S4, V16.S4 - VZIP2 V1.S4, V0.S4, V17.S4 - VZIP1 V3.S4, V2.S4, V18.S4 - VZIP2 V3.S4, V2.S4, V19.S4 - VZIP1 V5.S4, V4.S4, V20.S4 - VZIP2 V5.S4, V4.S4, V21.S4 - VZIP1 V7.S4, V6.S4, V22.S4 - VZIP2 V7.S4, V6.S4, V23.S4 - VZIP1 V9.S4, V8.S4, V24.S4 - VZIP2 V9.S4, V8.S4, V25.S4 - VZIP1 V11.S4, V10.S4, V26.S4 - VZIP2 V11.S4, V10.S4, V27.S4 - VZIP1 V13.S4, V12.S4, V28.S4 - VZIP2 V13.S4, V12.S4, V29.S4 - VZIP1 V15.S4, V14.S4, V30.S4 - VZIP2 V15.S4, V14.S4, V31.S4 - VZIP1 V18.D2, V16.D2, V0.D2 - VZIP2 V18.D2, V16.D2, V4.D2 - VZIP1 V19.D2, V17.D2, V8.D2 - VZIP2 V19.D2, V17.D2, V12.D2 - VLD1.P 64(R2), [V16.B16, V17.B16, V18.B16, V19.B16] - - VZIP1 V22.D2, V20.D2, V1.D2 - VZIP2 V22.D2, V20.D2, V5.D2 - VZIP1 V23.D2, V21.D2, V9.D2 - VZIP2 V23.D2, V21.D2, V13.D2 - VLD1.P 64(R2), [V20.B16, V21.B16, V22.B16, V23.B16] - VZIP1 V26.D2, V24.D2, V2.D2 - VZIP2 V26.D2, V24.D2, V6.D2 - VZIP1 V27.D2, V25.D2, V10.D2 - VZIP2 V27.D2, V25.D2, V14.D2 - VLD1.P 64(R2), [V24.B16, V25.B16, V26.B16, V27.B16] - VZIP1 V30.D2, V28.D2, V3.D2 - VZIP2 V30.D2, V28.D2, V7.D2 - VZIP1 V31.D2, V29.D2, V11.D2 - VZIP2 V31.D2, V29.D2, V15.D2 - VLD1.P 64(R2), [V28.B16, V29.B16, V30.B16, V31.B16] - VEOR V0.B16, V16.B16, V16.B16 - VEOR V1.B16, V17.B16, V17.B16 - VEOR V2.B16, 
V18.B16, V18.B16 - VEOR V3.B16, V19.B16, V19.B16 - VST1.P [V16.B16, V17.B16, V18.B16, V19.B16], 64(R1) - VEOR V4.B16, V20.B16, V20.B16 - VEOR V5.B16, V21.B16, V21.B16 - VEOR V6.B16, V22.B16, V22.B16 - VEOR V7.B16, V23.B16, V23.B16 - VST1.P [V20.B16, V21.B16, V22.B16, V23.B16], 64(R1) - VEOR V8.B16, V24.B16, V24.B16 - VEOR V9.B16, V25.B16, V25.B16 - VEOR V10.B16, V26.B16, V26.B16 - VEOR V11.B16, V27.B16, V27.B16 - VST1.P [V24.B16, V25.B16, V26.B16, V27.B16], 64(R1) - VEOR V12.B16, V28.B16, V28.B16 - VEOR V13.B16, V29.B16, V29.B16 - VEOR V14.B16, V30.B16, V30.B16 - VEOR V15.B16, V31.B16, V31.B16 - VST1.P [V28.B16, V29.B16, V30.B16, V31.B16], 64(R1) - - ADD $4, R20 - MOVW R20, (R7) // update counter - - CMP R2, R12 - BGT loop - - RET - - -DATA ·constants+0x00(SB)/4, $0x61707865 -DATA ·constants+0x04(SB)/4, $0x3320646e -DATA ·constants+0x08(SB)/4, $0x79622d32 -DATA ·constants+0x0c(SB)/4, $0x6b206574 -GLOBL ·constants(SB), NOPTR|RODATA, $32 - -DATA ·incRotMatrix+0x00(SB)/4, $0x00000000 -DATA ·incRotMatrix+0x04(SB)/4, $0x00000001 -DATA ·incRotMatrix+0x08(SB)/4, $0x00000002 -DATA ·incRotMatrix+0x0c(SB)/4, $0x00000003 -DATA ·incRotMatrix+0x10(SB)/4, $0x02010003 -DATA ·incRotMatrix+0x14(SB)/4, $0x06050407 -DATA ·incRotMatrix+0x18(SB)/4, $0x0A09080B -DATA ·incRotMatrix+0x1c(SB)/4, $0x0E0D0C0F -GLOBL ·incRotMatrix(SB), NOPTR|RODATA, $32 diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go b/vendor/golang.org/x/crypto/chacha20/chacha_generic.go deleted file mode 100644 index a2ecf5c325b9..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_generic.go +++ /dev/null @@ -1,398 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package chacha20 implements the ChaCha20 and XChaCha20 encryption algorithms -// as specified in RFC 8439 and draft-irtf-cfrg-xchacha-01. -package chacha20 - -import ( - "crypto/cipher" - "encoding/binary" - "errors" - "math/bits" - - "golang.org/x/crypto/internal/subtle" -) - -const ( - // KeySize is the size of the key used by this cipher, in bytes. - KeySize = 32 - - // NonceSize is the size of the nonce used with the standard variant of this - // cipher, in bytes. - // - // Note that this is too short to be safely generated at random if the same - // key is reused more than 2³² times. - NonceSize = 12 - - // NonceSizeX is the size of the nonce used with the XChaCha20 variant of - // this cipher, in bytes. - NonceSizeX = 24 -) - -// Cipher is a stateful instance of ChaCha20 or XChaCha20 using a particular key -// and nonce. A *Cipher implements the cipher.Stream interface. -type Cipher struct { - // The ChaCha20 state is 16 words: 4 constant, 8 of key, 1 of counter - // (incremented after each block), and 3 of nonce. - key [8]uint32 - counter uint32 - nonce [3]uint32 - - // The last len bytes of buf are leftover key stream bytes from the previous - // XORKeyStream invocation. The size of buf depends on how many blocks are - // computed at a time by xorKeyStreamBlocks. - buf [bufSize]byte - len int - - // overflow is set when the counter overflowed, no more blocks can be - // generated, and the next XORKeyStream call should panic. - overflow bool - - // The counter-independent results of the first round are cached after they - // are computed the first time. 
- precompDone bool - p1, p5, p9, p13 uint32 - p2, p6, p10, p14 uint32 - p3, p7, p11, p15 uint32 -} - -var _ cipher.Stream = (*Cipher)(nil) - -// NewUnauthenticatedCipher creates a new ChaCha20 stream cipher with the given -// 32 bytes key and a 12 or 24 bytes nonce. If a nonce of 24 bytes is provided, -// the XChaCha20 construction will be used. It returns an error if key or nonce -// have any other length. -// -// Note that ChaCha20, like all stream ciphers, is not authenticated and allows -// attackers to silently tamper with the plaintext. For this reason, it is more -// appropriate as a building block than as a standalone encryption mechanism. -// Instead, consider using package golang.org/x/crypto/chacha20poly1305. -func NewUnauthenticatedCipher(key, nonce []byte) (*Cipher, error) { - // This function is split into a wrapper so that the Cipher allocation will - // be inlined, and depending on how the caller uses the return value, won't - // escape to the heap. - c := &Cipher{} - return newUnauthenticatedCipher(c, key, nonce) -} - -func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20: wrong key size") - } - if len(nonce) == NonceSizeX { - // XChaCha20 uses the ChaCha20 core to mix 16 bytes of the nonce into a - // derived key, allowing it to operate on a nonce of 24 bytes. See - // draft-irtf-cfrg-xchacha-01, Section 2.3. - key, _ = HChaCha20(key, nonce[0:16]) - cNonce := make([]byte, NonceSize) - copy(cNonce[4:12], nonce[16:24]) - nonce = cNonce - } else if len(nonce) != NonceSize { - return nil, errors.New("chacha20: wrong nonce size") - } - - key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint - c.key = [8]uint32{ - binary.LittleEndian.Uint32(key[0:4]), - binary.LittleEndian.Uint32(key[4:8]), - binary.LittleEndian.Uint32(key[8:12]), - binary.LittleEndian.Uint32(key[12:16]), - binary.LittleEndian.Uint32(key[16:20]), - binary.LittleEndian.Uint32(key[20:24]), - binary.LittleEndian.Uint32(key[24:28]), - binary.LittleEndian.Uint32(key[28:32]), - } - c.nonce = [3]uint32{ - binary.LittleEndian.Uint32(nonce[0:4]), - binary.LittleEndian.Uint32(nonce[4:8]), - binary.LittleEndian.Uint32(nonce[8:12]), - } - return c, nil -} - -// The constant first 4 words of the ChaCha20 state. -const ( - j0 uint32 = 0x61707865 // expa - j1 uint32 = 0x3320646e // nd 3 - j2 uint32 = 0x79622d32 // 2-by - j3 uint32 = 0x6b206574 // te k -) - -const blockSize = 64 - -// quarterRound is the core of ChaCha20. It shuffles the bits of 4 state words. -// It's executed 4 times for each of the 20 ChaCha20 rounds, operating on all 16 -// words each round, in columnar or diagonal groups of 4 at a time. -func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) { - a += b - d ^= a - d = bits.RotateLeft32(d, 16) - c += d - b ^= c - b = bits.RotateLeft32(b, 12) - a += b - d ^= a - d = bits.RotateLeft32(d, 8) - c += d - b ^= c - b = bits.RotateLeft32(b, 7) - return a, b, c, d -} - -// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will -// behave as if (64 * counter) bytes had been encrypted so far. -// -// To prevent accidental counter reuse, SetCounter panics if counter is less -// than the current value. -// -// Note that the execution time of XORKeyStream is not independent of the -// counter value. -func (s *Cipher) SetCounter(counter uint32) { - // Internally, s may buffer multiple blocks, which complicates this - // implementation slightly. 
When checking whether the counter has rolled - // back, we must use both s.counter and s.len to determine how many blocks - // we have already output. - outputCounter := s.counter - uint32(s.len)/blockSize - if s.overflow || counter < outputCounter { - panic("chacha20: SetCounter attempted to rollback counter") - } - - // In the general case, we set the new counter value and reset s.len to 0, - // causing the next call to XORKeyStream to refill the buffer. However, if - // we're advancing within the existing buffer, we can save work by simply - // setting s.len. - if counter < s.counter { - s.len = int(s.counter-counter) * blockSize - } else { - s.counter = counter - s.len = 0 - } -} - -// XORKeyStream XORs each byte in the given slice with a byte from the -// cipher's key stream. Dst and src must overlap entirely or not at all. -// -// If len(dst) < len(src), XORKeyStream will panic. It is acceptable -// to pass a dst bigger than src, and in that case, XORKeyStream will -// only update dst[:len(src)] and will not touch the rest of dst. -// -// Multiple calls to XORKeyStream behave as if the concatenation of -// the src buffers was passed in a single run. That is, Cipher -// maintains state and does not reset at each XORKeyStream call. -func (s *Cipher) XORKeyStream(dst, src []byte) { - if len(src) == 0 { - return - } - if len(dst) < len(src) { - panic("chacha20: output smaller than input") - } - dst = dst[:len(src)] - if subtle.InexactOverlap(dst, src) { - panic("chacha20: invalid buffer overlap") - } - - // First, drain any remaining key stream from a previous XORKeyStream. - if s.len != 0 { - keyStream := s.buf[bufSize-s.len:] - if len(src) < len(keyStream) { - keyStream = keyStream[:len(src)] - } - _ = src[len(keyStream)-1] // bounds check elimination hint - for i, b := range keyStream { - dst[i] = src[i] ^ b - } - s.len -= len(keyStream) - dst, src = dst[len(keyStream):], src[len(keyStream):] - } - if len(src) == 0 { - return - } - - // If we'd need to let the counter overflow and keep generating output, - // panic immediately. If instead we'd only reach the last block, remember - // not to generate any more output after the buffer is drained. - numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize - if s.overflow || uint64(s.counter)+numBlocks > 1<<32 { - panic("chacha20: counter overflow") - } else if uint64(s.counter)+numBlocks == 1<<32 { - s.overflow = true - } - - // xorKeyStreamBlocks implementations expect input lengths that are a - // multiple of bufSize. Platform-specific ones process multiple blocks at a - // time, so have bufSizes that are a multiple of blockSize. - - full := len(src) - len(src)%bufSize - if full > 0 { - s.xorKeyStreamBlocks(dst[:full], src[:full]) - } - dst, src = dst[full:], src[full:] - - // If using a multi-block xorKeyStreamBlocks would overflow, use the generic - // one that does one block at a time. - const blocksPerBuf = bufSize / blockSize - if uint64(s.counter)+blocksPerBuf > 1<<32 { - s.buf = [bufSize]byte{} - numBlocks := (len(src) + blockSize - 1) / blockSize - buf := s.buf[bufSize-numBlocks*blockSize:] - copy(buf, src) - s.xorKeyStreamBlocksGeneric(buf, buf) - s.len = len(buf) - copy(dst, buf) - return - } - - // If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and - // keep the leftover keystream for the next XORKeyStream invocation. 
- if len(src) > 0 { - s.buf = [bufSize]byte{} - copy(s.buf[:], src) - s.xorKeyStreamBlocks(s.buf[:], s.buf[:]) - s.len = bufSize - copy(dst, s.buf[:]) - } -} - -func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) { - if len(dst) != len(src) || len(dst)%blockSize != 0 { - panic("chacha20: internal error: wrong dst and/or src length") - } - - // To generate each block of key stream, the initial cipher state - // (represented below) is passed through 20 rounds of shuffling, - // alternatively applying quarterRounds by columns (like 1, 5, 9, 13) - // or by diagonals (like 1, 6, 11, 12). - // - // 0:cccccccc 1:cccccccc 2:cccccccc 3:cccccccc - // 4:kkkkkkkk 5:kkkkkkkk 6:kkkkkkkk 7:kkkkkkkk - // 8:kkkkkkkk 9:kkkkkkkk 10:kkkkkkkk 11:kkkkkkkk - // 12:bbbbbbbb 13:nnnnnnnn 14:nnnnnnnn 15:nnnnnnnn - // - // c=constant k=key b=blockcount n=nonce - var ( - c0, c1, c2, c3 = j0, j1, j2, j3 - c4, c5, c6, c7 = s.key[0], s.key[1], s.key[2], s.key[3] - c8, c9, c10, c11 = s.key[4], s.key[5], s.key[6], s.key[7] - _, c13, c14, c15 = s.counter, s.nonce[0], s.nonce[1], s.nonce[2] - ) - - // Three quarters of the first round don't depend on the counter, so we can - // calculate them here, and reuse them for multiple blocks in the loop, and - // for future XORKeyStream invocations. - if !s.precompDone { - s.p1, s.p5, s.p9, s.p13 = quarterRound(c1, c5, c9, c13) - s.p2, s.p6, s.p10, s.p14 = quarterRound(c2, c6, c10, c14) - s.p3, s.p7, s.p11, s.p15 = quarterRound(c3, c7, c11, c15) - s.precompDone = true - } - - // A condition of len(src) > 0 would be sufficient, but this also - // acts as a bounds check elimination hint. - for len(src) >= 64 && len(dst) >= 64 { - // The remainder of the first column round. - fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter) - - // The second diagonal round. - x0, x5, x10, x15 := quarterRound(fcr0, s.p5, s.p10, s.p15) - x1, x6, x11, x12 := quarterRound(s.p1, s.p6, s.p11, fcr12) - x2, x7, x8, x13 := quarterRound(s.p2, s.p7, fcr8, s.p13) - x3, x4, x9, x14 := quarterRound(s.p3, fcr4, s.p9, s.p14) - - // The remaining 18 rounds. - for i := 0; i < 9; i++ { - // Column round. - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - // Diagonal round. - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - // Add back the initial state to generate the key stream, then - // XOR the key stream with the source and write out the result. - addXor(dst[0:4], src[0:4], x0, c0) - addXor(dst[4:8], src[4:8], x1, c1) - addXor(dst[8:12], src[8:12], x2, c2) - addXor(dst[12:16], src[12:16], x3, c3) - addXor(dst[16:20], src[16:20], x4, c4) - addXor(dst[20:24], src[20:24], x5, c5) - addXor(dst[24:28], src[24:28], x6, c6) - addXor(dst[28:32], src[28:32], x7, c7) - addXor(dst[32:36], src[32:36], x8, c8) - addXor(dst[36:40], src[36:40], x9, c9) - addXor(dst[40:44], src[40:44], x10, c10) - addXor(dst[44:48], src[44:48], x11, c11) - addXor(dst[48:52], src[48:52], x12, s.counter) - addXor(dst[52:56], src[52:56], x13, c13) - addXor(dst[56:60], src[56:60], x14, c14) - addXor(dst[60:64], src[60:64], x15, c15) - - s.counter += 1 - - src, dst = src[blockSize:], dst[blockSize:] - } -} - -// HChaCha20 uses the ChaCha20 core to generate a derived key from a 32 bytes -// key and a 16 bytes nonce. 
It returns an error if key or nonce have any other -// length. It is used as part of the XChaCha20 construction. -func HChaCha20(key, nonce []byte) ([]byte, error) { - // This function is split into a wrapper so that the slice allocation will - // be inlined, and depending on how the caller uses the return value, won't - // escape to the heap. - out := make([]byte, 32) - return hChaCha20(out, key, nonce) -} - -func hChaCha20(out, key, nonce []byte) ([]byte, error) { - if len(key) != KeySize { - return nil, errors.New("chacha20: wrong HChaCha20 key size") - } - if len(nonce) != 16 { - return nil, errors.New("chacha20: wrong HChaCha20 nonce size") - } - - x0, x1, x2, x3 := j0, j1, j2, j3 - x4 := binary.LittleEndian.Uint32(key[0:4]) - x5 := binary.LittleEndian.Uint32(key[4:8]) - x6 := binary.LittleEndian.Uint32(key[8:12]) - x7 := binary.LittleEndian.Uint32(key[12:16]) - x8 := binary.LittleEndian.Uint32(key[16:20]) - x9 := binary.LittleEndian.Uint32(key[20:24]) - x10 := binary.LittleEndian.Uint32(key[24:28]) - x11 := binary.LittleEndian.Uint32(key[28:32]) - x12 := binary.LittleEndian.Uint32(nonce[0:4]) - x13 := binary.LittleEndian.Uint32(nonce[4:8]) - x14 := binary.LittleEndian.Uint32(nonce[8:12]) - x15 := binary.LittleEndian.Uint32(nonce[12:16]) - - for i := 0; i < 10; i++ { - // Diagonal round. - x0, x4, x8, x12 = quarterRound(x0, x4, x8, x12) - x1, x5, x9, x13 = quarterRound(x1, x5, x9, x13) - x2, x6, x10, x14 = quarterRound(x2, x6, x10, x14) - x3, x7, x11, x15 = quarterRound(x3, x7, x11, x15) - - // Column round. - x0, x5, x10, x15 = quarterRound(x0, x5, x10, x15) - x1, x6, x11, x12 = quarterRound(x1, x6, x11, x12) - x2, x7, x8, x13 = quarterRound(x2, x7, x8, x13) - x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14) - } - - _ = out[31] // bounds check elimination hint - binary.LittleEndian.PutUint32(out[0:4], x0) - binary.LittleEndian.PutUint32(out[4:8], x1) - binary.LittleEndian.PutUint32(out[8:12], x2) - binary.LittleEndian.PutUint32(out[12:16], x3) - binary.LittleEndian.PutUint32(out[16:20], x12) - binary.LittleEndian.PutUint32(out[20:24], x13) - binary.LittleEndian.PutUint32(out[24:28], x14) - binary.LittleEndian.PutUint32(out[28:32], x15) - return out, nil -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go b/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go deleted file mode 100644 index 025b49897e32..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_noasm.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!arm64 && !s390x && !ppc64le) || (arm64 && !go1.11) || !gc || purego -// +build !arm64,!s390x,!ppc64le arm64,!go1.11 !gc purego - -package chacha20 - -const bufSize = blockSize - -func (s *Cipher) xorKeyStreamBlocks(dst, src []byte) { - s.xorKeyStreamBlocksGeneric(dst, src) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go deleted file mode 100644 index da420b2e97b0..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc && !purego -// +build gc,!purego - -package chacha20 - -const bufSize = 256 - -//go:noescape -func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - chaCha20_ctr32_vsx(&dst[0], &src[0], len(src), &c.key, &c.counter) -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s b/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s deleted file mode 100644 index 3dad4b2fa27b..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s +++ /dev/null @@ -1,449 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Based on CRYPTOGAMS code with the following comment: -// # ==================================================================== -// # Written by Andy Polyakov for the OpenSSL -// # project. The module is, however, dual licensed under OpenSSL and -// # CRYPTOGAMS licenses depending on where you obtain it. For further -// # details see http://www.openssl.org/~appro/cryptogams/. -// # ==================================================================== - -// Code for the perl script that generates the ppc64 assembler -// can be found in the cryptogams repository at the link below. It is based on -// the original from openssl. - -// https://github.com/dot-asm/cryptogams/commit/a60f5b50ed908e91 - -// The differences in this and the original implementation are -// due to the calling conventions and initialization of constants. - -// +build gc,!purego - -#include "textflag.h" - -#define OUT R3 -#define INP R4 -#define LEN R5 -#define KEY R6 -#define CNT R7 -#define TMP R15 - -#define CONSTBASE R16 -#define BLOCKS R17 - -DATA consts<>+0x00(SB)/8, $0x3320646e61707865 -DATA consts<>+0x08(SB)/8, $0x6b20657479622d32 -DATA consts<>+0x10(SB)/8, $0x0000000000000001 -DATA consts<>+0x18(SB)/8, $0x0000000000000000 -DATA consts<>+0x20(SB)/8, $0x0000000000000004 -DATA consts<>+0x28(SB)/8, $0x0000000000000000 -DATA consts<>+0x30(SB)/8, $0x0a0b08090e0f0c0d -DATA consts<>+0x38(SB)/8, $0x0203000106070405 -DATA consts<>+0x40(SB)/8, $0x090a0b080d0e0f0c -DATA consts<>+0x48(SB)/8, $0x0102030005060704 -DATA consts<>+0x50(SB)/8, $0x6170786561707865 -DATA consts<>+0x58(SB)/8, $0x6170786561707865 -DATA consts<>+0x60(SB)/8, $0x3320646e3320646e -DATA consts<>+0x68(SB)/8, $0x3320646e3320646e -DATA consts<>+0x70(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x78(SB)/8, $0x79622d3279622d32 -DATA consts<>+0x80(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x88(SB)/8, $0x6b2065746b206574 -DATA consts<>+0x90(SB)/8, $0x0000000100000000 -DATA consts<>+0x98(SB)/8, $0x0000000300000002 -GLOBL consts<>(SB), RODATA, $0xa0 - -//func chaCha20_ctr32_vsx(out, inp *byte, len int, key *[8]uint32, counter *uint32) -TEXT ·chaCha20_ctr32_vsx(SB),NOSPLIT,$64-40 - MOVD out+0(FP), OUT - MOVD inp+8(FP), INP - MOVD len+16(FP), LEN - MOVD key+24(FP), KEY - MOVD counter+32(FP), CNT - - // Addressing for constants - MOVD $consts<>+0x00(SB), CONSTBASE - MOVD $16, R8 - MOVD $32, R9 - MOVD $48, R10 - MOVD $64, R11 - SRD $6, LEN, BLOCKS - // V16 - LXVW4X (CONSTBASE)(R0), VS48 - ADD $80,CONSTBASE - - // Load key into V17,V18 - LXVW4X (KEY)(R0), VS49 - LXVW4X (KEY)(R8), VS50 - - // Load CNT, NONCE into V19 - LXVW4X (CNT)(R0), VS51 - - // Clear V27 - VXOR V27, V27, V27 - - // V28 - LXVW4X (CONSTBASE)(R11), VS60 - - // splat slot from V19 -> V26 - VSPLTW $0, V19, V26 - - VSLDOI $4, V19, V27, V19 - VSLDOI $12, V27, V19, 
V19 - - VADDUWM V26, V28, V26 - - MOVD $10, R14 - MOVD R14, CTR - -loop_outer_vsx: - // V0, V1, V2, V3 - LXVW4X (R0)(CONSTBASE), VS32 - LXVW4X (R8)(CONSTBASE), VS33 - LXVW4X (R9)(CONSTBASE), VS34 - LXVW4X (R10)(CONSTBASE), VS35 - - // splat values from V17, V18 into V4-V11 - VSPLTW $0, V17, V4 - VSPLTW $1, V17, V5 - VSPLTW $2, V17, V6 - VSPLTW $3, V17, V7 - VSPLTW $0, V18, V8 - VSPLTW $1, V18, V9 - VSPLTW $2, V18, V10 - VSPLTW $3, V18, V11 - - // VOR - VOR V26, V26, V12 - - // splat values from V19 -> V13, V14, V15 - VSPLTW $1, V19, V13 - VSPLTW $2, V19, V14 - VSPLTW $3, V19, V15 - - // splat const values - VSPLTISW $-16, V27 - VSPLTISW $12, V28 - VSPLTISW $8, V29 - VSPLTISW $7, V30 - -loop_vsx: - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - VRLW V15, V27, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V28, V4 - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - - VADDUWM V0, V4, V0 - VADDUWM V1, V5, V1 - VADDUWM V2, V6, V2 - VADDUWM V3, V7, V3 - - VXOR V12, V0, V12 - VXOR V13, V1, V13 - VXOR V14, V2, V14 - VXOR V15, V3, V15 - - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - VRLW V15, V29, V15 - - VADDUWM V8, V12, V8 - VADDUWM V9, V13, V9 - VADDUWM V10, V14, V10 - VADDUWM V11, V15, V11 - - VXOR V4, V8, V4 - VXOR V5, V9, V5 - VXOR V6, V10, V6 - VXOR V7, V11, V7 - - VRLW V4, V30, V4 - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V27, V15 - VRLW V12, V27, V12 - VRLW V13, V27, V13 - VRLW V14, V27, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V28, V5 - VRLW V6, V28, V6 - VRLW V7, V28, V7 - VRLW V4, V28, V4 - - VADDUWM V0, V5, V0 - VADDUWM V1, V6, V1 - VADDUWM V2, V7, V2 - VADDUWM V3, V4, V3 - - VXOR V15, V0, V15 - VXOR V12, V1, V12 - VXOR V13, V2, V13 - VXOR V14, V3, V14 - - VRLW V15, V29, V15 - VRLW V12, V29, V12 - VRLW V13, V29, V13 - VRLW V14, V29, V14 - - VADDUWM V10, V15, V10 - VADDUWM V11, V12, V11 - VADDUWM V8, V13, V8 - VADDUWM V9, V14, V9 - - VXOR V5, V10, V5 - VXOR V6, V11, V6 - VXOR V7, V8, V7 - VXOR V4, V9, V4 - - VRLW V5, V30, V5 - VRLW V6, V30, V6 - VRLW V7, V30, V7 - VRLW V4, V30, V4 - BC 16, LT, loop_vsx - - VADDUWM V12, V26, V12 - - WORD $0x13600F8C // VMRGEW V0, V1, V27 - WORD $0x13821F8C // VMRGEW V2, V3, V28 - - WORD $0x10000E8C // VMRGOW V0, V1, V0 - WORD $0x10421E8C // VMRGOW V2, V3, V2 - - WORD $0x13A42F8C // VMRGEW V4, V5, V29 - WORD $0x13C63F8C // VMRGEW V6, V7, V30 - - XXPERMDI VS32, VS34, $0, VS33 - XXPERMDI VS32, VS34, $3, VS35 - XXPERMDI VS59, VS60, $0, VS32 - XXPERMDI VS59, VS60, $3, VS34 - - WORD $0x10842E8C // VMRGOW V4, V5, V4 - WORD $0x10C63E8C // VMRGOW V6, V7, V6 - - WORD $0x13684F8C // VMRGEW V8, V9, V27 - WORD $0x138A5F8C // VMRGEW V10, V11, V28 - - XXPERMDI VS36, VS38, $0, VS37 - XXPERMDI VS36, VS38, $3, VS39 - XXPERMDI VS61, VS62, $0, VS36 - XXPERMDI VS61, VS62, $3, VS38 - - WORD $0x11084E8C // VMRGOW V8, V9, V8 - WORD $0x114A5E8C // VMRGOW V10, V11, V10 - - WORD $0x13AC6F8C // VMRGEW 
V12, V13, V29 - WORD $0x13CE7F8C // VMRGEW V14, V15, V30 - - XXPERMDI VS40, VS42, $0, VS41 - XXPERMDI VS40, VS42, $3, VS43 - XXPERMDI VS59, VS60, $0, VS40 - XXPERMDI VS59, VS60, $3, VS42 - - WORD $0x118C6E8C // VMRGOW V12, V13, V12 - WORD $0x11CE7E8C // VMRGOW V14, V15, V14 - - VSPLTISW $4, V27 - VADDUWM V26, V27, V26 - - XXPERMDI VS44, VS46, $0, VS45 - XXPERMDI VS44, VS46, $3, VS47 - XXPERMDI VS61, VS62, $0, VS44 - XXPERMDI VS61, VS62, $3, VS46 - - VADDUWM V0, V16, V0 - VADDUWM V4, V17, V4 - VADDUWM V8, V18, V8 - VADDUWM V12, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - // Bottom of loop - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V1, V16, V0 - VADDUWM V5, V17, V4 - VADDUWM V9, V18, V8 - VADDUWM V13, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - VXOR V27, V0, V27 - - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(V10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V2, V16, V0 - VADDUWM V6, V17, V4 - VADDUWM V10, V18, V8 - VADDUWM V14, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - BEQ done_vsx - - VADDUWM V3, V16, V0 - VADDUWM V7, V17, V4 - VADDUWM V11, V18, V8 - VADDUWM V15, V19, V12 - - CMPU LEN, $64 - BLT tail_vsx - - LXVW4X (INP)(R0), VS59 - LXVW4X (INP)(R8), VS60 - LXVW4X (INP)(R9), VS61 - LXVW4X (INP)(R10), VS62 - - VXOR V27, V0, V27 - VXOR V28, V4, V28 - VXOR V29, V8, V29 - VXOR V30, V12, V30 - - STXVW4X VS59, (OUT)(R0) - STXVW4X VS60, (OUT)(R8) - ADD $64, INP - STXVW4X VS61, (OUT)(R9) - ADD $-64, LEN - STXVW4X VS62, (OUT)(R10) - ADD $64, OUT - - MOVD $10, R14 - MOVD R14, CTR - BNE loop_outer_vsx - -done_vsx: - // Increment counter by number of 64 byte blocks - MOVD (CNT), R14 - ADD BLOCKS, R14 - MOVD R14, (CNT) - RET - -tail_vsx: - ADD $32, R1, R11 - MOVD LEN, CTR - - // Save values on stack to copy from - STXVW4X VS32, (R11)(R0) - STXVW4X VS36, (R11)(R8) - STXVW4X VS40, (R11)(R9) - STXVW4X VS44, (R11)(R10) - ADD $-1, R11, R12 - ADD $-1, INP - ADD $-1, OUT - -looptail_vsx: - // Copying the result to OUT - // in bytes. - MOVBZU 1(R12), KEY - MOVBZU 1(INP), TMP - XOR KEY, TMP, KEY - MOVBU KEY, 1(OUT) - BC 16, LT, looptail_vsx - - // Clear the stack values - STXVW4X VS48, (R11)(R0) - STXVW4X VS48, (R11)(R8) - STXVW4X VS48, (R11)(R9) - STXVW4X VS48, (R11)(R10) - BR done_vsx diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go deleted file mode 100644 index c5898db46584..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build gc && !purego -// +build gc,!purego - -package chacha20 - -import "golang.org/x/sys/cpu" - -var haveAsm = cpu.S390X.HasVX - -const bufSize = 256 - -// xorKeyStreamVX is an assembly implementation of XORKeyStream. It must only -// be called when the vector facility is available. Implementation in asm_s390x.s. -//go:noescape -func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) - -func (c *Cipher) xorKeyStreamBlocks(dst, src []byte) { - if cpu.S390X.HasVX { - xorKeyStreamVX(dst, src, &c.key, &c.nonce, &c.counter) - } else { - c.xorKeyStreamBlocksGeneric(dst, src) - } -} diff --git a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s b/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s deleted file mode 100644 index 818161189bc4..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/chacha_s390x.s +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc,!purego - -#include "go_asm.h" -#include "textflag.h" - -// This is an implementation of the ChaCha20 encryption algorithm as -// specified in RFC 7539. It uses vector instructions to compute -// 4 keystream blocks in parallel (256 bytes) which are then XORed -// with the bytes in the input slice. - -GLOBL ·constants<>(SB), RODATA|NOPTR, $32 -// BSWAP: swap bytes in each 4-byte element -DATA ·constants<>+0x00(SB)/4, $0x03020100 -DATA ·constants<>+0x04(SB)/4, $0x07060504 -DATA ·constants<>+0x08(SB)/4, $0x0b0a0908 -DATA ·constants<>+0x0c(SB)/4, $0x0f0e0d0c -// J0: [j0, j1, j2, j3] -DATA ·constants<>+0x10(SB)/4, $0x61707865 -DATA ·constants<>+0x14(SB)/4, $0x3320646e -DATA ·constants<>+0x18(SB)/4, $0x79622d32 -DATA ·constants<>+0x1c(SB)/4, $0x6b206574 - -#define BSWAP V5 -#define J0 V6 -#define KEY0 V7 -#define KEY1 V8 -#define NONCE V9 -#define CTR V10 -#define M0 V11 -#define M1 V12 -#define M2 V13 -#define M3 V14 -#define INC V15 -#define X0 V16 -#define X1 V17 -#define X2 V18 -#define X3 V19 -#define X4 V20 -#define X5 V21 -#define X6 V22 -#define X7 V23 -#define X8 V24 -#define X9 V25 -#define X10 V26 -#define X11 V27 -#define X12 V28 -#define X13 V29 -#define X14 V30 -#define X15 V31 - -#define NUM_ROUNDS 20 - -#define ROUND4(a0, a1, a2, a3, b0, b1, b2, b3, c0, c1, c2, c3, d0, d1, d2, d3) \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $16, a2, a2 \ - VERLLF $16, b2, b2 \ - VERLLF $16, c2, c2 \ - VERLLF $16, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $12, a1, a1 \ - VERLLF $12, b1, b1 \ - VERLLF $12, c1, c1 \ - VERLLF $12, d1, d1 \ - VAF a1, a0, a0 \ - VAF b1, b0, b0 \ - VAF c1, c0, c0 \ - VAF d1, d0, d0 \ - VX a0, a2, a2 \ - VX b0, b2, b2 \ - VX c0, c2, c2 \ - VX d0, d2, d2 \ - VERLLF $8, a2, a2 \ - VERLLF $8, b2, b2 \ - VERLLF $8, c2, c2 \ - VERLLF $8, d2, d2 \ - VAF a2, a3, a3 \ - VAF b2, b3, b3 \ - VAF c2, c3, c3 \ - VAF d2, d3, d3 \ - VX a3, a1, a1 \ - VX b3, b1, b1 \ - VX c3, c1, c1 \ - VX d3, d1, d1 \ - VERLLF $7, a1, a1 \ - VERLLF $7, b1, b1 \ - VERLLF $7, c1, c1 \ - VERLLF $7, d1, d1 - -#define PERMUTE(mask, v0, v1, v2, v3) \ - VPERM v0, v0, mask, v0 \ - VPERM v1, v1, mask, v1 \ - VPERM v2, v2, mask, v2 \ - VPERM v3, v3, mask, v3 - -#define ADDV(x, v0, v1, v2, v3) \ - VAF x, v0, v0 \ - VAF x, v1, v1 \ - 
VAF x, v2, v2 \ - VAF x, v3, v3 - -#define XORV(off, dst, src, v0, v1, v2, v3) \ - VLM off(src), M0, M3 \ - PERMUTE(BSWAP, v0, v1, v2, v3) \ - VX v0, M0, M0 \ - VX v1, M1, M1 \ - VX v2, M2, M2 \ - VX v3, M3, M3 \ - VSTM M0, M3, off(dst) - -#define SHUFFLE(a, b, c, d, t, u, v, w) \ - VMRHF a, c, t \ // t = {a[0], c[0], a[1], c[1]} - VMRHF b, d, u \ // u = {b[0], d[0], b[1], d[1]} - VMRLF a, c, v \ // v = {a[2], c[2], a[3], c[3]} - VMRLF b, d, w \ // w = {b[2], d[2], b[3], d[3]} - VMRHF t, u, a \ // a = {a[0], b[0], c[0], d[0]} - VMRLF t, u, b \ // b = {a[1], b[1], c[1], d[1]} - VMRHF v, w, c \ // c = {a[2], b[2], c[2], d[2]} - VMRLF v, w, d // d = {a[3], b[3], c[3], d[3]} - -// func xorKeyStreamVX(dst, src []byte, key *[8]uint32, nonce *[3]uint32, counter *uint32) -TEXT ·xorKeyStreamVX(SB), NOSPLIT, $0 - MOVD $·constants<>(SB), R1 - MOVD dst+0(FP), R2 // R2=&dst[0] - LMG src+24(FP), R3, R4 // R3=&src[0] R4=len(src) - MOVD key+48(FP), R5 // R5=key - MOVD nonce+56(FP), R6 // R6=nonce - MOVD counter+64(FP), R7 // R7=counter - - // load BSWAP and J0 - VLM (R1), BSWAP, J0 - - // setup - MOVD $95, R0 - VLM (R5), KEY0, KEY1 - VLL R0, (R6), NONCE - VZERO M0 - VLEIB $7, $32, M0 - VSRLB M0, NONCE, NONCE - - // initialize counter values - VLREPF (R7), CTR - VZERO INC - VLEIF $1, $1, INC - VLEIF $2, $2, INC - VLEIF $3, $3, INC - VAF INC, CTR, CTR - VREPIF $4, INC - -chacha: - VREPF $0, J0, X0 - VREPF $1, J0, X1 - VREPF $2, J0, X2 - VREPF $3, J0, X3 - VREPF $0, KEY0, X4 - VREPF $1, KEY0, X5 - VREPF $2, KEY0, X6 - VREPF $3, KEY0, X7 - VREPF $0, KEY1, X8 - VREPF $1, KEY1, X9 - VREPF $2, KEY1, X10 - VREPF $3, KEY1, X11 - VLR CTR, X12 - VREPF $1, NONCE, X13 - VREPF $2, NONCE, X14 - VREPF $3, NONCE, X15 - - MOVD $(NUM_ROUNDS/2), R1 - -loop: - ROUND4(X0, X4, X12, X8, X1, X5, X13, X9, X2, X6, X14, X10, X3, X7, X15, X11) - ROUND4(X0, X5, X15, X10, X1, X6, X12, X11, X2, X7, X13, X8, X3, X4, X14, X9) - - ADD $-1, R1 - BNE loop - - // decrement length - ADD $-256, R4 - - // rearrange vectors - SHUFFLE(X0, X1, X2, X3, M0, M1, M2, M3) - ADDV(J0, X0, X1, X2, X3) - SHUFFLE(X4, X5, X6, X7, M0, M1, M2, M3) - ADDV(KEY0, X4, X5, X6, X7) - SHUFFLE(X8, X9, X10, X11, M0, M1, M2, M3) - ADDV(KEY1, X8, X9, X10, X11) - VAF CTR, X12, X12 - SHUFFLE(X12, X13, X14, X15, M0, M1, M2, M3) - ADDV(NONCE, X12, X13, X14, X15) - - // increment counters - VAF INC, CTR, CTR - - // xor keystream with plaintext - XORV(0*64, R2, R3, X0, X4, X8, X12) - XORV(1*64, R2, R3, X1, X5, X9, X13) - XORV(2*64, R2, R3, X2, X6, X10, X14) - XORV(3*64, R2, R3, X3, X7, X11, X15) - - // increment pointers - MOVD $256(R2), R2 - MOVD $256(R3), R3 - - CMPBNE R4, $0, chacha - - VSTEF $0, CTR, (R7) - RET diff --git a/vendor/golang.org/x/crypto/chacha20/xor.go b/vendor/golang.org/x/crypto/chacha20/xor.go deleted file mode 100644 index c2d04851e0d1..000000000000 --- a/vendor/golang.org/x/crypto/chacha20/xor.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found src the LICENSE file. - -package chacha20 - -import "runtime" - -// Platforms that have fast unaligned 32-bit little endian accesses. -const unaligned = runtime.GOARCH == "386" || - runtime.GOARCH == "amd64" || - runtime.GOARCH == "arm64" || - runtime.GOARCH == "ppc64le" || - runtime.GOARCH == "s390x" - -// addXor reads a little endian uint32 from src, XORs it with (a + b) and -// places the result in little endian byte order in dst. 
-func addXor(dst, src []byte, a, b uint32) { - _, _ = src[3], dst[3] // bounds check elimination hint - if unaligned { - // The compiler should optimize this code into - // 32-bit unaligned little endian loads and stores. - // TODO: delete once the compiler does a reliably - // good job with the generic code below. - // See issue #25111 for more details. - v := uint32(src[0]) - v |= uint32(src[1]) << 8 - v |= uint32(src[2]) << 16 - v |= uint32(src[3]) << 24 - v ^= a + b - dst[0] = byte(v) - dst[1] = byte(v >> 8) - dst[2] = byte(v >> 16) - dst[3] = byte(v >> 24) - } else { - a += b - dst[0] = src[0] ^ byte(a) - dst[1] = src[1] ^ byte(a>>8) - dst[2] = src[2] ^ byte(a>>16) - dst[3] = src[3] ^ byte(a>>24) - } -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 4b9a655d1b56..000000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. -package curve25519 // import "golang.org/x/crypto/curve25519" - -import ( - "crypto/subtle" - "fmt" -) - -// ScalarMult sets dst to the product scalar * point. -// -// Deprecated: when provided a low-order point, ScalarMult will set dst to all -// zeroes, irrespective of the scalar. Instead, use the X25519 function, which -// will return an error. -func ScalarMult(dst, scalar, point *[32]byte) { - scalarMult(dst, scalar, point) -} - -// ScalarBaseMult sets dst to the product scalar * base where base is the -// standard generator. -// -// It is recommended to use the X25519 function with Basepoint instead, as -// copying into fixed size arrays can lead to unexpected bugs. -func ScalarBaseMult(dst, scalar *[32]byte) { - ScalarMult(dst, scalar, &basePoint) -} - -const ( - // ScalarSize is the size of the scalar input to X25519. - ScalarSize = 32 - // PointSize is the size of the point input to X25519. - PointSize = 32 -) - -// Basepoint is the canonical Curve25519 generator. -var Basepoint []byte - -var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - -func init() { Basepoint = basePoint[:] } - -func checkBasepoint() { - if subtle.ConstantTimeCompare(Basepoint, []byte{ - 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - }) != 1 { - panic("curve25519: global Basepoint value was modified") - } -} - -// X25519 returns the result of the scalar multiplication (scalar * point), -// according to RFC 7748, Section 5. scalar, point and the return value are -// slices of 32 bytes. -// -// scalar can be generated at random, for example with crypto/rand. point should -// be either Basepoint or the output of another X25519 call. -// -// If point is Basepoint (but not if it's a different slice with the same -// contents) a precomputed implementation might be used for performance. -func X25519(scalar, point []byte) ([]byte, error) { - // Outline the body of function, to let the allocation be inlined in the - // caller, and possibly avoid escaping to the heap. 
- var dst [32]byte - return x25519(&dst, scalar, point) -} - -func x25519(dst *[32]byte, scalar, point []byte) ([]byte, error) { - var in [32]byte - if l := len(scalar); l != 32 { - return nil, fmt.Errorf("bad scalar length: %d, expected %d", l, 32) - } - if l := len(point); l != 32 { - return nil, fmt.Errorf("bad point length: %d, expected %d", l, 32) - } - copy(in[:], scalar) - if &point[0] == &Basepoint[0] { - checkBasepoint() - ScalarBaseMult(dst, &in) - } else { - var base, zero [32]byte - copy(base[:], point) - ScalarMult(dst, &in, &base) - if subtle.ConstantTimeCompare(dst[:], zero[:]) == 1 { - return nil, fmt.Errorf("bad input point: low order point") - } - } - return dst[:], nil -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go deleted file mode 100644 index 84858480dff5..000000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build amd64 && gc && !purego -// +build amd64,gc,!purego - -package curve25519 - -// These functions are implemented in the .s files. The names of the functions -// in the rest of the file are also taken from the SUPERCOP sources to help -// people following along. - -//go:noescape - -func cswap(inout *[5]uint64, v uint64) - -//go:noescape - -func ladderstep(inout *[5][5]uint64) - -//go:noescape - -func freeze(inout *[5]uint64) - -//go:noescape - -func mul(dest, a, b *[5]uint64) - -//go:noescape - -func square(out, in *[5]uint64) - -// mladder uses a Montgomery ladder to calculate (xr/zr) *= s. -func mladder(xr, zr *[5]uint64, s *[32]byte) { - var work [5][5]uint64 - - work[0] = *xr - setint(&work[1], 1) - setint(&work[2], 0) - work[3] = *xr - setint(&work[4], 1) - - j := uint(6) - var prevbit byte - - for i := 31; i >= 0; i-- { - for j < 8 { - bit := ((*s)[i] >> j) & 1 - swap := bit ^ prevbit - prevbit = bit - cswap(&work[1], uint64(swap)) - ladderstep(&work) - j-- - } - j = 7 - } - - *xr = work[1] - *zr = work[2] -} - -func scalarMult(out, in, base *[32]byte) { - var e [32]byte - copy(e[:], (*in)[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var t, z [5]uint64 - unpack(&t, base) - mladder(&t, &z, &e) - invert(&z, &z) - mul(&t, &t, &z) - pack(out, &t) -} - -func setint(r *[5]uint64, v uint64) { - r[0] = v - r[1] = 0 - r[2] = 0 - r[3] = 0 - r[4] = 0 -} - -// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian -// order. 
-func unpack(r *[5]uint64, x *[32]byte) { - r[0] = uint64(x[0]) | - uint64(x[1])<<8 | - uint64(x[2])<<16 | - uint64(x[3])<<24 | - uint64(x[4])<<32 | - uint64(x[5])<<40 | - uint64(x[6]&7)<<48 - - r[1] = uint64(x[6])>>3 | - uint64(x[7])<<5 | - uint64(x[8])<<13 | - uint64(x[9])<<21 | - uint64(x[10])<<29 | - uint64(x[11])<<37 | - uint64(x[12]&63)<<45 - - r[2] = uint64(x[12])>>6 | - uint64(x[13])<<2 | - uint64(x[14])<<10 | - uint64(x[15])<<18 | - uint64(x[16])<<26 | - uint64(x[17])<<34 | - uint64(x[18])<<42 | - uint64(x[19]&1)<<50 - - r[3] = uint64(x[19])>>1 | - uint64(x[20])<<7 | - uint64(x[21])<<15 | - uint64(x[22])<<23 | - uint64(x[23])<<31 | - uint64(x[24])<<39 | - uint64(x[25]&15)<<47 - - r[4] = uint64(x[25])>>4 | - uint64(x[26])<<4 | - uint64(x[27])<<12 | - uint64(x[28])<<20 | - uint64(x[29])<<28 | - uint64(x[30])<<36 | - uint64(x[31]&127)<<44 -} - -// pack sets out = x where out is the usual, little-endian form of the 5, -// 51-bit limbs in x. -func pack(out *[32]byte, x *[5]uint64) { - t := *x - freeze(&t) - - out[0] = byte(t[0]) - out[1] = byte(t[0] >> 8) - out[2] = byte(t[0] >> 16) - out[3] = byte(t[0] >> 24) - out[4] = byte(t[0] >> 32) - out[5] = byte(t[0] >> 40) - out[6] = byte(t[0] >> 48) - - out[6] ^= byte(t[1]<<3) & 0xf8 - out[7] = byte(t[1] >> 5) - out[8] = byte(t[1] >> 13) - out[9] = byte(t[1] >> 21) - out[10] = byte(t[1] >> 29) - out[11] = byte(t[1] >> 37) - out[12] = byte(t[1] >> 45) - - out[12] ^= byte(t[2]<<6) & 0xc0 - out[13] = byte(t[2] >> 2) - out[14] = byte(t[2] >> 10) - out[15] = byte(t[2] >> 18) - out[16] = byte(t[2] >> 26) - out[17] = byte(t[2] >> 34) - out[18] = byte(t[2] >> 42) - out[19] = byte(t[2] >> 50) - - out[19] ^= byte(t[3]<<1) & 0xfe - out[20] = byte(t[3] >> 7) - out[21] = byte(t[3] >> 15) - out[22] = byte(t[3] >> 23) - out[23] = byte(t[3] >> 31) - out[24] = byte(t[3] >> 39) - out[25] = byte(t[3] >> 47) - - out[25] ^= byte(t[4]<<4) & 0xf0 - out[26] = byte(t[4] >> 4) - out[27] = byte(t[4] >> 12) - out[28] = byte(t[4] >> 20) - out[29] = byte(t[4] >> 28) - out[30] = byte(t[4] >> 36) - out[31] = byte(t[4] >> 44) -} - -// invert calculates r = x^-1 mod p using Fermat's little theorem. 
-func invert(r *[5]uint64, x *[5]uint64) { - var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64 - - square(&z2, x) /* 2 */ - square(&t, &z2) /* 4 */ - square(&t, &t) /* 8 */ - mul(&z9, &t, x) /* 9 */ - mul(&z11, &z9, &z2) /* 11 */ - square(&t, &z11) /* 22 */ - mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */ - - square(&t, &z2_5_0) /* 2^6 - 2^1 */ - for i := 1; i < 5; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */ - - square(&t, &z2_10_0) /* 2^11 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^20 - 2^10 */ - square(&t, &t) - } - mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */ - - square(&t, &z2_20_0) /* 2^21 - 2^1 */ - for i := 1; i < 20; i++ { /* 2^40 - 2^20 */ - square(&t, &t) - } - mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */ - - square(&t, &t) /* 2^41 - 2^1 */ - for i := 1; i < 10; i++ { /* 2^50 - 2^10 */ - square(&t, &t) - } - mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */ - - square(&t, &z2_50_0) /* 2^51 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^100 - 2^50 */ - square(&t, &t) - } - mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */ - - square(&t, &z2_100_0) /* 2^101 - 2^1 */ - for i := 1; i < 100; i++ { /* 2^200 - 2^100 */ - square(&t, &t) - } - mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */ - - square(&t, &t) /* 2^201 - 2^1 */ - for i := 1; i < 50; i++ { /* 2^250 - 2^50 */ - square(&t, &t) - } - mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */ - - square(&t, &t) /* 2^251 - 2^1 */ - square(&t, &t) /* 2^252 - 2^2 */ - square(&t, &t) /* 2^253 - 2^3 */ - - square(&t, &t) /* 2^254 - 2^4 */ - - square(&t, &t) /* 2^255 - 2^5 */ - mul(r, &t, &z11) /* 2^255 - 21 */ -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s b/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s deleted file mode 100644 index 6c533809266b..000000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_amd64.s +++ /dev/null @@ -1,1793 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html - -// +build amd64,gc,!purego - -#define REDMASK51 0x0007FFFFFFFFFFFF - -// These constants cannot be encoded in non-MOVQ immediates. -// We access them directly from memory instead. 
- -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 - -// func freeze(inout *[5]uint64) -TEXT ·freeze(SB),7,$0-8 - MOVQ inout+0(FP), DI - - MOVQ 0(DI),SI - MOVQ 8(DI),DX - MOVQ 16(DI),CX - MOVQ 24(DI),R8 - MOVQ 32(DI),R9 - MOVQ $REDMASK51,AX - MOVQ AX,R10 - SUBQ $18,R10 - MOVQ $3,R11 -REDUCELOOP: - MOVQ SI,R12 - SHRQ $51,R12 - ANDQ AX,SI - ADDQ R12,DX - MOVQ DX,R12 - SHRQ $51,R12 - ANDQ AX,DX - ADDQ R12,CX - MOVQ CX,R12 - SHRQ $51,R12 - ANDQ AX,CX - ADDQ R12,R8 - MOVQ R8,R12 - SHRQ $51,R12 - ANDQ AX,R8 - ADDQ R12,R9 - MOVQ R9,R12 - SHRQ $51,R12 - ANDQ AX,R9 - IMUL3Q $19,R12,R12 - ADDQ R12,SI - SUBQ $1,R11 - JA REDUCELOOP - MOVQ $1,R12 - CMPQ R10,SI - CMOVQLT R11,R12 - CMPQ AX,DX - CMOVQNE R11,R12 - CMPQ AX,CX - CMOVQNE R11,R12 - CMPQ AX,R8 - CMOVQNE R11,R12 - CMPQ AX,R9 - CMOVQNE R11,R12 - NEGQ R12 - ANDQ R12,AX - ANDQ R12,R10 - SUBQ R10,SI - SUBQ AX,DX - SUBQ AX,CX - SUBQ AX,R8 - SUBQ AX,R9 - MOVQ SI,0(DI) - MOVQ DX,8(DI) - MOVQ CX,16(DI) - MOVQ R8,24(DI) - MOVQ R9,32(DI) - RET - -// func ladderstep(inout *[5][5]uint64) -TEXT ·ladderstep(SB),0,$296-8 - MOVQ inout+0(FP),DI - - MOVQ 40(DI),SI - MOVQ 48(DI),DX - MOVQ 56(DI),CX - MOVQ 64(DI),R8 - MOVQ 72(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 80(DI),SI - ADDQ 88(DI),DX - ADDQ 96(DI),CX - ADDQ 104(DI),R8 - ADDQ 112(DI),R9 - SUBQ 80(DI),AX - SUBQ 88(DI),R10 - SUBQ 96(DI),R11 - SUBQ 104(DI),R12 - SUBQ 112(DI),R13 - MOVQ SI,0(SP) - MOVQ DX,8(SP) - MOVQ CX,16(SP) - MOVQ R8,24(SP) - MOVQ R9,32(SP) - MOVQ AX,40(SP) - MOVQ R10,48(SP) - MOVQ R11,56(SP) - MOVQ R12,64(SP) - MOVQ R13,72(SP) - MOVQ 40(SP),AX - MULQ 40(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 40(SP),AX - SHLQ $1,AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 48(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 48(SP),AX - SHLQ $1,AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 48(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 56(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 64(SP),DX - IMUL3Q $38,DX,AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 72(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(SP) - MOVQ R8,88(SP) - MOVQ R9,96(SP) - MOVQ AX,104(SP) - 
MOVQ R10,112(SP) - MOVQ 0(SP),AX - MULQ 0(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SP),AX - SHLQ $1,AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 8(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - SHLQ $1,AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 16(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 24(SP),DX - IMUL3Q $38,DX,AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 32(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(SP) - MOVQ R8,128(SP) - MOVQ R9,136(SP) - MOVQ AX,144(SP) - MOVQ R10,152(SP) - MOVQ SI,SI - MOVQ R8,DX - MOVQ R9,CX - MOVQ AX,R8 - MOVQ R10,R9 - ADDQ ·_2P0(SB),SI - ADDQ ·_2P1234(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R8 - ADDQ ·_2P1234(SB),R9 - SUBQ 80(SP),SI - SUBQ 88(SP),DX - SUBQ 96(SP),CX - SUBQ 104(SP),R8 - SUBQ 112(SP),R9 - MOVQ SI,160(SP) - MOVQ DX,168(SP) - MOVQ CX,176(SP) - MOVQ R8,184(SP) - MOVQ R9,192(SP) - MOVQ 120(DI),SI - MOVQ 128(DI),DX - MOVQ 136(DI),CX - MOVQ 144(DI),R8 - MOVQ 152(DI),R9 - MOVQ SI,AX - MOVQ DX,R10 - MOVQ CX,R11 - MOVQ R8,R12 - MOVQ R9,R13 - ADDQ ·_2P0(SB),AX - ADDQ ·_2P1234(SB),R10 - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 160(DI),SI - ADDQ 168(DI),DX - ADDQ 176(DI),CX - ADDQ 184(DI),R8 - ADDQ 192(DI),R9 - SUBQ 160(DI),AX - SUBQ 168(DI),R10 - SUBQ 176(DI),R11 - SUBQ 184(DI),R12 - SUBQ 192(DI),R13 - MOVQ SI,200(SP) - MOVQ DX,208(SP) - MOVQ CX,216(SP) - MOVQ R8,224(SP) - MOVQ R9,232(SP) - MOVQ AX,240(SP) - MOVQ R10,248(SP) - MOVQ R11,256(SP) - MOVQ R12,264(SP) - MOVQ R13,272(SP) - MOVQ 224(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,280(SP) - MULQ 56(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 232(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,288(SP) - MULQ 48(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 40(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 200(SP),AX - MULQ 48(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 200(SP),AX - MULQ 56(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 200(SP),AX - MULQ 64(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 200(SP),AX - MULQ 72(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 208(SP),AX - MULQ 40(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 48(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 56(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 208(SP),AX - MULQ 64(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),AX - MULQ 40(SP) - 
ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 216(SP),AX - MULQ 48(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 216(SP),AX - MULQ 56(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 64(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 216(SP),DX - IMUL3Q $19,DX,AX - MULQ 72(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 224(SP),AX - MULQ 40(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 224(SP),AX - MULQ 48(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 280(SP),AX - MULQ 64(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 280(SP),AX - MULQ 72(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 232(SP),AX - MULQ 40(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 288(SP),AX - MULQ 56(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 288(SP),AX - MULQ 64(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 288(SP),AX - MULQ 72(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(SP) - MOVQ R8,48(SP) - MOVQ R9,56(SP) - MOVQ AX,64(SP) - MOVQ R10,72(SP) - MOVQ 264(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,200(SP) - MULQ 16(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 272(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,208(SP) - MULQ 8(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 0(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 240(SP),AX - MULQ 8(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 240(SP),AX - MULQ 16(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 240(SP),AX - MULQ 24(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 240(SP),AX - MULQ 32(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 248(SP),AX - MULQ 0(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 248(SP),AX - MULQ 8(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 248(SP),AX - MULQ 16(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 248(SP),AX - MULQ 24(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 248(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),AX - MULQ 0(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 256(SP),AX - MULQ 8(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 256(SP),AX - MULQ 16(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 24(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 256(SP),DX - IMUL3Q $19,DX,AX - MULQ 32(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 264(SP),AX - MULQ 0(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 264(SP),AX - MULQ 8(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 200(SP),AX - MULQ 24(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 200(SP),AX - MULQ 32(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 272(SP),AX - MULQ 0(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 208(SP),AX - MULQ 16(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 208(SP),AX - MULQ 24(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 208(SP),AX - MULQ 32(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX 
- ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,DX - MOVQ R8,CX - MOVQ R9,R11 - MOVQ AX,R12 - MOVQ R10,R13 - ADDQ ·_2P0(SB),DX - ADDQ ·_2P1234(SB),CX - ADDQ ·_2P1234(SB),R11 - ADDQ ·_2P1234(SB),R12 - ADDQ ·_2P1234(SB),R13 - ADDQ 40(SP),SI - ADDQ 48(SP),R8 - ADDQ 56(SP),R9 - ADDQ 64(SP),AX - ADDQ 72(SP),R10 - SUBQ 40(SP),DX - SUBQ 48(SP),CX - SUBQ 56(SP),R11 - SUBQ 64(SP),R12 - SUBQ 72(SP),R13 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ DX,160(DI) - MOVQ CX,168(DI) - MOVQ R11,176(DI) - MOVQ R12,184(DI) - MOVQ R13,192(DI) - MOVQ 120(DI),AX - MULQ 120(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 128(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 136(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 144(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(DI),AX - SHLQ $1,AX - MULQ 152(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(DI),AX - MULQ 128(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 136(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(DI),AX - SHLQ $1,AX - MULQ 144(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),AX - MULQ 136(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 144(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $19,DX,AX - MULQ 144(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(DI),DX - IMUL3Q $38,DX,AX - MULQ 152(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(DI),DX - IMUL3Q $19,DX,AX - MULQ 152(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,120(DI) - MOVQ R8,128(DI) - MOVQ R9,136(DI) - MOVQ AX,144(DI) - MOVQ R10,152(DI) - MOVQ 160(DI),AX - MULQ 160(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 168(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 176(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 184(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - SHLQ $1,AX - MULQ 192(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 168(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 176(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - SHLQ $1,AX - MULQ 184(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 176(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 184(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),DX - IMUL3Q $38,DX,AX - MULQ 192(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - 
SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - ANDQ DX,SI - MOVQ CX,R8 - SHRQ $51,CX - ADDQ R10,CX - ANDQ DX,R8 - MOVQ CX,R9 - SHRQ $51,CX - ADDQ R12,CX - ANDQ DX,R9 - MOVQ CX,AX - SHRQ $51,CX - ADDQ R14,CX - ANDQ DX,AX - MOVQ CX,R10 - SHRQ $51,CX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 184(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 16(DI) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 192(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 0(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 160(DI),AX - MULQ 8(DI) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 160(DI),AX - MULQ 16(DI) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 160(DI),AX - MULQ 24(DI) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 160(DI),AX - MULQ 32(DI) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 168(DI),AX - MULQ 0(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 168(DI),AX - MULQ 8(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 168(DI),AX - MULQ 16(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 168(DI),AX - MULQ 24(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 168(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),AX - MULQ 0(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 176(DI),AX - MULQ 8(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 176(DI),AX - MULQ 16(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 24(DI) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 176(DI),DX - IMUL3Q $19,DX,AX - MULQ 32(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 184(DI),AX - MULQ 0(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 184(DI),AX - MULQ 8(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 24(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 32(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 192(DI),AX - MULQ 0(DI) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 16(DI) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 24(DI) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 32(DI) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,160(DI) - MOVQ R8,168(DI) - MOVQ R9,176(DI) - MOVQ AX,184(DI) - MOVQ R10,192(DI) - MOVQ 144(SP),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 96(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 152(SP),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 88(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 80(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 120(SP),AX - MULQ 88(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 120(SP),AX - MULQ 96(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 120(SP),AX - MULQ 104(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 120(SP),AX - MULQ 112(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 128(SP),AX - MULQ 80(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 128(SP),AX - MULQ 88(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 128(SP),AX - MULQ 96(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 128(SP),AX - MULQ 104(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 128(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 
136(SP),AX - MULQ 80(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 136(SP),AX - MULQ 88(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 136(SP),AX - MULQ 96(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 104(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 136(SP),DX - IMUL3Q $19,DX,AX - MULQ 112(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 144(SP),AX - MULQ 80(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 144(SP),AX - MULQ 88(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 104(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 112(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 152(SP),AX - MULQ 80(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 96(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 8(SP),AX - MULQ 104(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 112(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,40(DI) - MOVQ R8,48(DI) - MOVQ R9,56(DI) - MOVQ AX,64(DI) - MOVQ R10,72(DI) - MOVQ 160(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - MOVQ AX,SI - MOVQ DX,CX - MOVQ 168(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,CX - MOVQ DX,R8 - MOVQ 176(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R8 - MOVQ DX,R9 - MOVQ 184(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R9 - MOVQ DX,R10 - MOVQ 192(SP),AX - MULQ ·_121666_213(SB) - SHRQ $13,AX - ADDQ AX,R10 - IMUL3Q $19,DX,DX - ADDQ DX,SI - ADDQ 80(SP),SI - ADDQ 88(SP),CX - ADDQ 96(SP),R8 - ADDQ 104(SP),R9 - ADDQ 112(SP),R10 - MOVQ SI,80(DI) - MOVQ CX,88(DI) - MOVQ R8,96(DI) - MOVQ R9,104(DI) - MOVQ R10,112(DI) - MOVQ 104(DI),SI - IMUL3Q $19,SI,AX - MOVQ AX,0(SP) - MULQ 176(SP) - MOVQ AX,SI - MOVQ DX,CX - MOVQ 112(DI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 168(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 160(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 80(DI),AX - MULQ 168(SP) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 80(DI),AX - MULQ 176(SP) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 80(DI),AX - MULQ 184(SP) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 80(DI),AX - MULQ 192(SP) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 88(DI),AX - MULQ 160(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 88(DI),AX - MULQ 168(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 88(DI),AX - MULQ 176(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 88(DI),AX - MULQ 184(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 88(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),AX - MULQ 160(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 96(DI),AX - MULQ 168(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 96(DI),AX - MULQ 176(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 184(SP) - ADDQ AX,SI - ADCQ DX,CX - MOVQ 96(DI),DX - IMUL3Q $19,DX,AX - MULQ 192(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 104(DI),AX - MULQ 160(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 104(DI),AX - MULQ 168(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 0(SP),AX - MULQ 184(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SP),AX - MULQ 192(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 112(DI),AX - MULQ 160(SP) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SP),AX - MULQ 176(SP) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 
8(SP),AX - MULQ 184(SP) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 192(SP) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ $REDMASK51,DX - SHLQ $13,SI,CX - ANDQ DX,SI - SHLQ $13,R8,R9 - ANDQ DX,R8 - ADDQ CX,R8 - SHLQ $13,R10,R11 - ANDQ DX,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ DX,R12 - ADDQ R11,R12 - SHLQ $13,R14,R15 - ANDQ DX,R14 - ADDQ R13,R14 - IMUL3Q $19,R15,CX - ADDQ CX,SI - MOVQ SI,CX - SHRQ $51,CX - ADDQ R8,CX - MOVQ CX,R8 - SHRQ $51,CX - ANDQ DX,SI - ADDQ R10,CX - MOVQ CX,R9 - SHRQ $51,CX - ANDQ DX,R8 - ADDQ R12,CX - MOVQ CX,AX - SHRQ $51,CX - ANDQ DX,R9 - ADDQ R14,CX - MOVQ CX,R10 - SHRQ $51,CX - ANDQ DX,AX - IMUL3Q $19,CX,CX - ADDQ CX,SI - ANDQ DX,R10 - MOVQ SI,80(DI) - MOVQ R8,88(DI) - MOVQ R9,96(DI) - MOVQ AX,104(DI) - MOVQ R10,112(DI) - RET - -// func cswap(inout *[4][5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - SUBQ $1, SI - NOTQ SI - MOVQ SI, X15 - PSHUFD $0x44, X15, X15 - - MOVOU 0(DI), X0 - MOVOU 16(DI), X2 - MOVOU 32(DI), X4 - MOVOU 48(DI), X6 - MOVOU 64(DI), X8 - MOVOU 80(DI), X1 - MOVOU 96(DI), X3 - MOVOU 112(DI), X5 - MOVOU 128(DI), X7 - MOVOU 144(DI), X9 - - MOVO X1, X10 - MOVO X3, X11 - MOVO X5, X12 - MOVO X7, X13 - MOVO X9, X14 - - PXOR X0, X10 - PXOR X2, X11 - PXOR X4, X12 - PXOR X6, X13 - PXOR X8, X14 - PAND X15, X10 - PAND X15, X11 - PAND X15, X12 - PAND X15, X13 - PAND X15, X14 - PXOR X10, X0 - PXOR X10, X1 - PXOR X11, X2 - PXOR X11, X3 - PXOR X12, X4 - PXOR X12, X5 - PXOR X13, X6 - PXOR X13, X7 - PXOR X14, X8 - PXOR X14, X9 - - MOVOU X0, 0(DI) - MOVOU X2, 16(DI) - MOVOU X4, 32(DI) - MOVOU X6, 48(DI) - MOVOU X8, 64(DI) - MOVOU X1, 80(DI) - MOVOU X3, 96(DI) - MOVOU X5, 112(DI) - MOVOU X7, 128(DI) - MOVOU X9, 144(DI) - RET - -// func mul(dest, a, b *[5]uint64) -TEXT ·mul(SB),0,$16-24 - MOVQ dest+0(FP), DI - MOVQ a+8(FP), SI - MOVQ b+16(FP), DX - - MOVQ DX,CX - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,0(SP) - MULQ 16(CX) - MOVQ AX,R8 - MOVQ DX,R9 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MOVQ AX,8(SP) - MULQ 8(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 0(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 0(SI),AX - MULQ 8(CX) - MOVQ AX,R10 - MOVQ DX,R11 - MOVQ 0(SI),AX - MULQ 16(CX) - MOVQ AX,R12 - MOVQ DX,R13 - MOVQ 0(SI),AX - MULQ 24(CX) - MOVQ AX,R14 - MOVQ DX,R15 - MOVQ 0(SI),AX - MULQ 32(CX) - MOVQ AX,BX - MOVQ DX,BP - MOVQ 8(SI),AX - MULQ 0(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SI),AX - MULQ 8(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SI),AX - MULQ 16(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 8(SI),AX - MULQ 24(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),AX - MULQ 0(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 16(SI),AX - MULQ 8(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 16(SI),AX - MULQ 16(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(CX) - ADDQ AX,R8 - ADCQ DX,R9 - MOVQ 16(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 24(SI),AX - MULQ 0(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ 24(SI),AX - MULQ 8(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 0(SP),AX - MULQ 24(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 0(SP),AX - MULQ 32(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 32(SI),AX - MULQ 0(CX) - ADDQ AX,BX - ADCQ DX,BP - MOVQ 8(SP),AX - MULQ 16(CX) - ADDQ AX,R10 - ADCQ DX,R11 - MOVQ 8(SP),AX - MULQ 24(CX) - ADDQ AX,R12 - ADCQ DX,R13 - MOVQ 8(SP),AX - MULQ 32(CX) - ADDQ AX,R14 - ADCQ DX,R15 - MOVQ $REDMASK51,SI - SHLQ $13,R8,R9 - ANDQ SI,R8 - SHLQ $13,R10,R11 - ANDQ SI,R10 - ADDQ R9,R10 - SHLQ $13,R12,R13 - ANDQ SI,R12 - ADDQ 
R11,R12 - SHLQ $13,R14,R15 - ANDQ SI,R14 - ADDQ R13,R14 - SHLQ $13,BX,BP - ANDQ SI,BX - ADDQ R15,BX - IMUL3Q $19,BP,DX - ADDQ DX,R8 - MOVQ R8,DX - SHRQ $51,DX - ADDQ R10,DX - MOVQ DX,CX - SHRQ $51,DX - ANDQ SI,R8 - ADDQ R12,DX - MOVQ DX,R9 - SHRQ $51,DX - ANDQ SI,CX - ADDQ R14,DX - MOVQ DX,AX - SHRQ $51,DX - ANDQ SI,R9 - ADDQ BX,DX - MOVQ DX,R10 - SHRQ $51,DX - ANDQ SI,AX - IMUL3Q $19,DX,DX - ADDQ DX,R8 - ANDQ SI,R10 - MOVQ R8,0(DI) - MOVQ CX,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET - -// func square(out, in *[5]uint64) -TEXT ·square(SB),7,$0-16 - MOVQ out+0(FP), DI - MOVQ in+8(FP), SI - - MOVQ 0(SI),AX - MULQ 0(SI) - MOVQ AX,CX - MOVQ DX,R8 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 8(SI) - MOVQ AX,R9 - MOVQ DX,R10 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 16(SI) - MOVQ AX,R11 - MOVQ DX,R12 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 24(SI) - MOVQ AX,R13 - MOVQ DX,R14 - MOVQ 0(SI),AX - SHLQ $1,AX - MULQ 32(SI) - MOVQ AX,R15 - MOVQ DX,BX - MOVQ 8(SI),AX - MULQ 8(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 16(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ 8(SI),AX - SHLQ $1,AX - MULQ 24(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 8(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),AX - MULQ 16(SI) - ADDQ AX,R15 - ADCQ DX,BX - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 24(SI) - ADDQ AX,CX - ADCQ DX,R8 - MOVQ 16(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $19,DX,AX - MULQ 24(SI) - ADDQ AX,R9 - ADCQ DX,R10 - MOVQ 24(SI),DX - IMUL3Q $38,DX,AX - MULQ 32(SI) - ADDQ AX,R11 - ADCQ DX,R12 - MOVQ 32(SI),DX - IMUL3Q $19,DX,AX - MULQ 32(SI) - ADDQ AX,R13 - ADCQ DX,R14 - MOVQ $REDMASK51,SI - SHLQ $13,CX,R8 - ANDQ SI,CX - SHLQ $13,R9,R10 - ANDQ SI,R9 - ADDQ R8,R9 - SHLQ $13,R11,R12 - ANDQ SI,R11 - ADDQ R10,R11 - SHLQ $13,R13,R14 - ANDQ SI,R13 - ADDQ R12,R13 - SHLQ $13,R15,BX - ANDQ SI,R15 - ADDQ R14,R15 - IMUL3Q $19,BX,DX - ADDQ DX,CX - MOVQ CX,DX - SHRQ $51,DX - ADDQ R9,DX - ANDQ SI,CX - MOVQ DX,R8 - SHRQ $51,DX - ADDQ R11,DX - ANDQ SI,R8 - MOVQ DX,R9 - SHRQ $51,DX - ADDQ R13,DX - ANDQ SI,R9 - MOVQ DX,AX - SHRQ $51,DX - ADDQ R15,DX - ANDQ SI,AX - MOVQ DX,R10 - SHRQ $51,DX - IMUL3Q $19,DX,DX - ADDQ DX,CX - ANDQ SI,R10 - MOVQ CX,0(DI) - MOVQ R8,8(DI) - MOVQ R9,16(DI) - MOVQ AX,24(DI) - MOVQ R10,32(DI) - RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go b/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go deleted file mode 100644 index c43b13fc83e7..000000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_generic.go +++ /dev/null @@ -1,828 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package curve25519 - -import "encoding/binary" - -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. 
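Aside (not part of the patch): the curve25519_generic.go hunk above describes a ten-limb representation t[0] + 2^26 t[1] + 2^51 t[2] + ... + 2^230 t[9]. The short math/big sketch below only illustrates that radix; the helper names are made up, and the intermediate exponents are inferred from the alternating 26/25-bit pattern the comment implies.

package main

import (
	"fmt"
	"math/big"
)

// limbShift holds the bit positions 0, 26, 51, ..., 230 implied by the
// deleted fieldElement comment (limbs alternate between 26 and 25 bits).
var limbShift = [10]uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}

// limbsToInt evaluates t[0] + 2^26*t[1] + ... + 2^230*t[9] with math/big.
// Only useful for cross-checking the representation in tests.
func limbsToInt(t *[10]int32) *big.Int {
	acc := new(big.Int)
	for i, s := range limbShift {
		acc.Add(acc, new(big.Int).Lsh(big.NewInt(int64(t[i])), s))
	}
	return acc
}

func main() {
	fe := [10]int32{19, 0, 0, 0, 0, 0, 0, 0, 0, 1} // 19 + 2^230
	fmt.Println(limbsToInt(&fe))
}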
-type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - b = -b - for i := range f { - t := b & (f[i] ^ g[i]) - f[i] ^= t - g[i] ^= t - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. -func load4(in []byte) int64 { - return int64(binary.LittleEndian.Uint32(in)) -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 0x7fffff) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. 
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
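Aside (not part of the patch): the *19 and *38 precomputations in the feMul and feSquare hunks rely on the identity 2^255 = 19 (mod 2^255 - 19), so any limb product that lands above the 255-bit boundary folds back into the low limbs after a multiplication by 19 (or 38 where the schoolbook term was already doubled). A standalone math/big sanity check of that identity:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	one := big.NewInt(1)
	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19)) // 2^255 - 19
	hi := new(big.Int).Lsh(one, 255)                                  // 2^255
	// 2^255 mod p == 19, which is why overflowing cross terms reappear
	// in the low limbs multiplied by 19 (or 38 for the doubled terms).
	fmt.Println(new(big.Int).Mod(hi, p)) // prints 19
}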
-func feMul(h, f, g *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 // 1.4*2^29 - g2_19 := 19 * g2 // 1.4*2^30; still ok - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * int64(g4_19) - 
f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - // |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - // i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - // |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - // i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - // |h0| <= 2^25 - // |h4| <= 2^25 - // |h1| <= 1.51*2^58 - // |h5| <= 1.51*2^58 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - // |h1| <= 2^24; from now on fits into int32 - // |h5| <= 2^24; from now on fits into int32 - // |h2| <= 1.21*2^59 - // |h6| <= 1.21*2^59 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - // |h2| <= 2^25; from now on fits into int32 unchanged - // |h6| <= 2^25; from now on fits into int32 unchanged - // |h3| <= 1.51*2^58 - // |h7| <= 1.51*2^58 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - // |h3| <= 2^24; from now on fits into int32 unchanged - // |h7| <= 2^24; from now on fits into int32 unchanged - // |h4| <= 1.52*2^33 - // |h8| <= 1.52*2^33 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - // |h4| <= 2^25; from now on fits into int32 unchanged - // |h8| <= 2^25; from now on fits into int32 unchanged - // |h5| <= 1.01*2^24 - // |h9| <= 1.51*2^58 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - // |h9| <= 2^24; from now on fits into int32 unchanged - // |h0| <= 1.8*2^37 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - // |h0| <= 2^25; from now on fits into int32 unchanged - // |h1| <= 
1.01*2^24 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feSquare(h, f *fieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] 
= (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feMul121666 calculates h = f * 121666. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func feMul121666(h, f *fieldElement) { - h0 := int64(f[0]) * 121666 - h1 := int64(f[1]) * 121666 - h2 := int64(f[2]) * 121666 - h3 := int64(f[3]) * 121666 - h4 := int64(f[4]) * 121666 - h5 := int64(f[5]) * 121666 - h6 := int64(f[6]) * 121666 - h7 := int64(f[7]) * 121666 - h8 := int64(f[8]) * 121666 - h9 := int64(f[9]) * 121666 - var carry [10]int64 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// feInvert sets out = z^-1. 
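Aside (not part of the patch): feInvert, removed just below, computes z^(p-2) through a fixed chain of squarings and multiplications so that the operation count never depends on secret data. A non-constant-time math/big cross-check of the same identity (Fermat's little theorem), assuming p = 2^255 - 19; a test aid only, with made-up names.

package main

import (
	"fmt"
	"math/big"
)

// invertRef computes z^(p-2) mod p, the same value the deleted feInvert
// derives with its fixed square-and-multiply ladder, but via math/big
// and therefore with no constant-time guarantees.
func invertRef(z *big.Int) *big.Int {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	return new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p)
}

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
	z := big.NewInt(9) // the Curve25519 base point's u-coordinate
	inv := invertRef(z)
	fmt.Println(new(big.Int).Mod(new(big.Int).Mul(z, inv), p)) // prints 1
}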
-func feInvert(out, z *fieldElement) { - var t0, t1, t2, t3 fieldElement - var i int - - feSquare(&t0, z) - for i = 1; i < 1; i++ { - feSquare(&t0, &t0) - } - feSquare(&t1, &t0) - for i = 1; i < 2; i++ { - feSquare(&t1, &t1) - } - feMul(&t1, z, &t1) - feMul(&t0, &t0, &t1) - feSquare(&t2, &t0) - for i = 1; i < 1; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t1, &t2) - feSquare(&t2, &t1) - for i = 1; i < 5; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 20; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 10; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t2, &t1) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t2, &t2, &t1) - feSquare(&t3, &t2) - for i = 1; i < 100; i++ { - feSquare(&t3, &t3) - } - feMul(&t2, &t3, &t2) - feSquare(&t2, &t2) - for i = 1; i < 50; i++ { - feSquare(&t2, &t2) - } - feMul(&t1, &t2, &t1) - feSquare(&t1, &t1) - for i = 1; i < 5; i++ { - feSquare(&t1, &t1) - } - feMul(out, &t1, &t0) -} - -func scalarMultGeneric(out, in, base *[32]byte) { - var e [32]byte - - copy(e[:], in[:]) - e[0] &= 248 - e[31] &= 127 - e[31] |= 64 - - var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement - feFromBytes(&x1, base) - feOne(&x2) - feCopy(&x3, &x1) - feOne(&z3) - - swap := int32(0) - for pos := 254; pos >= 0; pos-- { - b := e[pos/8] >> uint(pos&7) - b &= 1 - swap ^= int32(b) - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - swap = int32(b) - - feSub(&tmp0, &x3, &z3) - feSub(&tmp1, &x2, &z2) - feAdd(&x2, &x2, &z2) - feAdd(&z2, &x3, &z3) - feMul(&z3, &tmp0, &x2) - feMul(&z2, &z2, &tmp1) - feSquare(&tmp0, &tmp1) - feSquare(&tmp1, &x2) - feAdd(&x3, &z3, &z2) - feSub(&z2, &z3, &z2) - feMul(&x2, &tmp1, &tmp0) - feSub(&tmp1, &tmp1, &tmp0) - feSquare(&z2, &z2) - feMul121666(&z3, &tmp1) - feSquare(&x3, &x3) - feAdd(&tmp0, &tmp0, &z3) - feMul(&z3, &x1, &z2) - feMul(&z2, &tmp1, &tmp0) - } - - feCSwap(&x2, &x3, swap) - feCSwap(&z2, &z3, swap) - - feInvert(&z2, &z2) - feMul(&x2, &x2, &z2) - feToBytes(out, &x2) -} diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go b/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go deleted file mode 100644 index 259728af7dad..000000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519_noasm.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !amd64 || !gc || purego -// +build !amd64 !gc purego - -package curve25519 - -func scalarMult(out, in, base *[32]byte) { - scalarMultGeneric(out, in, base) -} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing.go deleted file mode 100644 index 4fad24f8dcde..000000000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !purego -// +build !purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. 
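Aside (not part of the patch): the aliasing helpers removed below exist so that in-place APIs can accept dst == src exactly while rejecting buffers that overlap at non-corresponding indices. A hypothetical sketch of that call pattern, written as if it lived in the same package as the deleted helpers; the wrapper name and the XOR stand-in are invented, and dst is assumed to be at least len(src) bytes.

// xorInPlace shows the intended use of InexactOverlap: identical
// buffers are fine, partial overlap would corrupt data and panics.
func xorInPlace(dst, src []byte, key byte) {
	if InexactOverlap(dst[:len(src)], src) {
		panic("xorInPlace: invalid buffer overlap")
	}
	for i, b := range src {
		dst[i] = b ^ key // stand-in for the real transformation
	}
}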
-package subtle // import "golang.org/x/crypto/internal/subtle" - -import "unsafe" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - uintptr(unsafe.Pointer(&x[0])) <= uintptr(unsafe.Pointer(&y[len(y)-1])) && - uintptr(unsafe.Pointer(&y[0])) <= uintptr(unsafe.Pointer(&x[len(x)-1])) -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go b/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go deleted file mode 100644 index 80ccbed2c0de..000000000000 --- a/vendor/golang.org/x/crypto/internal/subtle/aliasing_purego.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego -// +build purego - -// Package subtle implements functions that are often useful in cryptographic -// code but require careful thought to use correctly. -package subtle // import "golang.org/x/crypto/internal/subtle" - -// This is the Google App Engine standard variant based on reflect -// because the unsafe package and cgo are disallowed. - -import "reflect" - -// AnyOverlap reports whether x and y share memory at any (not necessarily -// corresponding) index. The memory beyond the slice length is ignored. -func AnyOverlap(x, y []byte) bool { - return len(x) > 0 && len(y) > 0 && - reflect.ValueOf(&x[0]).Pointer() <= reflect.ValueOf(&y[len(y)-1]).Pointer() && - reflect.ValueOf(&y[0]).Pointer() <= reflect.ValueOf(&x[len(x)-1]).Pointer() -} - -// InexactOverlap reports whether x and y share memory at any non-corresponding -// index. The memory beyond the slice length is ignored. Note that x and y can -// have different lengths and still not have any inexact overlap. -// -// InexactOverlap can be used to implement the requirements of the crypto/cipher -// AEAD, Block, BlockMode and Stream interfaces. -func InexactOverlap(x, y []byte) bool { - if len(x) == 0 || len(y) == 0 || &x[0] == &y[0] { - return false - } - return AnyOverlap(x, y) -} diff --git a/vendor/golang.org/x/crypto/nacl/sign/sign.go b/vendor/golang.org/x/crypto/nacl/sign/sign.go deleted file mode 100644 index d07627019ef5..000000000000 --- a/vendor/golang.org/x/crypto/nacl/sign/sign.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package sign signs small messages using public-key cryptography. -// -// Sign uses Ed25519 to sign messages. The length of messages is not hidden. -// Messages should be small because: -// 1. The whole message needs to be held in memory to be processed. -// 2. Using large messages pressures implementations on small machines to process -// plaintext without verifying the signature. 
This is very dangerous, and this API -// discourages it, but a protocol that uses excessive message sizes might present -// some implementations with no other choice. -// 3. Performance may be improved by working with messages that fit into data caches. -// Thus large amounts of data should be chunked so that each message is small. -// -// This package is not interoperable with the current release of NaCl -// (https://nacl.cr.yp.to/sign.html), which does not support Ed25519 yet. However, -// it is compatible with the NaCl fork libsodium (https://www.libsodium.org), as well -// as TweetNaCl (https://tweetnacl.cr.yp.to/). -package sign - -import ( - "io" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/internal/subtle" -) - -// Overhead is the number of bytes of overhead when signing a message. -const Overhead = 64 - -// GenerateKey generates a new public/private key pair suitable for use with -// Sign and Open. -func GenerateKey(rand io.Reader) (publicKey *[32]byte, privateKey *[64]byte, err error) { - pub, priv, err := ed25519.GenerateKey(rand) - if err != nil { - return nil, nil, err - } - publicKey, privateKey = new([32]byte), new([64]byte) - copy((*publicKey)[:], pub) - copy((*privateKey)[:], priv) - return publicKey, privateKey, nil -} - -// Sign appends a signed copy of message to out, which will be Overhead bytes -// longer than the original and must not overlap it. -func Sign(out, message []byte, privateKey *[64]byte) []byte { - sig := ed25519.Sign(ed25519.PrivateKey((*privateKey)[:]), message) - ret, out := sliceForAppend(out, Overhead+len(message)) - if subtle.AnyOverlap(out, message) { - panic("nacl: invalid buffer overlap") - } - copy(out, sig) - copy(out[Overhead:], message) - return ret -} - -// Open verifies a signed message produced by Sign and appends the message to -// out, which must not overlap the signed message. The output will be Overhead -// bytes smaller than the signed message. -func Open(out, signedMessage []byte, publicKey *[32]byte) ([]byte, bool) { - if len(signedMessage) < Overhead { - return nil, false - } - if !ed25519.Verify(ed25519.PublicKey((*publicKey)[:]), signedMessage[Overhead:], signedMessage[:Overhead]) { - return nil, false - } - ret, out := sliceForAppend(out, len(signedMessage)-Overhead) - if subtle.AnyOverlap(out, signedMessage) { - panic("nacl: invalid buffer overlap") - } - copy(out, signedMessage[Overhead:]) - return ret, true -} - -// sliceForAppend takes a slice and a requested number of bytes. It returns a -// slice with the contents of the given slice followed by that many bytes and a -// second slice that aliases into it and contains only the extra bytes. If the -// original slice has sufficient capacity then no allocation is performed. -func sliceForAppend(in []byte, n int) (head, tail []byte) { - if total := len(in) + n; cap(in) >= total { - head = in[:total] - } else { - head = make([]byte, total) - copy(head, in) - } - tail = head[len(in):] - return -} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_compat.go b/vendor/golang.org/x/crypto/poly1305/bits_compat.go deleted file mode 100644 index 45b5c966b2be..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/bits_compat.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 -// +build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. 
They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go b/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go deleted file mode 100644 index ed52b3418ab5..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/bits_go1.13.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.13 -// +build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/poly1305/mac_noasm.go deleted file mode 100644 index f184b67d98db..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/mac_noasm.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build (!amd64 && !ppc64le && !s390x) || !gc || purego -// +build !amd64,!ppc64le,!s390x !gc purego - -package poly1305 - -type mac struct{ macGeneric } diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go deleted file mode 100644 index 9d7a6af09feb..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/poly1305.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package poly1305 implements Poly1305 one-time message authentication code as -// specified in https://cr.yp.to/mac/poly1305-20050329.pdf. -// -// Poly1305 is a fast, one-time authentication function. It is infeasible for an -// attacker to generate an authenticator for a message without the key. However, a -// key must only be used for a single message. Authenticating two different -// messages with the same key allows an attacker to forge authenticators for other -// messages with the same key. -// -// Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was -// used with a fixed key in order to generate one-time keys from an nonce. -// However, in this package AES isn't used and the one-time key is specified -// directly. -package poly1305 // import "golang.org/x/crypto/poly1305" - -import "crypto/subtle" - -// TagSize is the size, in bytes, of a poly1305 authenticator. -const TagSize = 16 - -// Sum generates an authenticator for msg using a one-time key and puts the -// 16-byte result into out. 
Authenticating two different messages with the same -// key allows an attacker to forge messages at will. -func Sum(out *[16]byte, m []byte, key *[32]byte) { - h := New(key) - h.Write(m) - h.Sum(out[:0]) -} - -// Verify returns true if mac is a valid authenticator for m with the given key. -func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { - var tmp [16]byte - Sum(&tmp, m, key) - return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 -} - -// New returns a new MAC computing an authentication -// tag of all data written to it with the given key. -// This allows writing the message progressively instead -// of passing it as a single slice. Common users should use -// the Sum function instead. -// -// The key must be unique for each message, as authenticating -// two different messages with the same key allows an attacker -// to forge messages at will. -func New(key *[32]byte) *MAC { - m := &MAC{} - initialize(key, &m.macState) - return m -} - -// MAC is an io.Writer computing an authentication tag -// of the data written to it. -// -// MAC cannot be used like common hash.Hash implementations, -// because using a poly1305 key twice breaks its security. -// Therefore writing data to a running MAC after calling -// Sum or Verify causes it to panic. -type MAC struct { - mac // platform-dependent implementation - - finalized bool -} - -// Size returns the number of bytes Sum will return. -func (h *MAC) Size() int { return TagSize } - -// Write adds more data to the running message authentication code. -// It never returns an error. -// -// It must not be called after the first call of Sum or Verify. -func (h *MAC) Write(p []byte) (n int, err error) { - if h.finalized { - panic("poly1305: write to MAC after Sum or Verify") - } - return h.mac.Write(p) -} - -// Sum computes the authenticator of all data written to the -// message authentication code. -func (h *MAC) Sum(b []byte) []byte { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return append(b, mac[:]...) -} - -// Verify returns whether the authenticator of all data written to -// the message authentication code matches the expected value. -func (h *MAC) Verify(expected []byte) bool { - var mac [TagSize]byte - h.mac.Sum(&mac) - h.finalized = true - return subtle.ConstantTimeCompare(expected, mac[:]) == 1 -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go deleted file mode 100644 index 6d522333f29e..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
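Aside (not part of the patch): for reference, this is how the exported API removed in the poly1305.go hunk above is meant to be driven, a minimal sketch importing the package at its published path. Key generation (one fresh random key per message) is left out for brevity.

package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte // must be secret and unique per message
	msg := []byte("hello")

	// One-shot form.
	var tag [16]byte
	poly1305.Sum(&tag, msg, &key)

	// Incremental form via the MAC type; Write never returns an error
	// and must not be called again after Sum or Verify.
	h := poly1305.New(&key)
	h.Write(msg)
	fmt.Println(poly1305.Verify(&tag, msg, &key), h.Verify(tag[:]))
}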
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s deleted file mode 100644 index 2cb03731408c..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc,!purego - -#include "textflag.h" - -#define POLY1305_ADD(msg, h0, h1, h2) \ - ADDQ 0(msg), h0; \ - ADCQ 8(msg), h1; \ - ADCQ $1, h2; \ - LEAQ 16(msg), msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ - MOVQ r0, AX; \ - MULQ h0; \ - MOVQ AX, t0; \ - MOVQ DX, t1; \ - MOVQ r0, AX; \ - MULQ h1; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ r0, t2; \ - IMULQ h2, t2; \ - ADDQ DX, t2; \ - \ - MOVQ r1, AX; \ - MULQ h0; \ - ADDQ AX, t1; \ - ADCQ $0, DX; \ - MOVQ DX, h0; \ - MOVQ r1, t3; \ - IMULQ h2, t3; \ - MOVQ r1, AX; \ - MULQ h1; \ - ADDQ AX, t2; \ - ADCQ DX, t3; \ - ADDQ h0, t2; \ - ADCQ $0, t3; \ - \ - MOVQ t0, h0; \ - MOVQ t1, h1; \ - MOVQ t2, h2; \ - ANDQ $3, h2; \ - MOVQ t2, t0; \ - ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ - ADDQ t0, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2; \ - SHRQ $2, t3, t2; \ - SHRQ $2, t3; \ - ADDQ t2, h0; \ - ADCQ t3, h1; \ - ADCQ $0, h2 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVQ state+0(FP), DI - MOVQ msg_base+8(FP), SI - MOVQ msg_len+16(FP), R15 - - MOVQ 0(DI), R8 // h0 - MOVQ 8(DI), R9 // h1 - MOVQ 16(DI), R10 // h2 - MOVQ 24(DI), R11 // r0 - MOVQ 32(DI), R12 // r1 - - CMPQ R15, $16 - JB bytes_between_0_and_15 - -loop: - POLY1305_ADD(SI, R8, R9, R10) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) - SUBQ $16, R15 - CMPQ R15, $16 - JAE loop - -bytes_between_0_and_15: - TESTQ R15, R15 - JZ done - MOVQ $1, BX - XORQ CX, CX - XORQ R13, R13 - ADDQ R15, SI - -flush_buffer: - SHLQ $8, BX, CX - SHLQ $8, BX - MOVB -1(SI), R13 - XORQ R13, BX - DECQ SI - DECQ R15 - JNZ flush_buffer - - ADDQ BX, R8 - ADCQ CX, R9 - ADCQ $0, R10 - MOVQ $16, R15 - JMP multiply - -done: - MOVQ R8, 0(DI) - MOVQ R9, 8(DI) - MOVQ R10, 16(DI) - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_generic.go b/vendor/golang.org/x/crypto/poly1305/sum_generic.go deleted file mode 100644 index c942a65904fa..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_generic.go +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file provides the generic implementation of Sum and MAC. Other files -// might provide optimized assembly implementations of some of this code. 
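Aside (not part of the patch): the POLY1305_ADD macro in the assembly hunk above folds the next 16-byte little-endian block into the accumulator together with the mandatory length bit, i.e. h += m + 2^128. A Go rendering of just that step; the package and function names are made up, only the limb layout follows the deleted macState type.

package poly1305ref // hypothetical package, for illustration only

import (
	"encoding/binary"
	"math/bits"
)

// addBlock mirrors POLY1305_ADD: add a full 16-byte block m to the
// 3-limb accumulator h, plus the 2^128 bit that marks the block length
// ("ADCQ $1, h2" in the deleted assembly).
func addBlock(h *[3]uint64, m []byte) {
	var c uint64
	h[0], c = bits.Add64(h[0], binary.LittleEndian.Uint64(m[0:8]), 0)
	h[1], c = bits.Add64(h[1], binary.LittleEndian.Uint64(m[8:16]), c)
	h[2] += c + 1
}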
- -package poly1305 - -import "encoding/binary" - -// Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag -// for a 64 bytes message is approximately -// -// s + m[0:16] * r⁴ + m[16:32] * r³ + m[32:48] * r² + m[48:64] * r mod 2¹³⁰ - 5 -// -// for some secret r and s. It can be computed sequentially like -// -// for len(msg) > 0: -// h += read(msg, 16) -// h *= r -// h %= 2¹³⁰ - 5 -// return h + s -// -// All the complexity is about doing performant constant-time math on numbers -// larger than any available numeric type. - -func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) { - h := newMACGeneric(key) - h.Write(msg) - h.Sum(out) -} - -func newMACGeneric(key *[32]byte) macGeneric { - m := macGeneric{} - initialize(key, &m.macState) - return m -} - -// macState holds numbers in saturated 64-bit little-endian limbs. That is, -// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸. -type macState struct { - // h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but - // can grow larger during and after rounds. It must, however, remain below - // 2 * (2¹³⁰ - 5). - h [3]uint64 - // r and s are the private key components. - r [2]uint64 - s [2]uint64 -} - -type macGeneric struct { - macState - - buffer [TagSize]byte - offset int -} - -// Write splits the incoming message into TagSize chunks, and passes them to -// update. It buffers incomplete chunks. -func (h *macGeneric) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - updateGeneric(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - updateGeneric(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -// Sum flushes the last incomplete chunk from the buffer, if any, and generates -// the MAC output. It does not modify its state, in order to allow for multiple -// calls to Sum, even if no Write is allowed after Sum. -func (h *macGeneric) Sum(out *[TagSize]byte) { - state := h.macState - if h.offset > 0 { - updateGeneric(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} - -// [rMask0, rMask1] is the specified Poly1305 clamping mask in little-endian. It -// clears some bits of the secret coefficient to make it possible to implement -// multiplication more efficiently. -const ( - rMask0 = 0x0FFFFFFC0FFFFFFF - rMask1 = 0x0FFFFFFC0FFFFFFC -) - -// initialize loads the 256-bit key into the two 128-bit secret values r and s. -func initialize(key *[32]byte, m *macState) { - m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0 - m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1 - m.s[0] = binary.LittleEndian.Uint64(key[16:24]) - m.s[1] = binary.LittleEndian.Uint64(key[24:32]) -} - -// uint128 holds a 128-bit number as two 64-bit limbs, for use with the -// bits.Mul64 and bits.Add64 intrinsics. -type uint128 struct { - lo, hi uint64 -} - -func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) - return uint128{lo, hi} -} - -func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) - if c != 0 { - panic("poly1305: unexpected overflow") - } - return uint128{lo, hi} -} - -func shiftRightBy2(a uint128) uint128 { - a.lo = a.lo>>2 | (a.hi&3)<<62 - a.hi = a.hi >> 2 - return a -} - -// updateGeneric absorbs msg into the state.h accumulator. 
For each chunk m of -// 128 bits of message, it computes -// -// h₊ = (h + m) * r mod 2¹³⁰ - 5 -// -// If the msg length is not a multiple of TagSize, it assumes the last -// incomplete chunk is the final one. -func updateGeneric(state *macState, msg []byte) { - h0, h1, h2 := state.h[0], state.h[1], state.h[2] - r0, r1 := state.r[0], state.r[1] - - for len(msg) > 0 { - var c uint64 - - // For the first step, h + m, we use a chain of bits.Add64 intrinsics. - // The resulting value of h might exceed 2¹³⁰ - 5, but will be partially - // reduced at the end of the multiplication below. - // - // The spec requires us to set a bit just above the message size, not to - // hide leading zeroes. For full chunks, that's 1 << 128, so we can just - // add 1 to the most significant (2¹²⁸) limb, h2. - if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) - h2 += c + 1 - - msg = msg[TagSize:] - } else { - var buf [TagSize]byte - copy(buf[:], msg) - buf[len(msg)] = 1 - - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) - h2 += c - - msg = nil - } - - // Multiplication of big number limbs is similar to elementary school - // columnar multiplication. Instead of digits, there are 64-bit limbs. - // - // We are multiplying a 3 limbs number, h, by a 2 limbs number, r. - // - // h2 h1 h0 x - // r1 r0 = - // ---------------- - // h2r0 h1r0 h0r0 <-- individual 128-bit products - // + h2r1 h1r1 h0r1 - // ------------------------ - // m3 m2 m1 m0 <-- result in 128-bit overlapping limbs - // ------------------------ - // m3.hi m2.hi m1.hi m0.hi <-- carry propagation - // + m3.lo m2.lo m1.lo m0.lo - // ------------------------------- - // t4 t3 t2 t1 t0 <-- final result in 64-bit limbs - // - // The main difference from pen-and-paper multiplication is that we do - // carry propagation in a separate step, as if we wrote two digit sums - // at first (the 128-bit limbs), and then carried the tens all at once. - - h0r0 := mul64(h0, r0) - h1r0 := mul64(h1, r0) - h2r0 := mul64(h2, r0) - h0r1 := mul64(h0, r1) - h1r1 := mul64(h1, r1) - h2r1 := mul64(h2, r1) - - // Since h2 is known to be at most 7 (5 + 1 + 1), and r0 and r1 have their - // top 4 bits cleared by rMask{0,1}, we know that their product is not going - // to overflow 64 bits, so we can ignore the high part of the products. - // - // This also means that the product doesn't have a fifth limb (t4). - if h2r0.hi != 0 { - panic("poly1305: unexpected overflow") - } - if h2r1.hi != 0 { - panic("poly1305: unexpected overflow") - } - - m0 := h0r0 - m1 := add128(h1r0, h0r1) // These two additions don't overflow thanks again - m2 := add128(h2r0, h1r1) // to the 4 masked bits at the top of r0 and r1. - m3 := h2r1 - - t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) - - // Now we have the result as 4 64-bit limbs, and we need to reduce it - // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do - // a cheap partial reduction according to the reduction identity - // - // c * 2¹³⁰ + n = c * 5 + n mod 2¹³⁰ - 5 - // - // because 2¹³⁰ = 5 mod 2¹³⁰ - 5. Partial reduction since the result is - // likely to be larger than 2¹³⁰ - 5, but still small enough to fit the - // assumptions we make about h in the rest of the code. 
- // - // See also https://speakerdeck.com/gtank/engineering-prime-numbers?slide=23 - - // We split the final result at the 2¹³⁰ mark into h and cc, the carry. - // Note that the carry bits are effectively shifted left by 2, in other - // words, cc = c * 4 for the c in the reduction identity. - h0, h1, h2 = t0, t1, t2&maskLow2Bits - cc := uint128{t2 & maskNotLow2Bits, t3} - - // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - cc = shiftRightBy2(cc) - - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) - h2 += c - - // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most - // - // 5 * 2¹²⁸ + (2¹²⁸ - 1) = 6 * 2¹²⁸ - 1 - } - - state.h[0], state.h[1], state.h[2] = h0, h1, h2 -} - -const ( - maskLow2Bits uint64 = 0x0000000000000003 - maskNotLow2Bits uint64 = ^maskLow2Bits -) - -// select64 returns x if v == 1 and y if v == 0, in constant time. -func select64(v, x, y uint64) uint64 { return ^(v-1)&x | (v-1)&y } - -// [p0, p1, p2] is 2¹³⁰ - 5 in little endian order. -const ( - p0 = 0xFFFFFFFFFFFFFFFB - p1 = 0xFFFFFFFFFFFFFFFF - p2 = 0x0000000000000003 -) - -// finalize completes the modular reduction of h and computes -// -// out = h + s mod 2¹²⁸ -// -func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { - h0, h1, h2 := h[0], h[1], h[2] - - // After the partial reduction in updateGeneric, h might be more than - // 2¹³⁰ - 5, but will be less than 2 * (2¹³⁰ - 5). To complete the reduction - // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the - // result if the subtraction underflows, and t otherwise. - - hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) - - // h = h if h < p else h - p - h0 = select64(b, h0, hMinusP0) - h1 = select64(b, h1, hMinusP1) - - // Finally, we compute the last Poly1305 step - // - // tag = h + s mod 2¹²⁸ - // - // by just doing a wide addition with the 128 low bits of h and discarding - // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) - - binary.LittleEndian.PutUint64(out[0:8], h0) - binary.LittleEndian.PutUint64(out[8:16], h1) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go deleted file mode 100644 index 4a069941a6ef..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -//go:noescape -func update(state *macState, msg []byte) - -// mac is a wrapper for macGeneric that redirects calls that would have gone to -// updateGeneric to update. -// -// Its Write and Sum methods are otherwise identical to the macGeneric ones, but -// using function pointers would carry a major performance cost. 
-type mac struct{ macGeneric } - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < TagSize { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - update(&h.macState, h.buffer[:]) - } - if n := len(p) - (len(p) % TagSize); n > 0 { - update(&h.macState, p[:n]) - p = p[n:] - } - if len(p) > 0 { - h.offset += copy(h.buffer[h.offset:], p) - } - return nn, nil -} - -func (h *mac) Sum(out *[16]byte) { - state := h.macState - if h.offset > 0 { - update(&state, h.buffer[:h.offset]) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s b/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s deleted file mode 100644 index 5cd7494b21a7..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build gc,!purego - -#include "textflag.h" - -// This was ported from the amd64 implementation. - -#define POLY1305_ADD(msg, h0, h1, h2, t0, t1, t2) \ - MOVD (msg), t0; \ - MOVD 8(msg), t1; \ - MOVD $1, t2; \ - ADDC t0, h0, h0; \ - ADDE t1, h1, h1; \ - ADDE t2, h2; \ - ADD $16, msg - -#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3, t4, t5) \ - MULLD r0, h0, t0; \ - MULLD r0, h1, t4; \ - MULHDU r0, h0, t1; \ - MULHDU r0, h1, t5; \ - ADDC t4, t1, t1; \ - MULLD r0, h2, t2; \ - ADDZE t5; \ - MULHDU r1, h0, t4; \ - MULLD r1, h0, h0; \ - ADD t5, t2, t2; \ - ADDC h0, t1, t1; \ - MULLD h2, r1, t3; \ - ADDZE t4, h0; \ - MULHDU r1, h1, t5; \ - MULLD r1, h1, t4; \ - ADDC t4, t2, t2; \ - ADDE t5, t3, t3; \ - ADDC h0, t2, t2; \ - MOVD $-4, t4; \ - MOVD t0, h0; \ - MOVD t1, h1; \ - ADDZE t3; \ - ANDCC $3, t2, h2; \ - AND t2, t4, t0; \ - ADDC t0, h0, h0; \ - ADDE t3, h1, h1; \ - SLD $62, t3, t4; \ - SRD $2, t2; \ - ADDZE h2; \ - OR t4, t2, t2; \ - SRD $2, t3; \ - ADDC t2, h0, h0; \ - ADDE t3, h1, h1; \ - ADDZE h2 - -DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF -DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC -GLOBL ·poly1305Mask<>(SB), RODATA, $16 - -// func update(state *[7]uint64, msg []byte) -TEXT ·update(SB), $0-32 - MOVD state+0(FP), R3 - MOVD msg_base+8(FP), R4 - MOVD msg_len+16(FP), R5 - - MOVD 0(R3), R8 // h0 - MOVD 8(R3), R9 // h1 - MOVD 16(R3), R10 // h2 - MOVD 24(R3), R11 // r0 - MOVD 32(R3), R12 // r1 - - CMP R5, $16 - BLT bytes_between_0_and_15 - -loop: - POLY1305_ADD(R4, R8, R9, R10, R20, R21, R22) - -multiply: - POLY1305_MUL(R8, R9, R10, R11, R12, R16, R17, R18, R14, R20, R21) - ADD $-16, R5 - CMP R5, $16 - BGE loop - -bytes_between_0_and_15: - CMP $0, R5 - BEQ done - MOVD $0, R16 // h0 - MOVD $0, R17 // h1 - -flush_buffer: - CMP R5, $8 - BLE just1 - - MOVD $8, R21 - SUB R21, R5, R21 - - // Greater than 8 -- load the rightmost remaining bytes in msg - // and put into R17 (h1) - MOVD (R4)(R21), R17 - MOVD $16, R22 - - // Find the offset to those bytes - SUB R5, R22, R22 - SLD $3, R22 - - // Shift to get only the bytes in msg - SRD R22, R17, R17 - - // Put 1 at high end - MOVD $1, R23 - SLD $3, R21 - SLD R21, R23, R23 - OR R23, R17, R17 - - // Remainder is 8 - MOVD $8, R5 - -just1: - CMP R5, $8 - BLT less8 - - // Exactly 8 - MOVD (R4), R16 - - CMP $0, R17 - - // Check if we've already set R17; if not - // set 1 to indicate end of msg. 
- BNE carry - MOVD $1, R17 - BR carry - -less8: - MOVD $0, R16 // h0 - MOVD $0, R22 // shift count - CMP R5, $4 - BLT less4 - MOVWZ (R4), R16 - ADD $4, R4 - ADD $-4, R5 - MOVD $32, R22 - -less4: - CMP R5, $2 - BLT less2 - MOVHZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $16, R22 - ADD $-2, R5 - ADD $2, R4 - -less2: - CMP $0, R5 - BEQ insert1 - MOVBZ (R4), R21 - SLD R22, R21, R21 - OR R16, R21, R16 - ADD $8, R22 - -insert1: - // Insert 1 at end of msg - MOVD $1, R21 - SLD R22, R21, R21 - OR R16, R21, R16 - -carry: - // Add new values to h0, h1, h2 - ADDC R16, R8 - ADDE R17, R9 - ADDE $0, R10 - MOVD $16, R5 - ADD R5, R4 - BR multiply - -done: - // Save h0, h1, h2 in state - MOVD R8, 0(R3) - MOVD R9, 8(R3) - MOVD R10, 16(R3) - RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go b/vendor/golang.org/x/crypto/poly1305/sum_s390x.go deleted file mode 100644 index 62cc9f84709e..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build gc && !purego -// +build gc,!purego - -package poly1305 - -import ( - "golang.org/x/sys/cpu" -) - -// updateVX is an assembly implementation of Poly1305 that uses vector -// instructions. It must only be called if the vector facility (vx) is -// available. -//go:noescape -func updateVX(state *macState, msg []byte) - -// mac is a replacement for macGeneric that uses a larger buffer and redirects -// calls that would have gone to updateGeneric to updateVX if the vector -// facility is installed. -// -// A larger buffer is required for good performance because the vector -// implementation has a higher fixed cost per call than the generic -// implementation. -type mac struct { - macState - - buffer [16 * TagSize]byte // size must be a multiple of block size (16) - offset int -} - -func (h *mac) Write(p []byte) (int, error) { - nn := len(p) - if h.offset > 0 { - n := copy(h.buffer[h.offset:], p) - if h.offset+n < len(h.buffer) { - h.offset += n - return nn, nil - } - p = p[n:] - h.offset = 0 - if cpu.S390X.HasVX { - updateVX(&h.macState, h.buffer[:]) - } else { - updateGeneric(&h.macState, h.buffer[:]) - } - } - - tail := len(p) % len(h.buffer) // number of bytes to copy into buffer - body := len(p) - tail // number of bytes to process now - if body > 0 { - if cpu.S390X.HasVX { - updateVX(&h.macState, p[:body]) - } else { - updateGeneric(&h.macState, p[:body]) - } - } - h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0 - return nn, nil -} - -func (h *mac) Sum(out *[TagSize]byte) { - state := h.macState - remainder := h.buffer[:h.offset] - - // Use the generic implementation if we have 2 or fewer blocks left - // to sum. The vector implementation has a higher startup time. - if cpu.S390X.HasVX && len(remainder) > 2*TagSize { - updateVX(&state, remainder) - } else if len(remainder) > 0 { - updateGeneric(&state, remainder) - } - finalize(out, &state.h, &state.s) -} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s b/vendor/golang.org/x/crypto/poly1305/sum_s390x.s deleted file mode 100644 index bdd882c606de..000000000000 --- a/vendor/golang.org/x/crypto/poly1305/sum_s390x.s +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build gc,!purego - -#include "textflag.h" - -// This implementation of Poly1305 uses the vector facility (vx) -// to process up to 2 blocks (32 bytes) per iteration using an -// algorithm based on the one described in: -// -// NEON crypto, Daniel J. Bernstein & Peter Schwabe -// https://cryptojedi.org/papers/neoncrypto-20120320.pdf -// -// This algorithm uses 5 26-bit limbs to represent a 130-bit -// value. These limbs are, for the most part, zero extended and -// placed into 64-bit vector register elements. Each vector -// register is 128-bits wide and so holds 2 of these elements. -// Using 26-bit limbs allows us plenty of headroom to accomodate -// accumulations before and after multiplication without -// overflowing either 32-bits (before multiplication) or 64-bits -// (after multiplication). -// -// In order to parallelise the operations required to calculate -// the sum we use two separate accumulators and then sum those -// in an extra final step. For compatibility with the generic -// implementation we perform this summation at the end of every -// updateVX call. -// -// To use two accumulators we must multiply the message blocks -// by r² rather than r. Only the final message block should be -// multiplied by r. -// -// Example: -// -// We want to calculate the sum (h) for a 64 byte message (m): -// -// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r -// -// To do this we split the calculation into the even indices -// and odd indices of the message. These form our SIMD 'lanes': -// -// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0 -// m[16:32]r³ + m[48:64]r <- lane 1 -// -// To calculate this iteratively we refactor so that both lanes -// are written in terms of r² and r: -// -// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0 -// (m[16:32]r² + m[48:64])r <- lane 1 -// ^ ^ -// | coefficients for second iteration -// coefficients for first iteration -// -// So in this case we would have two iterations. In the first -// both lanes are multiplied by r². In the second only the -// first lane is multiplied by r² and the second lane is -// instead multiplied by r. This gives use the odd and even -// powers of r that we need from the original equation. -// -// Notation: -// -// h - accumulator -// r - key -// m - message -// -// [a, b] - SIMD register holding two 64-bit values -// [a, b, c, d] - SIMD register holding four 32-bit values -// xᵢ[n] - limb n of variable x with bit width i -// -// Limbs are expressed in little endian order, so for 26-bit -// limbs x₂₆[4] will be the most significant limb and x₂₆[0] -// will be the least significant limb. 
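Editor's note: the even/odd lane refactoring described above is ordinary modular algebra and can be checked independently of the vector code. A small illustrative Go sketch with arbitrary stand-in values (not part of the vendored file):

package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5)) // 2¹³⁰ - 5
	r := big.NewInt(123456789)                                                 // stand-in for the clamped key
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)} // four example blocks

	mod := func(x *big.Int) *big.Int { return new(big.Int).Mod(x, p) }
	mul := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Mul(a, b)) }
	add := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Add(a, b)) }

	r2 := mul(r, r)
	r3 := mul(r2, r)
	r4 := mul(r2, r2)

	// Sequential form: h = m0·r⁴ + m1·r³ + m2·r² + m3·r
	seq := add(add(mul(m[0], r4), mul(m[1], r3)), add(mul(m[2], r2), mul(m[3], r)))

	// Two-lane form: lane 0 = (m0·r² + m2)·r², lane 1 = (m1·r² + m3)·r
	lane0 := mul(add(mul(m[0], r2), m[2]), r2)
	lane1 := mul(add(mul(m[1], r2), m[3]), r)

	fmt.Println(seq.Cmp(add(lane0, lane1)) == 0) // true: the lane split preserves the sum
}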
- -// masking constants -#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits -#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits - -// expansion constants (see EXPAND macro) -#define EX0 V2 -#define EX1 V3 -#define EX2 V4 - -// key (r², r or 1 depending on context) -#define R_0 V5 -#define R_1 V6 -#define R_2 V7 -#define R_3 V8 -#define R_4 V9 - -// precalculated coefficients (5r², 5r or 0 depending on context) -#define R5_1 V10 -#define R5_2 V11 -#define R5_3 V12 -#define R5_4 V13 - -// message block (m) -#define M_0 V14 -#define M_1 V15 -#define M_2 V16 -#define M_3 V17 -#define M_4 V18 - -// accumulator (h) -#define H_0 V19 -#define H_1 V20 -#define H_2 V21 -#define H_3 V22 -#define H_4 V23 - -// temporary registers (for short-lived values) -#define T_0 V24 -#define T_1 V25 -#define T_2 V26 -#define T_3 V27 -#define T_4 V28 - -GLOBL ·constants<>(SB), RODATA, $0x30 -// EX0 -DATA ·constants<>+0x00(SB)/8, $0x0006050403020100 -DATA ·constants<>+0x08(SB)/8, $0x1016151413121110 -// EX1 -DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706 -DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716 -// EX2 -DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d -DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d - -// MULTIPLY multiplies each lane of f and g, partially reduced -// modulo 2¹³⁰ - 5. The result, h, consists of partial products -// in each lane that need to be reduced further to produce the -// final result. -// -// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰ -// -// Note that the multiplication by 5 of the high bits is -// achieved by precalculating the multiplication of four of the -// g coefficients by 5. These are g51-g54. -#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \ - VMLOF f0, g0, h0 \ - VMLOF f0, g3, h3 \ - VMLOF f0, g1, h1 \ - VMLOF f0, g4, h4 \ - VMLOF f0, g2, h2 \ - VMLOF f1, g54, T_0 \ - VMLOF f1, g2, T_3 \ - VMLOF f1, g0, T_1 \ - VMLOF f1, g3, T_4 \ - VMLOF f1, g1, T_2 \ - VMALOF f2, g53, h0, h0 \ - VMALOF f2, g1, h3, h3 \ - VMALOF f2, g54, h1, h1 \ - VMALOF f2, g2, h4, h4 \ - VMALOF f2, g0, h2, h2 \ - VMALOF f3, g52, T_0, T_0 \ - VMALOF f3, g0, T_3, T_3 \ - VMALOF f3, g53, T_1, T_1 \ - VMALOF f3, g1, T_4, T_4 \ - VMALOF f3, g54, T_2, T_2 \ - VMALOF f4, g51, h0, h0 \ - VMALOF f4, g54, h3, h3 \ - VMALOF f4, g52, h1, h1 \ - VMALOF f4, g0, h4, h4 \ - VMALOF f4, g53, h2, h2 \ - VAG T_0, h0, h0 \ - VAG T_3, h3, h3 \ - VAG T_1, h1, h1 \ - VAG T_4, h4, h4 \ - VAG T_2, h2, h2 - -// REDUCE performs the following carry operations in four -// stages, as specified in Bernstein & Schwabe: -// -// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4] -// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0] -// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3] -// 4: h₂₆[3]->h₂₆[4] -// -// The result is that all of the limbs are limited to 26-bits -// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits. -// -// Note that although each limb is aligned at 26-bit intervals -// they may contain values that exceed 2²⁶ - 1, hence the need -// to carry the excess bits in each limb. 
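Editor's note: a scalar rendering of that carry schedule makes the bounds easier to follow. The sketch below is an illustration only, not the vectorized code; it applies the same four stages to one five-limb value, and the 2¹³⁰ wrap in stage 2 is where the carry gets multiplied by 5.

package main

import "fmt"

const mask26 = (1 << 26) - 1

// reduce26 mirrors the four carry stages of the REDUCE macro on a single
// five-limb, base-2²⁶ value whose limbs may exceed 26 bits on entry.
func reduce26(h [5]uint64) [5]uint64 {
	// stage 1: h[0] -> h[1], h[3] -> h[4]
	h[1] += h[0] >> 26
	h[0] &= mask26
	h[4] += h[3] >> 26
	h[3] &= mask26
	// stage 2: h[1] -> h[2], h[4] -> h[0]; the carry out of h[4] crosses the
	// 2¹³⁰ boundary, so it re-enters at the bottom multiplied by 5.
	h[2] += h[1] >> 26
	h[1] &= mask26
	h[0] += 5 * (h[4] >> 26)
	h[4] &= mask26
	// stage 3: h[0] -> h[1], h[2] -> h[3]
	h[1] += h[0] >> 26
	h[0] &= mask26
	h[3] += h[2] >> 26
	h[2] &= mask26
	// stage 4: h[3] -> h[4]
	h[4] += h[3] >> 26
	h[3] &= mask26
	return h
}

func main() {
	h := reduce26([5]uint64{1 << 40, 1 << 40, 1 << 40, 1 << 40, 1 << 40})
	// h[0], h[2], h[3] are now below 2²⁶; h[1] and h[4] may still use 27 bits.
	fmt.Println(h)
}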
-#define REDUCE(h0, h1, h2, h3, h4) \ - VESRLG $26, h0, T_0 \ - VESRLG $26, h3, T_1 \ - VN MOD26, h0, h0 \ - VN MOD26, h3, h3 \ - VAG T_0, h1, h1 \ - VAG T_1, h4, h4 \ - VESRLG $26, h1, T_2 \ - VESRLG $26, h4, T_3 \ - VN MOD26, h1, h1 \ - VN MOD26, h4, h4 \ - VESLG $2, T_3, T_4 \ - VAG T_3, T_4, T_4 \ - VAG T_2, h2, h2 \ - VAG T_4, h0, h0 \ - VESRLG $26, h2, T_0 \ - VESRLG $26, h0, T_1 \ - VN MOD26, h2, h2 \ - VN MOD26, h0, h0 \ - VAG T_0, h3, h3 \ - VAG T_1, h1, h1 \ - VESRLG $26, h3, T_2 \ - VN MOD26, h3, h3 \ - VAG T_2, h4, h4 - -// EXPAND splits the 128-bit little-endian values in0 and in1 -// into 26-bit big-endian limbs and places the results into -// the first and second lane of d₂₆[0:4] respectively. -// -// The EX0, EX1 and EX2 constants are arrays of byte indices -// for permutation. The permutation both reverses the bytes -// in the input and ensures the bytes are copied into the -// destination limb ready to be shifted into their final -// position. -#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \ - VPERM in0, in1, EX0, d0 \ - VPERM in0, in1, EX1, d2 \ - VPERM in0, in1, EX2, d4 \ - VESRLG $26, d0, d1 \ - VESRLG $30, d2, d3 \ - VESRLG $4, d2, d2 \ - VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]] - VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]] - VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]] - VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]] - VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]] - -// func updateVX(state *macState, msg []byte) -TEXT ·updateVX(SB), NOSPLIT, $0 - MOVD state+0(FP), R1 - LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len - - // load EX0, EX1 and EX2 - MOVD $·constants<>(SB), R5 - VLM (R5), EX0, EX2 - - // generate masks - VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff] - VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff] - - // load h (accumulator) and r (key) from state - VZERO T_1 // [0, 0] - VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]] - VLEG $0, 16(R1), T_1 // [h₆₄[2], 0] - VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]] - VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]] - VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]] - - // unpack h and r into 26-bit limbs - // note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value - VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]] - VZERO H_1 // [0, 0] - VZERO H_3 // [0, 0] - VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out - VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0] - VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]] - VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only - VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]] - VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only - VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete - VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete - - // replicate r across all 4 vector elements - VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]] - VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]] - VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]] - VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]] - VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]] - - // zero out lane 1 of h - VLEIG $1, $0, H_0 // [h₂₆[0], 0] - VLEIG $1, $0, H_1 // [h₂₆[1], 0] - VLEIG $1, $0, H_2 // [h₂₆[2], 0] - VLEIG $1, $0, H_3 // [h₂₆[3], 0] - VLEIG $1, $0, H_4 // [h₂₆[4], 0] - - // calculate 5r (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]] - VMLF T_0, R_4, R5_4 // 
[5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]] - - // skip r² calculation if we are only calculating one block - CMPBLE R3, $16, skip - - // calculate r² - MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4) - REDUCE(M_0, M_1, M_2, M_3, M_4) - VGBM $0x0f0f, T_0 - VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]] - VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]] - VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]] - VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]] - VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]] - - // calculate 5r² (ignore least significant limb) - VREPIF $5, T_0 - VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]] - VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]] - VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]] - VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]] - -loop: - CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients - - // load next 2 blocks from message - VLM (R2), T_0, T_1 - - // update message slice - SUB $32, R3 - MOVD $32(R2), R2 - - // unpack message blocks into 26-bit big-endian limbs - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // add 2¹²⁸ to each message block value - VLEIB $4, $1, M_4 - VLEIB $12, $1, M_4 - -multiply: - // accumulate the incoming message - VAG H_0, M_0, M_0 - VAG H_3, M_3, M_3 - VAG H_1, M_1, M_1 - VAG H_4, M_4, M_4 - VAG H_2, M_2, M_2 - - // multiply the accumulator by the key coefficient - MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4) - - // carry and partially reduce the partial products - REDUCE(H_0, H_1, H_2, H_3, H_4) - - CMPBNE R3, $0, loop - -finish: - // sum lane 0 and lane 1 and put the result in lane 1 - VZERO T_0 - VSUMQG H_0, T_0, H_0 - VSUMQG H_3, T_0, H_3 - VSUMQG H_1, T_0, H_1 - VSUMQG H_4, T_0, H_4 - VSUMQG H_2, T_0, H_2 - - // reduce again after summation - // TODO(mundaym): there might be a more efficient way to do this - // now that we only have 1 active lane. For example, we could - // simultaneously pack the values as we reduce them. - REDUCE(H_0, H_1, H_2, H_3, H_4) - - // carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1 - // TODO(mundaym): in testing this final carry was unnecessary. - // Needs a proof before it can be removed though. - VESRLG $26, H_1, T_1 - VN MOD26, H_1, H_1 - VAQ T_1, H_2, H_2 - VESRLG $26, H_2, T_2 - VN MOD26, H_2, H_2 - VAQ T_2, H_3, H_3 - VESRLG $26, H_3, T_3 - VN MOD26, H_3, H_3 - VAQ T_3, H_4, H_4 - - // h is now < 2(2¹³⁰ - 5) - // Pack each lane in h₂₆[0:4] into h₁₂₈[0:1]. - VESLG $26, H_1, H_1 - VESLG $26, H_3, H_3 - VO H_0, H_1, H_0 - VO H_2, H_3, H_2 - VESLG $4, H_2, H_2 - VLEIB $7, $48, H_1 - VSLB H_1, H_2, H_2 - VO H_0, H_2, H_0 - VLEIB $7, $104, H_1 - VSLB H_1, H_4, H_3 - VO H_3, H_0, H_0 - VLEIB $7, $24, H_1 - VSRLB H_1, H_4, H_1 - - // update state - VSTEG $1, H_0, 0(R1) - VSTEG $0, H_0, 8(R1) - VSTEG $1, H_1, 16(R1) - RET - -b2: // 2 or fewer blocks remaining - CMPBLE R3, $16, b1 - - // Load the 2 remaining blocks (17-32 bytes remaining). - MOVD $-17(R3), R0 // index of final byte to load modulo 16 - VL (R2), T_0 // load full 16 byte block - VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. 
If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16) - CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long - VLVGB R3, R0, T_1 // insert 1 into the byte at index R3 - - // Split both blocks into 26-bit limbs in the appropriate lanes. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the second to last block. - VLEIB $4, $1, M_4 - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. - CMPBNE R3, $16, 2(PC) - VLEIB $12, $1, M_4 - - // Finally, set up the coefficients for the final multiplication. - // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r² so that can be kept the - // same. We want lane 1 to be multiplied by r so we need to move - // the saved r value into the 32-bit odd index in lane 1 by - // rotating the 64-bit lane by 32. - VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only - VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]] - VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]] - VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]] - VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]] - VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]] - VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]] - VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]] - VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]] - VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]] - - MOVD $0, R3 - BR multiply - -skip: - CMPBEQ R3, $0, finish - -b1: // 1 block remaining - - // Load the final block (1-16 bytes). This will be placed into - // lane 0. - MOVD $-1(R3), R0 - VLL R0, (R2), T_0 // pad to 16 bytes with zeros - - // The Poly1305 algorithm requires that a 1 bit be appended to - // each message block. If the final block is less than 16 bytes - // long then it is easiest to insert the 1 before the message - // block is split into 26-bit limbs. If, on the other hand, the - // final message block is 16 bytes long then we append the 1 bit - // after expansion as normal. - MOVBZ $1, R0 - CMPBEQ R3, $16, 2(PC) - VLVGB R3, R0, T_0 - - // Set the message block in lane 1 to the value 0 so that it - // can be accumulated without affecting the final result. - VZERO T_1 - - // Split the final message block into 26-bit limbs in lane 0. - // Lane 1 will be contain 0. - EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4) - - // Append a 1 byte to the end of the last block only if it is a - // full 16 byte block. - CMPBNE R3, $16, 2(PC) - VLEIB $4, $1, M_4 - - // We have previously saved r and 5r in the 32-bit even indexes - // of the R_[0-4] and R5_[1-4] coefficient registers. - // - // We want lane 0 to be multiplied by r so we need to move the - // saved r value into the 32-bit odd index in lane 0. We want - // lane 1 to be set to the value 1. This makes multiplication - // a no-op. We do this by setting lane 1 in every register to 0 - // and then just setting the 32-bit index 3 in R_0 to 1. 
- VZERO T_0 - MOVD $0, R0 - MOVD $0x10111213, R12 - VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000] - VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0] - VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0] - VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0] - VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0] - VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0] - VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0] - VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0] - VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0] - VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0] - - // Set the value of lane 1 to be 1. - VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1] - - MOVD $0, R3 - BR multiply diff --git a/vendor/golang.org/x/crypto/ssh/agent/client.go b/vendor/golang.org/x/crypto/ssh/agent/client.go deleted file mode 100644 index b909471cc066..000000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/client.go +++ /dev/null @@ -1,813 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package agent implements the ssh-agent protocol, and provides both -// a client and a server. The client can talk to a standard ssh-agent -// that uses UNIX sockets, and one could implement an alternative -// ssh-agent process using the sample server. -// -// References: -// [PROTOCOL.agent]: https://tools.ietf.org/html/draft-miller-ssh-agent-00 -package agent // import "golang.org/x/crypto/ssh/agent" - -import ( - "bytes" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/base64" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "sync" - - "crypto" - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh" -) - -// SignatureFlags represent additional flags that can be passed to the signature -// requests an defined in [PROTOCOL.agent] section 4.5.1. -type SignatureFlags uint32 - -// SignatureFlag values as defined in [PROTOCOL.agent] section 5.3. -const ( - SignatureFlagReserved SignatureFlags = 1 << iota - SignatureFlagRsaSha256 - SignatureFlagRsaSha512 -) - -// Agent represents the capabilities of an ssh-agent. -type Agent interface { - // List returns the identities known to the agent. - List() ([]*Key, error) - - // Sign has the agent sign the data using a protocol 2 key as defined - // in [PROTOCOL.agent] section 2.6.2. - Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) - - // Add adds a private key to the agent. - Add(key AddedKey) error - - // Remove removes all identities with the given public key. - Remove(key ssh.PublicKey) error - - // RemoveAll removes all identities. - RemoveAll() error - - // Lock locks the agent. Sign and Remove will fail, and List will empty an empty list. - Lock(passphrase []byte) error - - // Unlock undoes the effect of Lock - Unlock(passphrase []byte) error - - // Signers returns signers for all the known keys. - Signers() ([]ssh.Signer, error) -} - -type ExtendedAgent interface { - Agent - - // SignWithFlags signs like Sign, but allows for additional flags to be sent/received - SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) - - // Extension processes a custom extension request. Standard-compliant agents are not - // required to support any extensions, but this method allows agents to implement - // vendor-specific methods or add experimental features. See [PROTOCOL.agent] section 4.7. - // If agent extensions are unsupported entirely this method MUST return an - // ErrExtensionUnsupported error. 
Similarly, if just the specific extensionType in - // the request is unsupported by the agent then ErrExtensionUnsupported MUST be - // returned. - // - // In the case of success, since [PROTOCOL.agent] section 4.7 specifies that the contents - // of the response are unspecified (including the type of the message), the complete - // response will be returned as a []byte slice, including the "type" byte of the message. - Extension(extensionType string, contents []byte) ([]byte, error) -} - -// ConstraintExtension describes an optional constraint defined by users. -type ConstraintExtension struct { - // ExtensionName consist of a UTF-8 string suffixed by the - // implementation domain following the naming scheme defined - // in Section 4.2 of [RFC4251], e.g. "foo@example.com". - ExtensionName string - // ExtensionDetails contains the actual content of the extended - // constraint. - ExtensionDetails []byte -} - -// AddedKey describes an SSH key to be added to an Agent. -type AddedKey struct { - // PrivateKey must be a *rsa.PrivateKey, *dsa.PrivateKey, - // ed25519.PrivateKey or *ecdsa.PrivateKey, which will be inserted into the - // agent. - PrivateKey interface{} - // Certificate, if not nil, is communicated to the agent and will be - // stored with the key. - Certificate *ssh.Certificate - // Comment is an optional, free-form string. - Comment string - // LifetimeSecs, if not zero, is the number of seconds that the - // agent will store the key for. - LifetimeSecs uint32 - // ConfirmBeforeUse, if true, requests that the agent confirm with the - // user before each use of this key. - ConfirmBeforeUse bool - // ConstraintExtensions are the experimental or private-use constraints - // defined by users. - ConstraintExtensions []ConstraintExtension -} - -// See [PROTOCOL.agent], section 3. -const ( - agentRequestV1Identities = 1 - agentRemoveAllV1Identities = 9 - - // 3.2 Requests from client to agent for protocol 2 key operations - agentAddIdentity = 17 - agentRemoveIdentity = 18 - agentRemoveAllIdentities = 19 - agentAddIDConstrained = 25 - - // 3.3 Key-type independent requests from client to agent - agentAddSmartcardKey = 20 - agentRemoveSmartcardKey = 21 - agentLock = 22 - agentUnlock = 23 - agentAddSmartcardKeyConstrained = 26 - - // 3.7 Key constraint identifiers - agentConstrainLifetime = 1 - agentConstrainConfirm = 2 - agentConstrainExtension = 3 -) - -// maxAgentResponseBytes is the maximum agent reply size that is accepted. This -// is a sanity check, not a limit in the spec. -const maxAgentResponseBytes = 16 << 20 - -// Agent messages: -// These structures mirror the wire format of the corresponding ssh agent -// messages found in [PROTOCOL.agent]. - -// 3.4 Generic replies from agent to client -const agentFailure = 5 - -type failureAgentMsg struct{} - -const agentSuccess = 6 - -type successAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentRequestIdentities = 11 - -type requestIdentitiesAgentMsg struct{} - -// See [PROTOCOL.agent], section 2.5.2. -const agentIdentitiesAnswer = 12 - -type identitiesAnswerAgentMsg struct { - NumKeys uint32 `sshtype:"12"` - Keys []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 2.6.2. -const agentSignRequest = 13 - -type signRequestAgentMsg struct { - KeyBlob []byte `sshtype:"13"` - Data []byte - Flags uint32 -} - -// See [PROTOCOL.agent], section 2.6.2. 
- -// 3.6 Replies from agent to client for protocol 2 key operations -const agentSignResponse = 14 - -type signResponseAgentMsg struct { - SigBlob []byte `sshtype:"14"` -} - -type publicKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -// 3.7 Key constraint identifiers -type constrainLifetimeAgentMsg struct { - LifetimeSecs uint32 `sshtype:"1"` -} - -type constrainExtensionAgentMsg struct { - ExtensionName string `sshtype:"3"` - ExtensionDetails []byte - - // Rest is a field used for parsing, not part of message - Rest []byte `ssh:"rest"` -} - -// See [PROTOCOL.agent], section 4.7 -const agentExtension = 27 -const agentExtensionFailure = 28 - -// ErrExtensionUnsupported indicates that an extension defined in -// [PROTOCOL.agent] section 4.7 is unsupported by the agent. Specifically this -// error indicates that the agent returned a standard SSH_AGENT_FAILURE message -// as the result of a SSH_AGENTC_EXTENSION request. Note that the protocol -// specification (and therefore this error) does not distinguish between a -// specific extension being unsupported and extensions being unsupported entirely. -var ErrExtensionUnsupported = errors.New("agent: extension unsupported") - -type extensionAgentMsg struct { - ExtensionType string `sshtype:"27"` - Contents []byte -} - -// Key represents a protocol 2 public key as defined in -// [PROTOCOL.agent], section 2.5.2. -type Key struct { - Format string - Blob []byte - Comment string -} - -func clientErr(err error) error { - return fmt.Errorf("agent: client error: %v", err) -} - -// String returns the storage form of an agent key with the format, base64 -// encoded serialized key, and the comment if it is not empty. -func (k *Key) String() string { - s := string(k.Format) + " " + base64.StdEncoding.EncodeToString(k.Blob) - - if k.Comment != "" { - s += " " + k.Comment - } - - return s -} - -// Type returns the public key type. -func (k *Key) Type() string { - return k.Format -} - -// Marshal returns key blob to satisfy the ssh.PublicKey interface. -func (k *Key) Marshal() []byte { - return k.Blob -} - -// Verify satisfies the ssh.PublicKey interface. -func (k *Key) Verify(data []byte, sig *ssh.Signature) error { - pubKey, err := ssh.ParsePublicKey(k.Blob) - if err != nil { - return fmt.Errorf("agent: bad public key: %v", err) - } - return pubKey.Verify(data, sig) -} - -type wireKey struct { - Format string - Rest []byte `ssh:"rest"` -} - -func parseKey(in []byte) (out *Key, rest []byte, err error) { - var record struct { - Blob []byte - Comment string - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(in, &record); err != nil { - return nil, nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(record.Blob, &wk); err != nil { - return nil, nil, err - } - - return &Key{ - Format: wk.Format, - Blob: record.Blob, - Comment: record.Comment, - }, record.Rest, nil -} - -// client is a client for an ssh-agent process. -type client struct { - // conn is typically a *net.UnixConn - conn io.ReadWriter - // mu is used to prevent concurrent access to the agent - mu sync.Mutex -} - -// NewClient returns an Agent that talks to an ssh-agent process over -// the given connection. -func NewClient(rw io.ReadWriter) ExtendedAgent { - return &client{conn: rw} -} - -// call sends an RPC to the agent. On success, the reply is -// unmarshaled into reply and replyType is set to the first byte of -// the reply, which contains the type of the message. 
-func (c *client) call(req []byte) (reply interface{}, err error) { - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - reply, err = unmarshal(buf) - if err != nil { - return nil, clientErr(err) - } - return reply, nil -} - -// callRaw sends an RPC to the agent. On success, the raw -// bytes of the response are returned; no unmarshalling is -// performed on the response. -func (c *client) callRaw(req []byte) (reply []byte, err error) { - c.mu.Lock() - defer c.mu.Unlock() - - msg := make([]byte, 4+len(req)) - binary.BigEndian.PutUint32(msg, uint32(len(req))) - copy(msg[4:], req) - if _, err = c.conn.Write(msg); err != nil { - return nil, clientErr(err) - } - - var respSizeBuf [4]byte - if _, err = io.ReadFull(c.conn, respSizeBuf[:]); err != nil { - return nil, clientErr(err) - } - respSize := binary.BigEndian.Uint32(respSizeBuf[:]) - if respSize > maxAgentResponseBytes { - return nil, clientErr(errors.New("response too large")) - } - - buf := make([]byte, respSize) - if _, err = io.ReadFull(c.conn, buf); err != nil { - return nil, clientErr(err) - } - return buf, nil -} - -func (c *client) simpleCall(req []byte) error { - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -func (c *client) RemoveAll() error { - return c.simpleCall([]byte{agentRemoveAllIdentities}) -} - -func (c *client) Remove(key ssh.PublicKey) error { - req := ssh.Marshal(&agentRemoveIdentityMsg{ - KeyBlob: key.Marshal(), - }) - return c.simpleCall(req) -} - -func (c *client) Lock(passphrase []byte) error { - req := ssh.Marshal(&agentLockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -func (c *client) Unlock(passphrase []byte) error { - req := ssh.Marshal(&agentUnlockMsg{ - Passphrase: passphrase, - }) - return c.simpleCall(req) -} - -// List returns the identities known to the agent. -func (c *client) List() ([]*Key, error) { - // see [PROTOCOL.agent] section 2.5.2. - req := []byte{agentRequestIdentities} - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *identitiesAnswerAgentMsg: - if msg.NumKeys > maxAgentResponseBytes/8 { - return nil, errors.New("agent: too many keys in agent reply") - } - keys := make([]*Key, msg.NumKeys) - data := msg.Keys - for i := uint32(0); i < msg.NumKeys; i++ { - var key *Key - var err error - if key, data, err = parseKey(data); err != nil { - return nil, err - } - keys[i] = key - } - return keys, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to list keys") - } - panic("unreachable") -} - -// Sign has the agent sign the data using a protocol 2 key as defined -// in [PROTOCOL.agent] section 2.6.2. 
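Editor's note: putting the client pieces above together, a typical caller dials the socket named by SSH_AUTH_SOCK and drives the Agent interface. The sketch below is an editor-supplied example, not part of the vendored file; the environment variable lookup and the sample payload are assumptions.

package main

import (
	"fmt"
	"log"
	"net"
	"os"

	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// Connect to a running ssh-agent over its UNIX socket.
	conn, err := net.Dial("unix", os.Getenv("SSH_AUTH_SOCK"))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ag := agent.NewClient(conn)

	// List the identities the agent holds and ask it to sign with the first one.
	keys, err := ag.List()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println(k) // "<format> <base64 blob> <comment>"
	}
	if len(keys) > 0 {
		sig, err := ag.Sign(keys[0], []byte("data to sign"))
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(sig.Format)
	}
}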
-func (c *client) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { - return c.SignWithFlags(key, data, 0) -} - -func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { - req := ssh.Marshal(signRequestAgentMsg{ - KeyBlob: key.Marshal(), - Data: data, - Flags: uint32(flags), - }) - - msg, err := c.call(req) - if err != nil { - return nil, err - } - - switch msg := msg.(type) { - case *signResponseAgentMsg: - var sig ssh.Signature - if err := ssh.Unmarshal(msg.SigBlob, &sig); err != nil { - return nil, err - } - - return &sig, nil - case *failureAgentMsg: - return nil, errors.New("agent: failed to sign challenge") - } - panic("unreachable") -} - -// unmarshal parses an agent message in packet, returning the parsed -// form and the message type of packet. -func unmarshal(packet []byte) (interface{}, error) { - if len(packet) < 1 { - return nil, errors.New("agent: empty packet") - } - var msg interface{} - switch packet[0] { - case agentFailure: - return new(failureAgentMsg), nil - case agentSuccess: - return new(successAgentMsg), nil - case agentIdentitiesAnswer: - msg = new(identitiesAnswerAgentMsg) - case agentSignResponse: - msg = new(signResponseAgentMsg) - case agentV1IdentitiesAnswer: - msg = new(agentV1IdentityMsg) - default: - return nil, fmt.Errorf("agent: unknown type tag %d", packet[0]) - } - if err := ssh.Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -type rsaKeyMsg struct { - Type string `sshtype:"17|25"` - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaKeyMsg struct { - Type string `sshtype:"17|25"` - P *big.Int - Q *big.Int - G *big.Int - Y *big.Int - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaKeyMsg struct { - Type string `sshtype:"17|25"` - Curve string - KeyBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519KeyMsg struct { - Type string `sshtype:"17|25"` - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Insert adds a private key to the agent. -func (c *client) insertKey(s interface{}, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaKeyMsg{ - Type: ssh.KeyAlgoRSA, - N: k.N, - E: big.NewInt(int64(k.E)), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaKeyMsg{ - Type: ssh.KeyAlgoDSA, - P: k.P, - Q: k.Q, - G: k.G, - Y: k.Y, - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - nistID := fmt.Sprintf("nistp%d", k.Params().BitSize) - req = ssh.Marshal(ecdsaKeyMsg{ - Type: "ecdsa-sha2-" + nistID, - Curve: nistID, - KeyBytes: elliptic.Marshal(k.Curve, k.X, k.Y), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case ed25519.PrivateKey: - req = ssh.Marshal(ed25519KeyMsg{ - Type: ssh.KeyAlgoED25519, - Pub: []byte(k)[32:], - Priv: []byte(k), - Comments: comment, - Constraints: constraints, - }) - // This function originally supported only *ed25519.PrivateKey, however the - // general idiom is to pass ed25519.PrivateKey by value, not by pointer. 
- // We still support the pointer variant for backwards compatibility. - case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519KeyMsg{ - Type: ssh.KeyAlgoED25519, - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. - if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -type rsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Iqmp *big.Int // IQMP = Inverse Q Mod P - P *big.Int - Q *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type dsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - X *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ecdsaCertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - D *big.Int - Comments string - Constraints []byte `ssh:"rest"` -} - -type ed25519CertMsg struct { - Type string `sshtype:"17|25"` - CertBytes []byte - Pub []byte - Priv []byte - Comments string - Constraints []byte `ssh:"rest"` -} - -// Add adds a private key to the agent. If a certificate is given, -// that certificate is added instead as public key. -func (c *client) Add(key AddedKey) error { - var constraints []byte - - if secs := key.LifetimeSecs; secs != 0 { - constraints = append(constraints, ssh.Marshal(constrainLifetimeAgentMsg{secs})...) - } - - if key.ConfirmBeforeUse { - constraints = append(constraints, agentConstrainConfirm) - } - - cert := key.Certificate - if cert == nil { - return c.insertKey(key.PrivateKey, key.Comment, constraints) - } - return c.insertCert(key.PrivateKey, cert, key.Comment, constraints) -} - -func (c *client) insertCert(s interface{}, cert *ssh.Certificate, comment string, constraints []byte) error { - var req []byte - switch k := s.(type) { - case *rsa.PrivateKey: - if len(k.Primes) != 2 { - return fmt.Errorf("agent: unsupported RSA key with %d primes", len(k.Primes)) - } - k.Precompute() - req = ssh.Marshal(rsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Iqmp: k.Precomputed.Qinv, - P: k.Primes[0], - Q: k.Primes[1], - Comments: comment, - Constraints: constraints, - }) - case *dsa.PrivateKey: - req = ssh.Marshal(dsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - X: k.X, - Comments: comment, - Constraints: constraints, - }) - case *ecdsa.PrivateKey: - req = ssh.Marshal(ecdsaCertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - D: k.D, - Comments: comment, - Constraints: constraints, - }) - case ed25519.PrivateKey: - req = ssh.Marshal(ed25519CertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - Pub: []byte(k)[32:], - Priv: []byte(k), - Comments: comment, - Constraints: constraints, - }) - // This function originally supported only *ed25519.PrivateKey, however the - // general idiom is to pass ed25519.PrivateKey by value, not by pointer. - // We still support the pointer variant for backwards compatibility. 
- case *ed25519.PrivateKey: - req = ssh.Marshal(ed25519CertMsg{ - Type: cert.Type(), - CertBytes: cert.Marshal(), - Pub: []byte(*k)[32:], - Priv: []byte(*k), - Comments: comment, - Constraints: constraints, - }) - default: - return fmt.Errorf("agent: unsupported key type %T", s) - } - - // if constraints are present then the message type needs to be changed. - if len(constraints) != 0 { - req[0] = agentAddIDConstrained - } - - signer, err := ssh.NewSignerFromKey(s) - if err != nil { - return err - } - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return errors.New("agent: signer and cert have different public key") - } - - resp, err := c.call(req) - if err != nil { - return err - } - if _, ok := resp.(*successAgentMsg); ok { - return nil - } - return errors.New("agent: failure") -} - -// Signers provides a callback for client authentication. -func (c *client) Signers() ([]ssh.Signer, error) { - keys, err := c.List() - if err != nil { - return nil, err - } - - var result []ssh.Signer - for _, k := range keys { - result = append(result, &agentKeyringSigner{c, k}) - } - return result, nil -} - -type agentKeyringSigner struct { - agent *client - pub ssh.PublicKey -} - -func (s *agentKeyringSigner) PublicKey() ssh.PublicKey { - return s.pub -} - -func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature, error) { - // The agent has its own entropy source, so the rand argument is ignored. - return s.agent.Sign(s.pub, data) -} - -func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) { - var flags SignatureFlags - if opts != nil { - switch opts.HashFunc() { - case crypto.SHA256: - flags = SignatureFlagRsaSha256 - case crypto.SHA512: - flags = SignatureFlagRsaSha512 - } - } - return s.agent.SignWithFlags(s.pub, data, flags) -} - -// Calls an extension method. It is up to the agent implementation as to whether or not -// any particular extension is supported and may always return an error. Because the -// type of the response is up to the implementation, this returns the bytes of the -// response and does not attempt any type of unmarshalling. -func (c *client) Extension(extensionType string, contents []byte) ([]byte, error) { - req := ssh.Marshal(extensionAgentMsg{ - ExtensionType: extensionType, - Contents: contents, - }) - buf, err := c.callRaw(req) - if err != nil { - return nil, err - } - if len(buf) == 0 { - return nil, errors.New("agent: failure; empty response") - } - // [PROTOCOL.agent] section 4.7 indicates that an SSH_AGENT_FAILURE message - // represents an agent that does not support the extension - if buf[0] == agentFailure { - return nil, ErrExtensionUnsupported - } - if buf[0] == agentExtensionFailure { - return nil, errors.New("agent: generic extension failure") - } - - return buf, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/forward.go b/vendor/golang.org/x/crypto/ssh/agent/forward.go deleted file mode 100644 index fd24ba900d25..000000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/forward.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "errors" - "io" - "net" - "sync" - - "golang.org/x/crypto/ssh" -) - -// RequestAgentForwarding sets up agent forwarding for the session. -// ForwardToAgent or ForwardToRemote should be called to route -// the authentication requests. 
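Editor's note: the forwarding helpers in this file are usually combined as follows: register a channel handler with ForwardToAgent, then request forwarding on a session. This is an illustrative sketch only; the *ssh.Client is assumed to be already connected and authenticated, and the remote command is just an example.

package example

import (
	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

// forwardLocalKeyring wires a local keyring into an established SSH
// connection so that a command on the remote side can use it.
func forwardLocalKeyring(client *ssh.Client, keyring agent.Agent) error {
	// Route "auth-agent@openssh.com" channels from the server to the keyring.
	if err := agent.ForwardToAgent(client, keyring); err != nil {
		return err
	}
	// Ask the server to open such channels for this session.
	session, err := client.NewSession()
	if err != nil {
		return err
	}
	defer session.Close()
	if err := agent.RequestAgentForwarding(session); err != nil {
		return err
	}
	// The remote command now sees the forwarded agent.
	return session.Run("ssh-add -l")
}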
-func RequestAgentForwarding(session *ssh.Session) error { - ok, err := session.SendRequest("auth-agent-req@openssh.com", true, nil) - if err != nil { - return err - } - if !ok { - return errors.New("forwarding request denied") - } - return nil -} - -// ForwardToAgent routes authentication requests to the given keyring. -func ForwardToAgent(client *ssh.Client, keyring Agent) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go func() { - ServeAgent(keyring, channel) - channel.Close() - }() - } - }() - return nil -} - -const channelType = "auth-agent@openssh.com" - -// ForwardToRemote routes authentication requests to the ssh-agent -// process serving on the given unix socket. -func ForwardToRemote(client *ssh.Client, addr string) error { - channels := client.HandleChannelOpen(channelType) - if channels == nil { - return errors.New("agent: already have handler for " + channelType) - } - conn, err := net.Dial("unix", addr) - if err != nil { - return err - } - conn.Close() - - go func() { - for ch := range channels { - channel, reqs, err := ch.Accept() - if err != nil { - continue - } - go ssh.DiscardRequests(reqs) - go forwardUnixSocket(channel, addr) - } - }() - return nil -} - -func forwardUnixSocket(channel ssh.Channel, addr string) { - conn, err := net.Dial("unix", addr) - if err != nil { - return - } - - var wg sync.WaitGroup - wg.Add(2) - go func() { - io.Copy(conn, channel) - conn.(*net.UnixConn).CloseWrite() - wg.Done() - }() - go func() { - io.Copy(channel, conn) - channel.CloseWrite() - wg.Done() - }() - - wg.Wait() - conn.Close() - channel.Close() -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/keyring.go b/vendor/golang.org/x/crypto/ssh/agent/keyring.go deleted file mode 100644 index c9d979430712..000000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/keyring.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "bytes" - "crypto/rand" - "crypto/subtle" - "errors" - "fmt" - "sync" - "time" - - "golang.org/x/crypto/ssh" -) - -type privKey struct { - signer ssh.Signer - comment string - expire *time.Time -} - -type keyring struct { - mu sync.Mutex - keys []privKey - - locked bool - passphrase []byte -} - -var errLocked = errors.New("agent: locked") - -// NewKeyring returns an Agent that holds keys in memory. It is safe -// for concurrent use by multiple goroutines. -func NewKeyring() Agent { - return &keyring{} -} - -// RemoveAll removes all identities. -func (r *keyring) RemoveAll() error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - r.keys = nil - return nil -} - -// removeLocked does the actual key removal. The caller must already be holding the -// keyring mutex. -func (r *keyring) removeLocked(want []byte) error { - found := false - for i := 0; i < len(r.keys); { - if bytes.Equal(r.keys[i].signer.PublicKey().Marshal(), want) { - found = true - r.keys[i] = r.keys[len(r.keys)-1] - r.keys = r.keys[:len(r.keys)-1] - continue - } else { - i++ - } - } - - if !found { - return errors.New("agent: key not found") - } - return nil -} - -// Remove removes all identities with the given public key. 
-func (r *keyring) Remove(key ssh.PublicKey) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - return r.removeLocked(key.Marshal()) -} - -// Lock locks the agent. Sign and Remove will fail, and List will return an empty list. -func (r *keyring) Lock(passphrase []byte) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - - r.locked = true - r.passphrase = passphrase - return nil -} - -// Unlock undoes the effect of Lock -func (r *keyring) Unlock(passphrase []byte) error { - r.mu.Lock() - defer r.mu.Unlock() - if !r.locked { - return errors.New("agent: not locked") - } - if 1 != subtle.ConstantTimeCompare(passphrase, r.passphrase) { - return fmt.Errorf("agent: incorrect passphrase") - } - - r.locked = false - r.passphrase = nil - return nil -} - -// expireKeysLocked removes expired keys from the keyring. If a key was added -// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have -// ellapsed, it is removed. The caller *must* be holding the keyring mutex. -func (r *keyring) expireKeysLocked() { - for _, k := range r.keys { - if k.expire != nil && time.Now().After(*k.expire) { - r.removeLocked(k.signer.PublicKey().Marshal()) - } - } -} - -// List returns the identities known to the agent. -func (r *keyring) List() ([]*Key, error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - // section 2.7: locked agents return empty. - return nil, nil - } - - r.expireKeysLocked() - var ids []*Key - for _, k := range r.keys { - pub := k.signer.PublicKey() - ids = append(ids, &Key{ - Format: pub.Type(), - Blob: pub.Marshal(), - Comment: k.comment}) - } - return ids, nil -} - -// Insert adds a private key to the keyring. If a certificate -// is given, that certificate is added as public key. Note that -// any constraints given are ignored. -func (r *keyring) Add(key AddedKey) error { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return errLocked - } - signer, err := ssh.NewSignerFromKey(key.PrivateKey) - - if err != nil { - return err - } - - if cert := key.Certificate; cert != nil { - signer, err = ssh.NewCertSigner(cert, signer) - if err != nil { - return err - } - } - - p := privKey{ - signer: signer, - comment: key.Comment, - } - - if key.LifetimeSecs > 0 { - t := time.Now().Add(time.Duration(key.LifetimeSecs) * time.Second) - p.expire = &t - } - - r.keys = append(r.keys, p) - - return nil -} - -// Sign returns a signature for the data. 
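Editor's note: the in-memory keyring above can be exercised end to end without an external agent process. A minimal editor-supplied sketch (the generated key, comment, and lifetime are arbitrary example values):

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
	"golang.org/x/crypto/ssh/agent"
)

func main() {
	// Generate a throwaway key and add it to an in-memory keyring with a
	// one-hour lifetime constraint.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		log.Fatal(err)
	}
	keyring := agent.NewKeyring()
	if err := keyring.Add(agent.AddedKey{
		PrivateKey:   priv,
		Comment:      "example key",
		LifetimeSecs: 3600, // one hour
	}); err != nil {
		log.Fatal(err)
	}

	// Sign some data with the stored key and verify the signature.
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		log.Fatal(err)
	}
	sig, err := keyring.Sign(sshPub, []byte("payload"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(sshPub.Verify([]byte("payload"), sig) == nil) // true
}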
-func (r *keyring) Sign(key ssh.PublicKey, data []byte) (*ssh.Signature, error) { - return r.SignWithFlags(key, data, 0) -} - -func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFlags) (*ssh.Signature, error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return nil, errLocked - } - - r.expireKeysLocked() - wanted := key.Marshal() - for _, k := range r.keys { - if bytes.Equal(k.signer.PublicKey().Marshal(), wanted) { - if flags == 0 { - return k.signer.Sign(rand.Reader, data) - } else { - if algorithmSigner, ok := k.signer.(ssh.AlgorithmSigner); !ok { - return nil, fmt.Errorf("agent: signature does not support non-default signature algorithm: %T", k.signer) - } else { - var algorithm string - switch flags { - case SignatureFlagRsaSha256: - algorithm = ssh.SigAlgoRSASHA2256 - case SignatureFlagRsaSha512: - algorithm = ssh.SigAlgoRSASHA2512 - default: - return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags) - } - return algorithmSigner.SignWithAlgorithm(rand.Reader, data, algorithm) - } - } - } - } - return nil, errors.New("not found") -} - -// Signers returns signers for all the known keys. -func (r *keyring) Signers() ([]ssh.Signer, error) { - r.mu.Lock() - defer r.mu.Unlock() - if r.locked { - return nil, errLocked - } - - r.expireKeysLocked() - s := make([]ssh.Signer, 0, len(r.keys)) - for _, k := range r.keys { - s = append(s, k.signer) - } - return s, nil -} - -// The keyring does not support any extensions -func (r *keyring) Extension(extensionType string, contents []byte) ([]byte, error) { - return nil, ErrExtensionUnsupported -} diff --git a/vendor/golang.org/x/crypto/ssh/agent/server.go b/vendor/golang.org/x/crypto/ssh/agent/server.go deleted file mode 100644 index 6e7a1e02f27b..000000000000 --- a/vendor/golang.org/x/crypto/ssh/agent/server.go +++ /dev/null @@ -1,570 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package agent - -import ( - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "math/big" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh" -) - -// Server wraps an Agent and uses it to implement the agent side of -// the SSH-agent, wire protocol. -type server struct { - agent Agent -} - -func (s *server) processRequestBytes(reqData []byte) []byte { - rep, err := s.processRequest(reqData) - if err != nil { - if err != errLocked { - // TODO(hanwen): provide better logging interface? - log.Printf("agent %d: %v", reqData[0], err) - } - return []byte{agentFailure} - } - - if err == nil && rep == nil { - return []byte{agentSuccess} - } - - return ssh.Marshal(rep) -} - -func marshalKey(k *Key) []byte { - var record struct { - Blob []byte - Comment string - } - record.Blob = k.Marshal() - record.Comment = k.Comment - - return ssh.Marshal(&record) -} - -// See [PROTOCOL.agent], section 2.5.1. 
-const agentV1IdentitiesAnswer = 2 - -type agentV1IdentityMsg struct { - Numkeys uint32 `sshtype:"2"` -} - -type agentRemoveIdentityMsg struct { - KeyBlob []byte `sshtype:"18"` -} - -type agentLockMsg struct { - Passphrase []byte `sshtype:"22"` -} - -type agentUnlockMsg struct { - Passphrase []byte `sshtype:"23"` -} - -func (s *server) processRequest(data []byte) (interface{}, error) { - switch data[0] { - case agentRequestV1Identities: - return &agentV1IdentityMsg{0}, nil - - case agentRemoveAllV1Identities: - return nil, nil - - case agentRemoveIdentity: - var req agentRemoveIdentityMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - return nil, s.agent.Remove(&Key{Format: wk.Format, Blob: req.KeyBlob}) - - case agentRemoveAllIdentities: - return nil, s.agent.RemoveAll() - - case agentLock: - var req agentLockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - return nil, s.agent.Lock(req.Passphrase) - - case agentUnlock: - var req agentUnlockMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - return nil, s.agent.Unlock(req.Passphrase) - - case agentSignRequest: - var req signRequestAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - - var wk wireKey - if err := ssh.Unmarshal(req.KeyBlob, &wk); err != nil { - return nil, err - } - - k := &Key{ - Format: wk.Format, - Blob: req.KeyBlob, - } - - var sig *ssh.Signature - var err error - if extendedAgent, ok := s.agent.(ExtendedAgent); ok { - sig, err = extendedAgent.SignWithFlags(k, req.Data, SignatureFlags(req.Flags)) - } else { - sig, err = s.agent.Sign(k, req.Data) - } - - if err != nil { - return nil, err - } - return &signResponseAgentMsg{SigBlob: ssh.Marshal(sig)}, nil - - case agentRequestIdentities: - keys, err := s.agent.List() - if err != nil { - return nil, err - } - - rep := identitiesAnswerAgentMsg{ - NumKeys: uint32(len(keys)), - } - for _, k := range keys { - rep.Keys = append(rep.Keys, marshalKey(k)...) - } - return rep, nil - - case agentAddIDConstrained, agentAddIdentity: - return nil, s.insertIdentity(data) - - case agentExtension: - // Return a stub object where the whole contents of the response gets marshaled. - var responseStub struct { - Rest []byte `ssh:"rest"` - } - - if extendedAgent, ok := s.agent.(ExtendedAgent); !ok { - // If this agent doesn't implement extensions, [PROTOCOL.agent] section 4.7 - // requires that we return a standard SSH_AGENT_FAILURE message. - responseStub.Rest = []byte{agentFailure} - } else { - var req extensionAgentMsg - if err := ssh.Unmarshal(data, &req); err != nil { - return nil, err - } - res, err := extendedAgent.Extension(req.ExtensionType, req.Contents) - if err != nil { - // If agent extensions are unsupported, return a standard SSH_AGENT_FAILURE - // message as required by [PROTOCOL.agent] section 4.7. - if err == ErrExtensionUnsupported { - responseStub.Rest = []byte{agentFailure} - } else { - // As the result of any other error processing an extension request, - // [PROTOCOL.agent] section 4.7 requires that we return a - // SSH_AGENT_EXTENSION_FAILURE code. 
- responseStub.Rest = []byte{agentExtensionFailure} - } - } else { - if len(res) == 0 { - return nil, nil - } - responseStub.Rest = res - } - } - - return responseStub, nil - } - - return nil, fmt.Errorf("unknown opcode %d", data[0]) -} - -func parseConstraints(constraints []byte) (lifetimeSecs uint32, confirmBeforeUse bool, extensions []ConstraintExtension, err error) { - for len(constraints) != 0 { - switch constraints[0] { - case agentConstrainLifetime: - lifetimeSecs = binary.BigEndian.Uint32(constraints[1:5]) - constraints = constraints[5:] - case agentConstrainConfirm: - confirmBeforeUse = true - constraints = constraints[1:] - case agentConstrainExtension: - var msg constrainExtensionAgentMsg - if err = ssh.Unmarshal(constraints, &msg); err != nil { - return 0, false, nil, err - } - extensions = append(extensions, ConstraintExtension{ - ExtensionName: msg.ExtensionName, - ExtensionDetails: msg.ExtensionDetails, - }) - constraints = msg.Rest - default: - return 0, false, nil, fmt.Errorf("unknown constraint type: %d", constraints[0]) - } - } - return -} - -func setConstraints(key *AddedKey, constraintBytes []byte) error { - lifetimeSecs, confirmBeforeUse, constraintExtensions, err := parseConstraints(constraintBytes) - if err != nil { - return err - } - - key.LifetimeSecs = lifetimeSecs - key.ConfirmBeforeUse = confirmBeforeUse - key.ConstraintExtensions = constraintExtensions - return nil -} - -func parseRSAKey(req []byte) (*AddedKey, error) { - var k rsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - if k.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - priv := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(k.E.Int64()), - N: k.N, - }, - D: k.D, - Primes: []*big.Int{k.P, k.Q}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseEd25519Key(req []byte) (*AddedKey, error) { - var k ed25519KeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - - addedKey := &AddedKey{PrivateKey: &priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSAKey(req []byte) (*AddedKey, error) { - var k dsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Y, - }, - X: k.X, - } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func unmarshalECDSA(curveName string, keyBytes []byte, privScalar *big.Int) (priv *ecdsa.PrivateKey, err error) { - priv = &ecdsa.PrivateKey{ - D: privScalar, - } - - switch curveName { - case "nistp256": - priv.Curve = elliptic.P256() - case "nistp384": - priv.Curve = elliptic.P384() - case "nistp521": - priv.Curve = elliptic.P521() - default: - return nil, fmt.Errorf("agent: unknown curve %q", curveName) - } - - priv.X, priv.Y = elliptic.Unmarshal(priv.Curve, keyBytes) - if priv.X == nil || priv.Y == nil { - return nil, errors.New("agent: point not on curve") - } - - return priv, nil -} - -func parseEd25519Cert(req []byte) (*AddedKey, error) { - var k ed25519CertMsg - if err := ssh.Unmarshal(req, &k); err != 
nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - priv := ed25519.PrivateKey(k.Priv) - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ED25519 certificate") - } - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSAKey(req []byte) (*AddedKey, error) { - var k ecdsaKeyMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - priv, err := unmarshalECDSA(k.Curve, k.KeyBytes, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseRSACert(req []byte) (*AddedKey, error) { - var k rsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad RSA certificate") - } - - // An RSA publickey as marshaled by rsaPublicKey.Marshal() in keys.go - var rsaPub struct { - Name string - E *big.Int - N *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &rsaPub); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - if rsaPub.E.BitLen() > 30 { - return nil, errors.New("agent: RSA public exponent too large") - } - - priv := rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - E: int(rsaPub.E.Int64()), - N: rsaPub.N, - }, - D: k.D, - Primes: []*big.Int{k.Q, k.P}, - } - priv.Precompute() - - addedKey := &AddedKey{PrivateKey: &priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseDSACert(req []byte) (*AddedKey, error) { - var k dsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad DSA certificate") - } - - // A DSA publickey as marshaled by dsaPublicKey.Marshal() in keys.go - var w struct { - Name string - P, Q, G, Y *big.Int - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &w); err != nil { - return nil, fmt.Errorf("agent: Unmarshal failed to parse public key: %v", err) - } - - priv := &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - }, - Y: w.Y, - }, - X: k.X, - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func parseECDSACert(req []byte) (*AddedKey, error) { - var k ecdsaCertMsg - if err := ssh.Unmarshal(req, &k); err != nil { - return nil, err - } - - pubKey, err := ssh.ParsePublicKey(k.CertBytes) - if err != nil { - return nil, err - } - cert, ok := pubKey.(*ssh.Certificate) - if !ok { - return nil, errors.New("agent: bad ECDSA certificate") - } - - // An ECDSA publickey as marshaled by ecdsaPublicKey.Marshal() in keys.go - var ecdsaPub struct { - Name string - ID string - Key []byte - } - if err := ssh.Unmarshal(cert.Key.Marshal(), &ecdsaPub); err != nil { - return nil, 
err - } - - priv, err := unmarshalECDSA(ecdsaPub.ID, ecdsaPub.Key, k.D) - if err != nil { - return nil, err - } - - addedKey := &AddedKey{PrivateKey: priv, Certificate: cert, Comment: k.Comments} - if err := setConstraints(addedKey, k.Constraints); err != nil { - return nil, err - } - return addedKey, nil -} - -func (s *server) insertIdentity(req []byte) error { - var record struct { - Type string `sshtype:"17|25"` - Rest []byte `ssh:"rest"` - } - - if err := ssh.Unmarshal(req, &record); err != nil { - return err - } - - var addedKey *AddedKey - var err error - - switch record.Type { - case ssh.KeyAlgoRSA: - addedKey, err = parseRSAKey(req) - case ssh.KeyAlgoDSA: - addedKey, err = parseDSAKey(req) - case ssh.KeyAlgoECDSA256, ssh.KeyAlgoECDSA384, ssh.KeyAlgoECDSA521: - addedKey, err = parseECDSAKey(req) - case ssh.KeyAlgoED25519: - addedKey, err = parseEd25519Key(req) - case ssh.CertAlgoRSAv01: - addedKey, err = parseRSACert(req) - case ssh.CertAlgoDSAv01: - addedKey, err = parseDSACert(req) - case ssh.CertAlgoECDSA256v01, ssh.CertAlgoECDSA384v01, ssh.CertAlgoECDSA521v01: - addedKey, err = parseECDSACert(req) - case ssh.CertAlgoED25519v01: - addedKey, err = parseEd25519Cert(req) - default: - return fmt.Errorf("agent: not implemented: %q", record.Type) - } - - if err != nil { - return err - } - return s.agent.Add(*addedKey) -} - -// ServeAgent serves the agent protocol on the given connection. It -// returns when an I/O error occurs. -func ServeAgent(agent Agent, c io.ReadWriter) error { - s := &server{agent} - - var length [4]byte - for { - if _, err := io.ReadFull(c, length[:]); err != nil { - return err - } - l := binary.BigEndian.Uint32(length[:]) - if l == 0 { - return fmt.Errorf("agent: request size is 0") - } - if l > maxAgentResponseBytes { - // We also cap requests. - return fmt.Errorf("agent: request too large: %d", l) - } - - req := make([]byte, l) - if _, err := io.ReadFull(c, req); err != nil { - return err - } - - repData := s.processRequestBytes(req) - if len(repData) > maxAgentResponseBytes { - return fmt.Errorf("agent: reply too large: %d bytes", len(repData)) - } - - binary.BigEndian.PutUint32(length[:], uint32(len(repData))) - if _, err := c.Write(length[:]); err != nil { - return err - } - if _, err := c.Write(repData); err != nil { - return err - } - } -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 1ab07d078db1..000000000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. 
-// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive io.EOF. -func (b *buffer) eof() { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index 916c840b6988..000000000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,546 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. -const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" - CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte - Rest []byte `ssh:"rest"` -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. The Certificate type implements the -// PublicKey interface, so it can be unmarshaled using -// ParsePublicKey. -type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. 
-type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) - } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. - if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - 
signer Signer -} - -type algorithmOpenSSHCertSigner struct { - *openSSHCertSigner - algorithmSigner AlgorithmSigner -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - if algorithmSigner, ok := signer.(AlgorithmSigner); ok { - return &algorithmOpenSSHCertSigner{ - &openSSHCertSigner{cert, signer}, algorithmSigner}, nil - } else { - return &openSSHCertSigner{cert, signer}, nil - } -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -func (s *algorithmOpenSSHCertSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - return s.algorithmSigner.SignWithAlgorithm(rand, data, algorithm) -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsUserAuthority should return true if the key is recognized as an - // authority for the given user certificate. This allows for - // certificates to be signed by other certificates. This must be set - // if this CertChecker will be checking user certificates. - IsUserAuthority func(auth PublicKey) bool - - // IsHostAuthority should report whether the key is recognized as - // an authority for this host. This allows for certificates to be - // signed by other keys, and for those other keys to only be valid - // signers for particular hostnames. This must be set if this - // CertChecker will be checking host certificates. - IsHostAuthority func(auth PublicKey, address string) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback HostKeyCallback - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. 
-func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - if !c.IsHostAuthority(cert.SignatureKey, addr) { - return fmt.Errorf("ssh: no authorities for hostname: %v", addr) - } - - hostname, _, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - // Pass hostname only as principal for host certificates (consistent with OpenSSH) - return c.CheckCert(hostname, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - if !c.IsUserAuthority(cert.SignatureKey) { - return nil, fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. -func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certificate serial %d revoked", cert.Serial) - } - - for opt := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. - found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert signs the certificate with an authority, setting the Nonce, -// SignatureKey, and Signature fields. 
-func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01, - KeyAlgoED25519: CertAlgoED25519v01, - KeyAlgoSKED25519: CertAlgoSKED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. -func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. 
-func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - switch out.Format { - case KeyAlgoSKECDSA256, CertAlgoSKECDSA256v01, KeyAlgoSKED25519, CertAlgoSKED25519v01: - out.Rest = in - return out, nil, ok - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index c0834c00dfee..000000000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. 
- SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). - maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. 
- packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. -func (ch *channel) writePacket(packet []byte) error { - ch.writeMu.Lock() - if ch.sentClose { - ch.writeMu.Unlock() - return io.EOF - } - ch.sentClose = (packet[0] == msgChannelClose) - err := ch.mux.conn.writePacket(packet) - ch.writeMu.Unlock() - return err -} - -func (ch *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", ch.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], ch.remoteId) - return ch.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. -func (ch *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if ch.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - ch.writeMu.Lock() - packet := ch.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. - ch.writeMu.Unlock() - - for len(data) > 0 { - space := min(ch.maxRemotePayload, len(data)) - if space, err = ch.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], ch.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = ch.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - ch.writeMu.Lock() - ch.packetPool[extendedCode] = packet - ch.writeMu.Unlock() - - return n, err -} - -func (ch *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > ch.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - ch.windowMu.Lock() - if ch.myWindow < length { - ch.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - ch.myWindow -= length - ch.windowMu.Unlock() - - if extended == 1 { - ch.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - ch.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. 
- c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. -func (ch *channel) responseMessageReceived() error { - if ch.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if ch.decided { - return errors.New("ssh: duplicate response received for channel") - } - ch.decided = true - return nil -} - -func (ch *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return ch.handleData(packet) - case msgChannelClose: - ch.sendMessage(channelCloseMsg{PeersID: ch.remoteId}) - ch.mux.chanList.remove(ch.localId) - ch.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. 
- ch.extPending.eof() - ch.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - ch.mux.chanList.remove(msg.PeersID) - ch.msg <- msg - case *channelOpenConfirmMsg: - if err := ch.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - ch.remoteId = msg.MyID - ch.maxRemotePayload = msg.MaxPacketSize - ch.remoteWin.add(msg.MyWindow) - ch.msg <- msg - case *windowAdjustMsg: - if !ch.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: ch, - } - - ch.incomingRequests <- &req - default: - ch.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, chanSize), - msg: make(chan interface{}, chanSize), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (ch *channel) Accept() (Channel, <-chan *Request, error) { - if ch.decided { - return nil, nil, errDecidedAlready - } - ch.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersID: ch.remoteId, - MyID: ch.localId, - MyWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - } - ch.decided = true - if err := ch.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersID: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersID: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersID: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. 
-func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersID: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersID: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersID: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 8bd6b3daff52..000000000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,781 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" - - "golang.org/x/crypto/chacha20" - "golang.org/x/crypto/poly1305" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. 
-type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type cipherMode struct { - keySize int - ivSize int - create func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) -} - -func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream, error)) func(key, iv []byte, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - return func(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - stream, err := createFunc(key, iv) - if err != nil { - return nil, err - } - - var streamDump []byte - if skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - mac := macModes[algs.MAC].new(macKey) - return &streamPacketCipher{ - mac: mac, - etm: macModes[algs.MAC].etm, - macResult: make([]byte, mac.Size()), - cipher: stream, - }, nil - } -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*cipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes192-ctr": {24, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - "aes256-ctr": {32, aes.BlockSize, streamCipherMode(0, newAESCTR)}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, streamCipherMode(1536, newRC4)}, - "arcfour256": {32, 0, streamCipherMode(1536, newRC4)}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, streamCipherMode(0, newRC4)}, - - // AEAD ciphers - gcmCipherID: {16, 12, newGCMCipher}, - chacha20Poly1305ID: {64, 0, newChaCha20Cipher}, - - // CBC mode is insecure and so is not included in the default config. - // (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, newAESCBCCipher}, - - // 3des-cbc is insecure and is not included in the default - // config. - tripledescbcID: {24, des.BlockSize, newTripleDESCBCCipher}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - etm bool - - // The following members are to avoid per-packet allocations. 
- prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readCipherPacket reads and decrypt a single packet from the reader argument. -func (s *streamPacketCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - var encryptedPaddingLength [1]byte - if s.mac != nil && s.etm { - copy(encryptedPaddingLength[:], s.prefix[4:5]) - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } else { - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - if s.etm { - s.mac.Write(s.prefix[:4]) - s.mac.Write(encryptedPaddingLength[:]) - } else { - s.mac.Write(s.prefix[:]) - } - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. - if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - - if s.mac != nil && s.etm { - s.mac.Write(data) - } - - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - if !s.etm { - s.mac.Write(data) - } - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writeCipherPacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - aadlen := 0 - if s.mac != nil && s.etm { - // packet length is not encrypted for EtM modes - aadlen = 4 - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet)-aadlen)%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - - if s.etm { - // For EtM algorithms, the packet length must stay unencrypted, - // but the following data (padding length) must be encrypted - s.cipher.XORKeyStream(s.prefix[4:5], s.prefix[4:5]) - } - - s.mac.Write(s.prefix[:]) - - if !s.etm { - // For non-EtM algorithms, the algorithm is applied on unencrypted data - s.mac.Write(packet) - s.mac.Write(padding) - } - } - - if !(s.mac != nil && s.etm) { - // For EtM algorithms, the padding length has already been encrypted - // and the packet length must remain unencrypted - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - } - - s.cipher.XORKeyStream(packet, packet) - 
s.cipher.XORKeyStream(padding, padding) - - if s.mac != nil && s.etm { - // For EtM algorithms, packet and padding must be encrypted - s.mac.Write(packet) - s.mac.Write(padding) - } - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(key, iv, unusedMacKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. - padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. 
- oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(key, iv, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, key, iv, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. -type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readCipherPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readCipherPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. - if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. 
- if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - n, err := io.ReadFull(r, c.packetData[firstBlockLength:]) - if err != nil { - return nil, err - } - c.oracleCamouflage -= uint32(n) - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). - // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} - -const chacha20Poly1305ID = "chacha20-poly1305@openssh.com" - -// chacha20Poly1305Cipher implements the chacha20-poly1305@openssh.com -// AEAD, which is described here: -// -// https://tools.ietf.org/html/draft-josefsson-ssh-chacha20-poly1305-openssh-00 -// -// the methods here also implement padding, which RFC4253 Section 6 -// also requires of stream ciphers. 
-type chacha20Poly1305Cipher struct { - lengthKey [32]byte - contentKey [32]byte - buf []byte -} - -func newChaCha20Cipher(key, unusedIV, unusedMACKey []byte, unusedAlgs directionAlgorithms) (packetCipher, error) { - if len(key) != 64 { - panic(len(key)) - } - - c := &chacha20Poly1305Cipher{ - buf: make([]byte, 256), - } - - copy(c.contentKey[:], key[:32]) - copy(c.lengthKey[:], key[32:]) - return c, nil -} - -func (c *chacha20Poly1305Cipher) readCipherPacket(seqNum uint32, r io.Reader) ([]byte, error) { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return nil, err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - encryptedLength := c.buf[:4] - if _, err := io.ReadFull(r, encryptedLength); err != nil { - return nil, err - } - - var lenBytes [4]byte - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return nil, err - } - ls.XORKeyStream(lenBytes[:], encryptedLength) - - length := binary.BigEndian.Uint32(lenBytes[:]) - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - contentEnd := 4 + length - packetEnd := contentEnd + poly1305.TagSize - if uint32(cap(c.buf)) < packetEnd { - c.buf = make([]byte, packetEnd) - copy(c.buf[:], encryptedLength) - } else { - c.buf = c.buf[:packetEnd] - } - - if _, err := io.ReadFull(r, c.buf[4:packetEnd]); err != nil { - return nil, err - } - - var mac [poly1305.TagSize]byte - copy(mac[:], c.buf[contentEnd:packetEnd]) - if !poly1305.Verify(&mac, c.buf[:contentEnd], &polyKey) { - return nil, errors.New("ssh: MAC failure") - } - - plain := c.buf[4:contentEnd] - s.XORKeyStream(plain, plain) - - padding := plain[0] - if padding < 4 { - // padding is a byte, so it automatically satisfies - // the maximum size, which is 255. - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding)+1 >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - - plain = plain[1 : len(plain)-int(padding)] - - return plain, nil -} - -func (c *chacha20Poly1305Cipher) writeCipherPacket(seqNum uint32, w io.Writer, rand io.Reader, payload []byte) error { - nonce := make([]byte, 12) - binary.BigEndian.PutUint32(nonce[8:], seqNum) - s, err := chacha20.NewUnauthenticatedCipher(c.contentKey[:], nonce) - if err != nil { - return err - } - var polyKey, discardBuf [32]byte - s.XORKeyStream(polyKey[:], polyKey[:]) - s.XORKeyStream(discardBuf[:], discardBuf[:]) // skip the next 32 bytes - - // There is no blocksize, so fall back to multiple of 8 byte - // padding, as described in RFC 4253, Sec 6. - const packetSizeMultiple = 8 - - padding := packetSizeMultiple - (1+len(payload))%packetSizeMultiple - if padding < 4 { - padding += packetSizeMultiple - } - - // size (4 bytes), padding (1), payload, padding, tag. 
- totalLength := 4 + 1 + len(payload) + padding + poly1305.TagSize - if cap(c.buf) < totalLength { - c.buf = make([]byte, totalLength) - } else { - c.buf = c.buf[:totalLength] - } - - binary.BigEndian.PutUint32(c.buf, uint32(1+len(payload)+padding)) - ls, err := chacha20.NewUnauthenticatedCipher(c.lengthKey[:], nonce) - if err != nil { - return err - } - ls.XORKeyStream(c.buf, c.buf[:4]) - c.buf[4] = byte(padding) - copy(c.buf[5:], payload) - packetEnd := 5 + len(payload) + padding - if _, err := io.ReadFull(rand, c.buf[5+len(payload):packetEnd]); err != nil { - return err - } - - s.XORKeyStream(c.buf[4:], c.buf[4:packetEnd]) - - var mac [poly1305.TagSize]byte - poly1305.Sum(&mac, c.buf[:packetEnd], &polyKey) - - copy(c.buf[packetEnd:], mac[:]) - - if _, err := w.Write(c.buf); err != nil { - return err - } - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index 7b00bff1caa5..000000000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "net" - "os" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, TCP port/streamlocal forwarding and tunneled dialing. -type Client struct { - Conn - - handleForwardsOnce sync.Once // guards calling (*Client).handleForwards - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, chanSize) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. 
-func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.HostKeyCallback == nil { - c.Close() - return nil, nil, nil, errors.New("ssh: must specify HostKeyCallback") - } - - conn := &connection{ - sshConn: sshConn{conn: c}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.waitSession(); err != nil { - return err - } - - c.sessionID = c.transport.getSessionID() - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// HostKeyCallback is the function type used for verifying server -// keys. A HostKeyCallback must return nil if the host key is OK, or -// an error to reject it. 
It receives the hostname as passed to Dial -// or NewClientConn. The remote address is the RemoteAddr of the -// net.Conn underlying the SSH connection. -type HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - -// BannerCallback is the function type used for treat the banner sent by -// the server. A BannerCallback receives the message sent by the remote server. -type BannerCallback func(message string) error - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. - Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback is called during the cryptographic - // handshake to validate the server's host key. The client - // configuration must supply this callback for the connection - // to succeed. The functions InsecureIgnoreHostKey or - // FixedHostKey can be used for simplistic host key checks. - HostKeyCallback HostKeyCallback - - // BannerCallback is called during the SSH dance to display a custom - // server's message. The client configuration can supply this callback to - // handle it as wished. The function BannerDisplayStderr can be used for - // simplistic display on Stderr. - BannerCallback BannerCallback - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} - -// InsecureIgnoreHostKey returns a function that can be used for -// ClientConfig.HostKeyCallback to accept any host key. It should -// not be used for production code. -func InsecureIgnoreHostKey() HostKeyCallback { - return func(hostname string, remote net.Addr, key PublicKey) error { - return nil - } -} - -type fixedHostKey struct { - key PublicKey -} - -func (f *fixedHostKey) check(hostname string, remote net.Addr, key PublicKey) error { - if f.key == nil { - return fmt.Errorf("ssh: required host key was nil") - } - if !bytes.Equal(key.Marshal(), f.key.Marshal()) { - return fmt.Errorf("ssh: host key mismatch") - } - return nil -} - -// FixedHostKey returns a function for use in -// ClientConfig.HostKeyCallback to accept only a specific host key. -func FixedHostKey(key PublicKey) HostKeyCallback { - hk := &fixedHostKey{key} - return hk.check -} - -// BannerDisplayStderr returns a function that can be used for -// ClientConfig.BannerCallback to display banners on os.Stderr. 
-func BannerDisplayStderr() BannerCallback { - return func(banner string) error { - _, err := os.Stderr.WriteString(banner) - - return err - } -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index c611aeb68467..000000000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,641 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -type authResult int - -const ( - authFailure authResult = iota - authPartialSuccess - authSuccess -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - var tried []string - var lastMethods []string - - sessionID := c.transport.getSessionID() - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok == authSuccess { - // success - return nil - } else if ok == authFailure { - if m := auth.method(); !contains(tried, m) { - tried = append(tried, m) - } - } - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if contains(tried, candidateMethod) { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried) -} - -func contains(list []string, e string) bool { - for _, s := range list { - if s == e { - return true - } - } - return false -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. -type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. 
-type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return authFailure, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return authFailure, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - // Authentication is performed by sending an enquiry to test if a key is - // acceptable to the remote. If the key is acceptable, the client will - // attempt to authenticate with the valid key. If not the client will repeat - // the process with the remaining keys. - - signers, err := cb() - if err != nil { - return authFailure, nil, err - } - var methods []string - for _, signer := range signers { - ok, err := validateKey(signer.PublicKey(), user, c) - if err != nil { - return authFailure, nil, err - } - if !ok { - continue - } - - pub := signer.PublicKey() - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return authFailure, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return authFailure, nil, err - } - var success authResult - success, methods, err = handleAuthResponse(c) - if err != nil { - return authFailure, nil, err - } - - // If authentication succeeds or the list of available methods does not - // contain the "publickey" method, do not attempt to authenticate with any - // other keys. 
According to RFC 4252 Section 7, the latter can occur when - // additional authentication methods are required. - if success == authSuccess || !containsMethod(methods, cb.method()) { - return success, methods, err - } - } - - return authFailure, methods, nil -} - -func containsMethod(methods []string, method string) bool { - for _, m := range methods { - if m == method { - return true - } - } - - return false -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return false, err - } - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (authResult, []string, error) { - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -func handleBannerResponse(c packetConn, packet []byte) error { - var msg userAuthBannerMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - transport, ok := c.(*handshakeTransport) - if !ok { - return nil - } - - if transport.bannerCallback != nil { - return transport.bannerCallback(msg.Message) - } - - return nil -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. 
After -// successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns an AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return authFailure, nil, err - } - - for { - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - if err := handleBannerResponse(c, packet); err != nil { - return authFailure, nil, err - } - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthSuccess: - return authSuccess, nil, nil - default: - return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - - // Manually unpack the prompt/echo pairs. 
- rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return authFailure, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.User, msg.Instruction, prompts, echos) - if err != nil { - return authFailure, nil, err - } - - if len(answers) != len(prompts) { - return authFailure, nil, fmt.Errorf("ssh: incorrect number of answers from keyboard-interactive callback %d (expected %d)", len(answers), len(prompts)) - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return authFailure, nil, err - } - } -} - -type retryableAuthMethod struct { - authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) - if ok != authFailure || err != nil { // either success, partial success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} - -// GSSAPIWithMICAuthMethod is an AuthMethod with "gssapi-with-mic" authentication. -// See RFC 4462 section 3 -// gssAPIClient is implementation of the GSSAPIClient interface, see the definition of the interface for details. -// target is the server host you want to log in to. 
-func GSSAPIWithMICAuthMethod(gssAPIClient GSSAPIClient, target string) AuthMethod { - if gssAPIClient == nil { - panic("gss-api client must be not nil with enable gssapi-with-mic") - } - return &gssAPIWithMICCallback{gssAPIClient: gssAPIClient, target: target} -} - -type gssAPIWithMICCallback struct { - gssAPIClient GSSAPIClient - target string -} - -func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) { - m := &userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: g.method(), - } - // The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST. - // See RFC 4462 section 3.2. - m.Payload = appendU32(m.Payload, 1) - m.Payload = appendString(m.Payload, string(krb5OID)) - if err := c.writePacket(Marshal(m)); err != nil { - return authFailure, nil, err - } - // The server responds to the SSH_MSG_USERAUTH_REQUEST with either an - // SSH_MSG_USERAUTH_FAILURE if none of the mechanisms are supported or - // with an SSH_MSG_USERAUTH_GSSAPI_RESPONSE. - // See RFC 4462 section 3.3. - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication,so I don't want to check - // selected mech if it is valid. - packet, err := c.readPacket() - if err != nil { - return authFailure, nil, err - } - userAuthGSSAPIResp := &userAuthGSSAPIResponse{} - if err := Unmarshal(packet, userAuthGSSAPIResp); err != nil { - return authFailure, nil, err - } - // Start the loop into the exchange token. - // See RFC 4462 section 3.4. - var token []byte - defer g.gssAPIClient.DeleteSecContext() - for { - // Initiates the establishment of a security context between the application and a remote peer. - nextToken, needContinue, err := g.gssAPIClient.InitSecContext("host@"+g.target, token, false) - if err != nil { - return authFailure, nil, err - } - if len(nextToken) > 0 { - if err := c.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: nextToken, - })); err != nil { - return authFailure, nil, err - } - } - if !needContinue { - break - } - packet, err = c.readPacket() - if err != nil { - return authFailure, nil, err - } - switch packet[0] { - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return authFailure, nil, err - } - if msg.PartialSuccess { - return authPartialSuccess, msg.Methods, nil - } - return authFailure, msg.Methods, nil - case msgUserAuthGSSAPIError: - userAuthGSSAPIErrorResp := &userAuthGSSAPIError{} - if err := Unmarshal(packet, userAuthGSSAPIErrorResp); err != nil { - return authFailure, nil, err - } - return authFailure, nil, fmt.Errorf("GSS-API Error:\n"+ - "Major Status: %d\n"+ - "Minor Status: %d\n"+ - "Error Message: %s\n", userAuthGSSAPIErrorResp.MajorStatus, userAuthGSSAPIErrorResp.MinorStatus, - userAuthGSSAPIErrorResp.Message) - case msgUserAuthGSSAPIToken: - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return authFailure, nil, err - } - token = userAuthGSSAPITokenReq.Token - } - } - // Binding Encryption Keys. - // See RFC 4462 section 3.5. 
- micField := buildMIC(string(session), user, "ssh-connection", "gssapi-with-mic") - micToken, err := g.gssAPIClient.GetMIC(micField) - if err != nil { - return authFailure, nil, err - } - if err := c.writePacket(Marshal(&userAuthGSSAPIMIC{ - MIC: micToken, - })); err != nil { - return authFailure, nil, err - } - return handleAuthResponse(c) -} - -func (g *gssAPIWithMICCallback) method() string { - return "gssapi-with-mic" -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index 290382d059ef..000000000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,404 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "math" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers lists ciphers we support but might not recommend. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "arcfour256", "arcfour128", "arcfour", - aes128cbcID, - tripledescbcID, -} - -// preferredCiphers specifies the default preference for ciphers. -var preferredCiphers = []string{ - "aes128-gcm@openssh.com", - chacha20Poly1305ID, - "aes128-ctr", "aes192-ctr", "aes256-ctr", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. -var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden -// for the server half. -var serverForbiddenKexAlgos = map[string]struct{}{ - kexAlgoDHGEXSHA1: {}, // server half implementation is only minimal to satisfy the automated tests - kexAlgoDHGEXSHA256: {}, // server half implementation is only minimal to satisfy the automated tests -} - -// preferredKexAlgos specifies the default preference for key-exchange algorithms -// in preference order. -var preferredKexAlgos = []string{ - kexAlgoCurve25519SHA256, - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, -} - -// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256-etm@openssh.com", "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. 
-var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, -} - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. -func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -// directionAlgorithms records algorithm choices in one direction (either read or write) -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -// rekeyBytes returns a rekeying intervals in bytes. -func (a *directionAlgorithms) rekeyBytes() int64 { - // According to RFC4344 block ciphers should rekey after - // 2^(BLOCKSIZE/4) blocks. For all AES flavors BLOCKSIZE is - // 128. - switch a.Cipher { - case "aes128-ctr", "aes192-ctr", "aes256-ctr", gcmCipherID, aes128cbcID: - return 16 * (1 << 32) - - } - - // For others, stick with RFC4253 recommendation to rekey after 1 Gb of data. - return 1 << 30 -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - stoc, ctos := &result.w, &result.r - if isClient { - ctos, stoc = stoc, ctos - } - - ctos.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - stoc.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - - stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - - ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - stoc.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. 
-const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, a size suitable for the chosen cipher is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. -func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = preferredCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = preferredKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // cipher specific default - } else if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } else if c.RekeyThreshold >= math.MaxInt64 { - // Avoid weirdness if somebody uses -1 as a threshold. - c.RekeyThreshold = math.MaxInt64 - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionID, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. 
-func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. -func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index fd6b0681b512..000000000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the session hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. 
- OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. - *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshconn provides net.Conn metadata, but disallows direct reads and -// writes. -type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index 67b7322c0580..000000000000 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 - -This package does not fall under the stability promise of the Go language itself, -so its API may be changed when pressing needs arise. -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 2b10b05a498c..000000000000 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// chanSize sets the amount of buffering SSH connections. 
This is -// primarily for testing: setting chanSize=0 uncovers deadlocks more -// quickly. -const chanSize = 16 - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. -type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. - incoming chan []byte - readError error - - mu sync.Mutex - writeError error - sentInitPacket []byte - sentInitMsg *kexInitMsg - pendingPackets [][]byte // Used when a key exchange is in progress. - - // If the read loop wants to schedule a kex, it pings this - // channel, and the write loop will send out a kex - // message. - requestKex chan struct{} - - // If the other side requests or confirms a kex, its kexInit - // packet is sent here for the write loop to find it. - startKex chan *pendingKex - - // data for host key checking - hostKeyCallback HostKeyCallback - dialAddress string - remoteAddr net.Addr - - // bannerCallback is non-empty if we are the client and it has been set in - // ClientConfig. In that case it is called during the user authentication - // dance to handle a custom server's message. - bannerCallback BannerCallback - - // Algorithms agreed in the last key exchange. - algorithms *algorithms - - readPacketsLeft uint32 - readBytesLeft int64 - - writePacketsLeft uint32 - writeBytesLeft int64 - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -type pendingKex struct { - otherInit []byte - done chan error -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, chanSize), - requestKex: make(chan struct{}, 1), - startKex: make(chan *pendingKex, 1), - - config: config, - } - t.resetReadThresholds() - t.resetWriteThresholds() - - // We always start with a mandatory key exchange. 
- t.requestKex <- struct{}{} - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - t.bannerCallback = config.BannerCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - go t.kexLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - go t.kexLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -// waitSession waits for the session to be established. This should be -// the first thing to call after instantiating handshakeTransport. -func (t *handshakeTransport) waitSession() error { - p, err := t.readPacket() - if err != nil { - return err - } - if p[0] != msgNewKeys { - return fmt.Errorf("ssh: first packet should be msgNewKeys") - } - - return nil -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) printPacket(p []byte, write bool) { - action := "got" - if write { - action = "sent" - } - - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s %s data (packet %d bytes)", t.id(), action, len(p)) - } else { - msg, err := decode(p) - log.Printf("%s %s %T %v (%v)", t.id(), action, msg, msg, err) - } -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - first := true - for { - p, err := t.readOnePacket(first) - first = false - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // Stop writers too. - t.recordWriteError(t.readError) - - // Unblock the writer should it wait for this. - close(t.startKex) - - // Don't close t.requestKex; it's also written to from writePacket. -} - -func (t *handshakeTransport) pushPacket(p []byte) error { - if debugHandshake { - t.printPacket(p, true) - } - return t.conn.writePacket(p) -} - -func (t *handshakeTransport) getWriteError() error { - t.mu.Lock() - defer t.mu.Unlock() - return t.writeError -} - -func (t *handshakeTransport) recordWriteError(err error) { - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil && err != nil { - t.writeError = err - } -} - -func (t *handshakeTransport) requestKeyExchange() { - select { - case t.requestKex <- struct{}{}: - default: - // something already requested a kex, so do nothing. 
- } -} - -func (t *handshakeTransport) resetWriteThresholds() { - t.writePacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.writeBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.writeBytesLeft = t.algorithms.w.rekeyBytes() - } else { - t.writeBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) kexLoop() { - -write: - for t.getWriteError() == nil { - var request *pendingKex - var sent bool - - for request == nil || !sent { - var ok bool - select { - case request, ok = <-t.startKex: - if !ok { - break write - } - case <-t.requestKex: - break - } - - if !sent { - if err := t.sendKexInit(); err != nil { - t.recordWriteError(err) - break - } - sent = true - } - } - - if err := t.getWriteError(); err != nil { - if request != nil { - request.done <- err - } - break - } - - // We're not servicing t.requestKex, but that is OK: - // we never block on sending to t.requestKex. - - // We're not servicing t.startKex, but the remote end - // has just sent us a kexInitMsg, so it can't send - // another key change request, until we close the done - // channel on the pendingKex request. - - err := t.enterKeyExchange(request.otherInit) - - t.mu.Lock() - t.writeError = err - t.sentInitPacket = nil - t.sentInitMsg = nil - - t.resetWriteThresholds() - - // we have completed the key exchange. Since the - // reader is still blocked, it is safe to clear out - // the requestKex channel. This avoids the situation - // where: 1) we consumed our own request for the - // initial kex, and 2) the kex from the remote side - // caused another send on the requestKex channel, - clear: - for { - select { - case <-t.requestKex: - // - default: - break clear - } - } - - request.done <- t.writeError - - // kex finished. Push packets that we received while - // the kex was in progress. Don't look at t.startKex - // and don't increment writtenSinceKex: if we trigger - // another kex while we are still busy with the last - // one, things will become very confusing. - for _, p := range t.pendingPackets { - t.writeError = t.pushPacket(p) - if t.writeError != nil { - break - } - } - t.pendingPackets = t.pendingPackets[:0] - t.mu.Unlock() - } - - // drain startKex channel. We don't service t.requestKex - // because nobody does blocking sends there. - go func() { - for init := range t.startKex { - init.done <- t.writeError - } - }() - - // Unblock reader. - t.conn.Close() -} - -// The protocol uses uint32 for packet counters, so we can't let them -// reach 1<<32. We will actually read and write more packets than -// this, though: the other side may send more packets, and after we -// hit this limit on writing we will send a few more packets for the -// key exchange itself. 
-const packetRekeyThreshold = (1 << 31) - -func (t *handshakeTransport) resetReadThresholds() { - t.readPacketsLeft = packetRekeyThreshold - if t.config.RekeyThreshold > 0 { - t.readBytesLeft = int64(t.config.RekeyThreshold) - } else if t.algorithms != nil { - t.readBytesLeft = t.algorithms.r.rekeyBytes() - } else { - t.readBytesLeft = 1 << 30 - } -} - -func (t *handshakeTransport) readOnePacket(first bool) ([]byte, error) { - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - if t.readPacketsLeft > 0 { - t.readPacketsLeft-- - } else { - t.requestKeyExchange() - } - - if t.readBytesLeft > 0 { - t.readBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if debugHandshake { - t.printPacket(p, false) - } - - if first && p[0] != msgKexInit { - return nil, fmt.Errorf("ssh: first packet should be msgKexInit") - } - - if p[0] != msgKexInit { - return p, nil - } - - firstKex := t.sessionID == nil - - kex := pendingKex{ - done: make(chan error, 1), - otherInit: p, - } - t.startKex <- &kex - err = <-kex.done - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - if err != nil { - return nil, err - } - - t.resetReadThresholds() - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. - successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// sendKexInit sends a key change message. -func (t *handshakeTransport) sendKexInit() error { - t.mu.Lock() - defer t.mu.Unlock() - if t.sentInitMsg != nil { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. - return nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. - packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.pushPacket(packetCopy); err != nil { - return err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - - return nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - } - - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError != nil { - return t.writeError - } - - if t.sentInitMsg != nil { - // Copy the packet so the writer can reuse the buffer. 
- cp := make([]byte, len(p)) - copy(cp, p) - t.pendingPackets = append(t.pendingPackets, cp) - return nil - } - - if t.writeBytesLeft > 0 { - t.writeBytesLeft -= int64(len(p)) - } else { - t.requestKeyExchange() - } - - if t.writePacketsLeft > 0 { - t.writePacketsLeft-- - } else { - t.requestKeyExchange() - } - - if err := t.pushPacket(p); err != nil { - t.writeError = err - } - - return nil -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: t.sentInitPacket, - } - - clientInit := otherInit - serverInit := t.sentInitMsg - isClient := len(t.hostKeys) == 0 - if isClient { - clientInit, serverInit = serverInit, clientInit - - magics.clientKexInit = t.sentInitPacket - magics.serverKexInit = otherInitPacket - } - - var err error - t.algorithms, err = findAgreedAlgorithms(isClient, clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
- if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[t.algorithms.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", t.algorithms.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, t.algorithms, &magics) - } else { - result, err = t.client(kex, t.algorithms, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - if err := t.conn.prepareKeyChange(t.algorithms, result); err != nil { - return err - } - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k - } - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, result); err != nil { - return nil, err - } - - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go b/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go deleted file mode 100644 index af81d266546e..000000000000 --- a/vendor/golang.org/x/crypto/ssh/internal/bcrypt_pbkdf/bcrypt_pbkdf.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package bcrypt_pbkdf implements bcrypt_pbkdf(3) from OpenBSD. -// -// See https://flak.tedunangst.com/post/bcrypt-pbkdf and -// https://cvsweb.openbsd.org/cgi-bin/cvsweb/src/lib/libutil/bcrypt_pbkdf.c. -package bcrypt_pbkdf - -import ( - "crypto/sha512" - "errors" - "golang.org/x/crypto/blowfish" -) - -const blockSize = 32 - -// Key derives a key from the password, salt and rounds count, returning a -// []byte of length keyLen that can be used as cryptographic key. 
-func Key(password, salt []byte, rounds, keyLen int) ([]byte, error) { - if rounds < 1 { - return nil, errors.New("bcrypt_pbkdf: number of rounds is too small") - } - if len(password) == 0 { - return nil, errors.New("bcrypt_pbkdf: empty password") - } - if len(salt) == 0 || len(salt) > 1<<20 { - return nil, errors.New("bcrypt_pbkdf: bad salt length") - } - if keyLen > 1024 { - return nil, errors.New("bcrypt_pbkdf: keyLen is too large") - } - - numBlocks := (keyLen + blockSize - 1) / blockSize - key := make([]byte, numBlocks*blockSize) - - h := sha512.New() - h.Write(password) - shapass := h.Sum(nil) - - shasalt := make([]byte, 0, sha512.Size) - cnt, tmp := make([]byte, 4), make([]byte, blockSize) - for block := 1; block <= numBlocks; block++ { - h.Reset() - h.Write(salt) - cnt[0] = byte(block >> 24) - cnt[1] = byte(block >> 16) - cnt[2] = byte(block >> 8) - cnt[3] = byte(block) - h.Write(cnt) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - - out := make([]byte, blockSize) - copy(out, tmp) - for i := 2; i <= rounds; i++ { - h.Reset() - h.Write(tmp) - bcryptHash(tmp, shapass, h.Sum(shasalt)) - for j := 0; j < len(out); j++ { - out[j] ^= tmp[j] - } - } - - for i, v := range out { - key[i*numBlocks+(block-1)] = v - } - } - return key[:keyLen], nil -} - -var magic = []byte("OxychromaticBlowfishSwatDynamite") - -func bcryptHash(out, shapass, shasalt []byte) { - c, err := blowfish.NewSaltedCipher(shapass, shasalt) - if err != nil { - panic(err) - } - for i := 0; i < 64; i++ { - blowfish.ExpandKey(shasalt, c) - blowfish.ExpandKey(shapass, c) - } - copy(out, magic) - for i := 0; i < 32; i += 8 { - for j := 0; j < 64; j++ { - c.Encrypt(out[i:i+8], out[i:i+8]) - } - } - // Swap bytes due to different endianness. - for i := 0; i < 32; i += 4 { - out[i+3], out[i+2], out[i+1], out[i] = out[i], out[i+1], out[i+2], out[i+3] - } -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index 766e9293975e..000000000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,782 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" - - // For the following kex only the client half contains a production - // ready implementation. The server half only consists of a minimal - // implementation to satisfy the automated tests. - kexAlgoDHGEXSHA1 = "diffie-hellman-group-exchange-sha1" - kexAlgoDHGEXSHA256 = "diffie-hellman-group-exchange-sha256" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. 
- Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. -type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - ki, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - ki, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, err -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} - kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1} - kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - ki := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(ki)) - marshalInt(K, ki) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} - -// dhGEXSHA implements the diffie-hellman-group-exchange-sha1 and -// diffie-hellman-group-exchange-sha256 key agreement protocols, -// as described in RFC 4419 -type dhGEXSHA struct { - g, p *big.Int - hashFunc crypto.Hash -} - -const ( - dhGroupExchangeMinimumBits = 2048 - dhGroupExchangePreferredBits = 2048 - dhGroupExchangeMaximumBits = 8192 -) - -func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 { - return nil, fmt.Errorf("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil -} - -func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics 
*handshakeMagics) (*kexResult, error) { - // Send GexRequest - kexDHGexRequest := kexDHGexRequestMsg{ - MinBits: dhGroupExchangeMinimumBits, - PreferedBits: dhGroupExchangePreferredBits, - MaxBits: dhGroupExchangeMaximumBits, - } - if err := c.writePacket(Marshal(&kexDHGexRequest)); err != nil { - return nil, err - } - - // Receive GexGroup - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHGexGroup kexDHGexGroupMsg - if err = Unmarshal(packet, &kexDHGexGroup); err != nil { - return nil, err - } - - // reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits - if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits { - return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen()) - } - - gex.p = kexDHGexGroup.P - gex.g = kexDHGexGroup.G - - // Check if g is safe by verifing that g > 1 and g < p - 1 - one := big.NewInt(1) - var pMinusOne = &big.Int{} - pMinusOne.Sub(gex.p, one) - if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 { - return nil, fmt.Errorf("ssh: server provided gex g is not safe") - } - - // Send GexInit - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) - x, err := rand.Int(randSource, pHalf) - if err != nil { - return nil, err - } - X := new(big.Int).Exp(gex.g, x, gex.p) - kexDHGexInit := kexDHGexInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHGexInit)); err != nil { - return nil, err - } - - // Receive GexReply - packet, err = c.readPacket() - if err != nil { - return nil, err - } - - var kexDHGexReply kexDHGexReplyMsg - if err = Unmarshal(packet, &kexDHGexReply); err != nil { - return nil, err - } - - kInt, err := gex.diffieHellman(kexDHGexReply.Y, x) - if err != nil { - return nil, err - } - - // Check if k is safe by verifing that k > 1 and k < p - 1 - if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 { - return nil, fmt.Errorf("ssh: derived k is not safe") - } - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, kexDHGexReply.HostKey) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) - writeInt(h, X) - writeInt(h, kexDHGexReply.Y) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHGexReply.HostKey, - Signature: kexDHGexReply.Signature, - Hash: gex.hashFunc, - }, nil -} - -// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256. -// -// This is a minimal implementation to satisfy the automated tests. -func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - // Receive GexRequest - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHGexRequest kexDHGexRequestMsg - if err = Unmarshal(packet, &kexDHGexRequest); err != nil { - return - } - - // smoosh the user's preferred size into our own limits - if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits - } - if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits { - kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits - } - // fix min/max if they're inconsistent. 
technically, we could just pout - // and hang up, but there's no harm in giving them the benefit of the - // doubt and just picking a bitsize for them. - if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits { - kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits - } - if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits { - kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits - } - - // Send GexGroup - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - gex.p = p - gex.g = big.NewInt(2) - - kexDHGexGroup := kexDHGexGroupMsg{ - P: gex.p, - G: gex.g, - } - if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil { - return nil, err - } - - // Receive GexInit - packet, err = c.readPacket() - if err != nil { - return - } - var kexDHGexInit kexDHGexInitMsg - if err = Unmarshal(packet, &kexDHGexInit); err != nil { - return - } - - var pHalf = &big.Int{} - pHalf.Rsh(gex.p, 1) - - y, err := rand.Int(randSource, pHalf) - if err != nil { - return - } - - Y := new(big.Int).Exp(gex.g, y, gex.p) - kInt, err := gex.diffieHellman(kexDHGexInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := gex.hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits)) - binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits)) - writeInt(h, gex.p) - writeInt(h, gex.g) - writeInt(h, kexDHGexInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHGexReply := kexDHGexReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHGexReply) - - err = c.writePacket(packet) - - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: gex.hashFunc, - }, err -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index 31f26349a05f..000000000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,1474 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "crypto" - "crypto/aes" - "crypto/cipher" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/md5" - "crypto/rsa" - "crypto/sha256" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/hex" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" - "golang.org/x/crypto/ssh/internal/bcrypt_pbkdf" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoSKECDSA256 = "sk-ecdsa-sha2-nistp256@openssh.com" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" - KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com" -) - -// These constants represent non-default signature algorithms that are supported -// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See -// [PROTOCOL.agent] section 4.5.1 and -// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10 -const ( - SigAlgoRSA = "ssh-rsa" - SigAlgoRSASHA2256 = "rsa-sha2-256" - SigAlgoRSASHA2512 = "rsa-sha2-512" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoSKECDSA256: - return parseSKECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case KeyAlgoSKED25519: - return parseSKEd25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. 
-// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. -func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. - // This is either an optional marker or a (set of) hostname(s). - keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. - - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKeys parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. - var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. 
-func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. -type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". - Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. To unmarshal the returned data, use - // the ParsePublicKey function. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -// A AlgorithmSigner is a Signer that also supports specifying a specific -// algorithm to use for signing. -type AlgorithmSigner interface { - Signer - - // SignWithAlgorithm is like Signer.Sign, but allows specification of a - // non-default signing algorithm. See the SigAlgo* constants in this - // package for signature algorithms supported by this package. Callers may - // pass an empty string for the algorithm in which case the AlgorithmSigner - // will use its default algorithm. - SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. 
- wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - var hash crypto.Hash - switch sig.Format { - case SigAlgoRSA: - hash = crypto.SHA1 - case SigAlgoRSASHA2256: - hash = crypto.SHA256 - case SigAlgoRSASHA2512: - hash = crypto.SHA512 - default: - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - h := hash.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), hash, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (k *dsaPublicKey) Type() string { - return "ssh-dss" -} - -func checkDSAParams(param *dsa.Parameters) error { - // SSH specifies FIPS 186-2, which only provided a single size - // (1024 bits) DSA key. FIPS 186-3 allows for larger key - // sizes, which would confuse SSH. - if l := param.P.BitLen(); l != 1024 { - return fmt.Errorf("ssh: unsupported DSA key size %d", l) - } - - return nil -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - param := dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - } - if err := checkDSAParams(¶m); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: param, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. 
- if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - return k.SignWithAlgorithm(rand, data, "") -} - -func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - if algorithm != "" && algorithm != k.PublicKey().Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (k *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + k.nistID() -} - -func (k *ecdsaPublicKey) nistID() string { - switch k.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (k ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - return ed25519PublicKey(w.KeyBytes), w.Rest, nil -} - -func (k ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(k), - } - return Marshal(&w) -} - -func (k ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k); l != ed25519.PublicKeySize { - return fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - - if ok := ed25519.Verify(ed25519.PublicKey(k), b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. 
-func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (k *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - k.Type(), - k.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(k), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// skFields holds the additional fields present in U2F/FIDO2 signatures. -// See openssh/PROTOCOL.u2f 'SSH U2F Signatures' for details. -type skFields struct { - // Flags contains U2F/FIDO2 flags such as 'user present' - Flags byte - // Counter is a monotonic signature counter which can be - // used to detect concurrent use of a private key, should - // it be extracted from hardware. - Counter uint32 -} - -type skECDSAPublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ecdsa.PublicKey -} - -func (k *skECDSAPublicKey) Type() string { - return KeyAlgoSKECDSA256 -} - -func (k *skECDSAPublicKey) nistID() string { - return "nistp256" -} - -func parseSKECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(skECDSAPublicKey) - key.application = w.Application - - if w.Curve != "nistp256" { - return nil, nil, errors.New("ssh: unsupported curve") - } - key.Curve = elliptic.P256() - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - - return key, w.Rest, nil -} - -func (k *skECDSAPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. 
- keyBytes := elliptic.Marshal(k.Curve, k.X, k.Y) - w := struct { - Name string - ID string - Key []byte - Application string - }{ - k.Type(), - k.nistID(), - keyBytes, - k.application, - } - - return Marshal(&w) -} - -func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - - h := ecHash(k.Curve).New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var ecSig struct { - R *big.Int - S *big.Int - } - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - h.Reset() - h.Write(original) - digest := h.Sum(nil) - - if ecdsa.Verify((*ecdsa.PublicKey)(&k.PublicKey), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -type skEd25519PublicKey struct { - // application is a URL-like string, typically "ssh:" for SSH. - // see openssh/PROTOCOL.u2f for details. - application string - ed25519.PublicKey -} - -func (k *skEd25519PublicKey) Type() string { - return KeyAlgoSKED25519 -} - -func parseSKEd25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Application string - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if l := len(w.KeyBytes); l != ed25519.PublicKeySize { - return nil, nil, fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - key := new(skEd25519PublicKey) - key.application = w.Application - key.PublicKey = ed25519.PublicKey(w.KeyBytes) - - return key, w.Rest, nil -} - -func (k *skEd25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - Application string - }{ - KeyAlgoSKED25519, - []byte(k.PublicKey), - k.application, - } - return Marshal(&w) -} - -func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - if l := len(k.PublicKey); l != ed25519.PublicKeySize { - return fmt.Errorf("invalid size %d for Ed25519 public key", l) - } - - h := sha256.New() - h.Write([]byte(k.application)) - appDigest := h.Sum(nil) - - h.Reset() - h.Write(data) - dataDigest := h.Sum(nil) - - var edSig struct { - Signature []byte `ssh:"rest"` - } - - if err := Unmarshal(sig.Blob, &edSig); err != nil { - return err - } - - var skf skFields - if err := Unmarshal(sig.Rest, &skf); err != nil { - return err - } - - blob := struct { - ApplicationDigest []byte `ssh:"rest"` - Flags byte - Counter uint32 - MessageDigest []byte `ssh:"rest"` - }{ - appDigest, - skf.Flags, - skf.Counter, - dataDigest, - } - - original := Marshal(blob) - - if ok := ed25519.Verify(k.PublicKey, original, edSig.Signature); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a -// corresponding Signer instance. ECDSA keys must use P-256, P-384 or -// P-521. DSA keys must use parameter size L1024N160. 
-func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return newDSAPrivateKey(key) - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) { - if err := checkDSAParams(&key.PublicKey.Parameters); err != nil { - return nil, err - } - - return &dsaPrivateKey{key}, nil -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. -func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.SignWithAlgorithm(rand, data, "") -} - -func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) { - var hashFunc crypto.Hash - - if _, ok := s.pubKey.(*rsaPublicKey); ok { - // RSA keys support a few hash functions determined by the requested signature algorithm - switch algorithm { - case "", SigAlgoRSA: - algorithm = SigAlgoRSA - hashFunc = crypto.SHA1 - case SigAlgoRSASHA2256: - hashFunc = crypto.SHA256 - case SigAlgoRSASHA2512: - hashFunc = crypto.SHA512 - default: - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - } else { - // The only supported algorithm for all other key types is the same as the type of the key - if algorithm == "" { - algorithm = s.pubKey.Type() - } else if algorithm != s.pubKey.Type() { - return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm) - } - - switch key := s.pubKey.(type) { - case *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: algorithm, - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. 
-func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - if l := len(key); l != ed25519.PublicKeySize { - return nil, fmt.Errorf("ssh: invalid size %d for Ed25519 public key", l) - } - return ed25519PublicKey(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. If the private key is encrypted, it -// will return a PassphraseMissingError. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// ParsePrivateKeyWithPassphrase returns a Signer from a PEM encoded private -// key and passphrase. It supports the same keys as -// ParseRawPrivateKeyWithPassphrase. -func ParsePrivateKeyWithPassphrase(pemBytes, passphrase []byte) (Signer, error) { - key, err := ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// A PassphraseMissingError indicates that parsing this private key requires a -// passphrase. Use ParsePrivateKeyWithPassphrase. -type PassphraseMissingError struct { - // PublicKey will be set if the private key format includes an unencrypted - // public key along with the encrypted private key. - PublicKey PublicKey -} - -func (*PassphraseMissingError) Error() string { - return "ssh: this private key is passphrase protected" -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), PKCS#8, DSA (OpenSSL), and ECDSA private keys. If the -// private key is encrypted, it will return a PassphraseMissingError. -func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, &PassphraseMissingError{} - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - // RFC5208 - https://tools.ietf.org/html/rfc5208 - case "PRIVATE KEY": - return x509.ParsePKCS8PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes, unencryptedOpenSSHKey) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseRawPrivateKeyWithPassphrase returns a private key decrypted with -// passphrase from a PEM encoded private key. If the passphrase is wrong, it -// will return x509.IncorrectPasswordError. 
-func ParseRawPrivateKeyWithPassphrase(pemBytes, passphrase []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if block.Type == "OPENSSH PRIVATE KEY" { - return parseOpenSSHPrivateKey(block.Bytes, passphraseProtectedOpenSSHKey(passphrase)) - } - - if !encryptedBlock(block) || !x509.IsEncryptedPEMBlock(block) { - return nil, errors.New("ssh: not an encrypted key") - } - - buf, err := x509.DecryptPEMBlock(block, passphrase) - if err != nil { - if err == x509.IncorrectPasswordError { - return nil, err - } - return nil, fmt.Errorf("ssh: cannot decode encrypted private keys: %v", err) - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(buf) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(buf) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(buf) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. -func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Pub *big.Int - Priv *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Pub, - }, - X: k.Priv, - }, nil -} - -func unencryptedOpenSSHKey(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName != "none" || cipherName != "none" { - return nil, &PassphraseMissingError{} - } - if kdfOpts != "" { - return nil, errors.New("ssh: invalid openssh private key") - } - return privKeyBlock, nil -} - -func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc { - return func(cipherName, kdfName, kdfOpts string, privKeyBlock []byte) ([]byte, error) { - if kdfName == "none" || cipherName == "none" { - return nil, errors.New("ssh: key is not password protected") - } - if kdfName != "bcrypt" { - return nil, fmt.Errorf("ssh: unknown KDF %q, only supports %q", kdfName, "bcrypt") - } - - var opts struct { - Salt string - Rounds uint32 - } - if err := Unmarshal([]byte(kdfOpts), &opts); err != nil { - return nil, err - } - - k, err := bcrypt_pbkdf.Key(passphrase, []byte(opts.Salt), int(opts.Rounds), 32+16) - if err != nil { - return nil, err - } - key, iv := k[:32], k[32:] - - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - switch cipherName { - case "aes256-ctr": - ctr := cipher.NewCTR(c, iv) - ctr.XORKeyStream(privKeyBlock, privKeyBlock) - case "aes256-cbc": - if len(privKeyBlock)%c.BlockSize() != 0 { - return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size") - } - cbc := cipher.NewCBCDecrypter(c, iv) - cbc.CryptBlocks(privKeyBlock, privKeyBlock) - default: - return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc") - } - - return privKeyBlock, nil - } -} - -type openSSHDecryptFunc func(CipherName, KdfName, KdfOpts string, PrivKeyBlock []byte) ([]byte, error) - -// parseOpenSSHPrivateKey parses an OpenSSH private key, using the decrypt -// function to unwrap the encrypted portion. 
unencryptedOpenSSHKey can be used -// as the decrypt function to parse an unencrypted private key. See -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key. -func parseOpenSSHPrivateKey(key []byte, decrypt openSSHDecryptFunc) (crypto.PrivateKey, error) { - const magic = "openssh-key-v1\x00" - if len(key) < len(magic) || string(key[:len(magic)]) != magic { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - if w.NumKeys != 1 { - // We only support single key files, and so does OpenSSH. - // https://github.com/openssh/openssh-portable/blob/4103a3ec7/sshkey.c#L4171 - return nil, errors.New("ssh: multi-key files are not supported") - } - - privKeyBlock, err := decrypt(w.CipherName, w.KdfName, w.KdfOpts, w.PrivKeyBlock) - if err != nil { - if err, ok := err.(*PassphraseMissingError); ok { - pub, errPub := ParsePublicKey(w.PubKey) - if errPub != nil { - return nil, fmt.Errorf("ssh: failed to parse embedded public key: %v", errPub) - } - err.PublicKey = pub - } - return nil, err - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Rest []byte `ssh:"rest"` - }{} - - if err := Unmarshal(privKeyBlock, &pk1); err != nil || pk1.Check1 != pk1.Check2 { - if w.CipherName != "none" { - return nil, x509.IncorrectPasswordError - } - return nil, errors.New("ssh: malformed OpenSSH key") - } - - switch pk1.Keytype { - case KeyAlgoRSA: - // https://github.com/openssh/openssh-portable/blob/master/sshkey.c#L2760-L2773 - key := struct { - N *big.Int - E *big.Int - D *big.Int - Iqmp *big.Int - P *big.Int - Q *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := &rsa.PrivateKey{ - PublicKey: rsa.PublicKey{ - N: key.N, - E: int(key.E.Int64()), - }, - D: key.D, - Primes: []*big.Int{key.P, key.Q}, - } - - if err := pk.Validate(); err != nil { - return nil, err - } - - pk.Precompute() - - return pk, nil - case KeyAlgoED25519: - key := struct { - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if len(key.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, key.Priv) - return &pk, nil - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - key := struct { - Curve string - Pub []byte - D *big.Int - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(pk1.Rest, &key); err != nil { - return nil, err - } - - if err := checkOpenSSHKeyPadding(key.Pad); err != nil { - return nil, err - } - - var curve elliptic.Curve - switch key.Curve { - case "nistp256": - curve = elliptic.P256() - case "nistp384": - curve = elliptic.P384() - case "nistp521": - curve = elliptic.P521() - default: - return nil, errors.New("ssh: unhandled elliptic curve: " + key.Curve) - } - - X, Y := elliptic.Unmarshal(curve, key.Pub) - if X == nil || Y == nil { - return nil, errors.New("ssh: failed to unmarshal public key") - } - - if key.D.Cmp(curve.Params().N) >= 0 
{ - return nil, errors.New("ssh: scalar is out of range") - } - - x, y := curve.ScalarBaseMult(key.D.Bytes()) - if x.Cmp(X) != 0 || y.Cmp(Y) != 0 { - return nil, errors.New("ssh: public key does not match private key") - } - - return &ecdsa.PrivateKey{ - PublicKey: ecdsa.PublicKey{ - Curve: curve, - X: X, - Y: Y, - }, - D: key.D, - }, nil - default: - return nil, errors.New("ssh: unhandled key type") - } -} - -func checkOpenSSHKeyPadding(pad []byte) error { - for i, b := range pad { - if int(b) != i+1 { - return errors.New("ssh: padding not as expected") - } - } - return nil -} - -// FingerprintLegacyMD5 returns the user presentation of the key's -// fingerprint as described by RFC 4716 section 4. -func FingerprintLegacyMD5(pubKey PublicKey) string { - md5sum := md5.Sum(pubKey.Marshal()) - hexarray := make([]string, len(md5sum)) - for i, c := range md5sum { - hexarray[i] = hex.EncodeToString([]byte{c}) - } - return strings.Join(hexarray, ":") -} - -// FingerprintSHA256 returns the user presentation of the key's -// fingerprint as unpadded base64 encoded sha256 hash. -// This format was introduced from OpenSSH 6.8. -// https://www.openssh.com/txt/release-6.8 -// https://tools.ietf.org/html/rfc4648#section-3.2 (unpadded base64 encoding) -func FingerprintSHA256(pubKey PublicKey) string { - sha256sum := sha256.Sum256(pubKey.Marshal()) - hash := base64.RawStdEncoding.EncodeToString(sha256sum[:]) - return "SHA256:" + hash -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index c07a06285e66..000000000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - etm bool - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. -type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256-etm@openssh.com": {32, true, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha2-256": {32, false, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, false, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, false, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index ac41a4168bfe..000000000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,866 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. -const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4419, section 5. -const msgKexDHGexGroup = 31 - -type kexDHGexGroupMsg struct { - P *big.Int `sshtype:"31"` - G *big.Int -} - -const msgKexDHGexInit = 32 - -type kexDHGexInitMsg struct { - X *big.Int `sshtype:"32"` -} - -const msgKexDHGexReply = 33 - -type kexDHGexReplyMsg struct { - HostKey []byte `sshtype:"33"` - Y *big.Int - Signature []byte -} - -const msgKexDHGexRequest = 34 - -type kexDHGexRequestMsg struct { - MinBits uint32 `sshtype:"34"` - PreferedBits uint32 - MaxBits uint32 -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. -const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. 
-type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4252, section 5.1 -const msgUserAuthSuccess = 52 - -// See RFC 4252, section 5.4 -const msgUserAuthBanner = 53 - -type userAuthBannerMsg struct { - Message string `sshtype:"53"` - // unused, but required to allow message parsing - Language string -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersID uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersID uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersID uint32 `sshtype:"91"` - MyID uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersID uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersID uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersID uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. 
-const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersID uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersID uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersID uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersID uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// See RFC 4462, section 3 -const msgUserAuthGSSAPIResponse = 60 - -type userAuthGSSAPIResponse struct { - SupportMech []byte `sshtype:"60"` -} - -const msgUserAuthGSSAPIToken = 61 - -type userAuthGSSAPIToken struct { - Token []byte `sshtype:"61"` -} - -const msgUserAuthGSSAPIMIC = 66 - -type userAuthGSSAPIMIC struct { - MIC []byte `sshtype:"66"` -} - -// See RFC 4462, section 3.9 -const msgUserAuthGSSAPIErrTok = 64 - -type userAuthGSSAPIErrTok struct { - ErrorToken []byte `sshtype:"64"` -} - -// See RFC 4462, section 3.8 -const msgUserAuthGSSAPIError = 65 - -type userAuthGSSAPIError struct { - MajorStatus uint32 `sshtype:"65"` - MinorStatus uint32 - Message string - LanguageTag string -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. -func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. 
-func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. 
-func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) - } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, 
bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. 
-func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - case msgUserAuthGSSAPIToken: - msg = new(userAuthGSSAPIToken) - case msgUserAuthGSSAPIMIC: - msg = new(userAuthGSSAPIMIC) - case msgUserAuthGSSAPIErrTok: - msg = new(userAuthGSSAPIErrTok) - case msgUserAuthGSSAPIError: - msg = new(userAuthGSSAPIError) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} - -var packetTypeNames = map[byte]string{ - msgDisconnect: "disconnectMsg", - msgServiceRequest: "serviceRequestMsg", - msgServiceAccept: "serviceAcceptMsg", - msgKexInit: "kexInitMsg", - msgKexDHInit: "kexDHInitMsg", - msgKexDHReply: "kexDHReplyMsg", - msgUserAuthRequest: "userAuthRequestMsg", - msgUserAuthSuccess: "userAuthSuccessMsg", - msgUserAuthFailure: "userAuthFailureMsg", - msgUserAuthPubKeyOk: "userAuthPubKeyOkMsg", - msgGlobalRequest: "globalRequestMsg", - msgRequestSuccess: "globalRequestSuccessMsg", - msgRequestFailure: "globalRequestFailureMsg", - msgChannelOpen: "channelOpenMsg", - msgChannelData: "channelDataMsg", - msgChannelOpenConfirm: "channelOpenConfirmMsg", - msgChannelOpenFailure: "channelOpenFailureMsg", - msgChannelWindowAdjust: "windowAdjustMsg", - msgChannelEOF: "channelEOFMsg", - msgChannelClose: "channelCloseMsg", - msgChannelRequest: "channelRequestMsg", - msgChannelSuccess: "channelRequestSuccessMsg", - msgChannelFailure: "channelRequestFailureMsg", -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index 9654c01869ad..000000000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,351 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. 
-type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. -func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, chanSize), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, chanSize), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. 
-func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. - if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return m.handleUnknownChannelPacket(id, packet) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. 
-func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersID: msg.PeersID, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersID - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersID: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} - -func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - // RFC 4254 section 5.4 says unrecognized channel requests should - // receive a failure response. - case *channelRequestMsg: - if msg.WantReply { - return m.sendMessage(channelRequestFailureMsg{ - PeersID: msg.PeersID, - }) - } - return nil - default: - return fmt.Errorf("ssh: invalid channel %d", id) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index b6911e8306d6..000000000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,720 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "strings" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a user. -// The Permissions value for a successful authentication attempt is -// available in ServerConn, so it can be used to pass information from -// the user-authentication phase to the application layer. -type Permissions struct { - // CriticalOptions indicate restrictions to the default - // permissions, and are typically used in conjunction with - // user certificates. The standard for SSH certificates - // defines "force-command" (only allow the given command to - // execute) and "source-address" (only allow connections from - // the given address). The SSH package currently only enforces - // the "source-address" critical option. It is up to server - // implementations to enforce other critical options, such as - // "force-command", by checking them after the SSH handshake - // is successful. 
In general, SSH servers should reject - // connections that specify critical options that are unknown - // or not supported. - CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Lack of support for an - // extension does not preclude authenticating a user. Common - // extensions are "permit-agent-forwarding", - // "permit-X11-forwarding". The Go SSH library currently does - // not act on any extension, and it is up to server - // implementations to honor them. Extensions can be used to - // pass data from the authentication callbacks to the server - // application layer. - Extensions map[string]string -} - -type GSSAPIWithMICConfig struct { - // AllowLogin, must be set, is called when gssapi-with-mic - // authentication is selected (RFC 4462 section 3). The srcName is from the - // results of the GSS-API authentication. The format is username@DOMAIN. - // GSSAPI just guarantees to the server who the user is, but not if they can log in, and with what permissions. - // This callback is called after the user identity is established with GSSAPI to decide if the user can login with - // which permissions. If the user is allowed to login, it should return a nil error. - AllowLogin func(conn ConnMetadata, srcName string) (*Permissions, error) - - // Server must be set. It's the implementation - // of the GSSAPIServer interface. See GSSAPIServer interface for details. - Server GSSAPIServer -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - NoClientAuth bool - - // MaxAuthTries specifies the maximum number of authentication attempts - // permitted per connection. If set to a negative number, the number of - // attempts are unlimited. If set to zero, the number of attempts are limited - // to 6. - MaxAuthTries int - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client - // offers a public key for authentication. It must return a nil error - // if the given public key can be used to authenticate the - // given user. For example, see CertChecker.Authenticate. A - // call to this function does not guarantee that the key - // offered is in fact used to authenticate. To record any data - // depending on the public key, store it inside a - // Permissions.Extensions entry. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. 
- // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string - - // BannerCallback, if present, is called and the return string is sent to - // the client after key exchange completed but before authentication. - BannerCallback func(conn ConnMetadata) string - - // GSSAPIWithMICConfig includes gssapi server and callback, which if both non-nil, is used - // when gssapi-with-mic authentication is selected (RFC 4462 section 3). - GSSAPIWithMICConfig *GSSAPIWithMICConfig -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. -func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -// -// The returned error may be of type *ServerAuthError for -// authentication errors. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - if fullConf.MaxAuthTries == 0 { - fullConf.MaxAuthTries = 6 - } - // Check if the config contains any unsupported key exchanges - for _, kex := range fullConf.KeyExchanges { - if _, ok := serverForbiddenKexAlgos[kex]; ok { - return nil, nil, nil, fmt.Errorf("ssh: unsupported key exchange %s for server", kex) - } - } - - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. 
-func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. -func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && - config.KeyboardInteractiveCallback == nil && (config.GSSAPIWithMICConfig == nil || - config.GSSAPIWithMICConfig.AllowLogin == nil || config.GSSAPIWithMICConfig.Server == nil) { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.waitSession(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. - s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddrs string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - for _, sourceAddr := range strings.Split(sourceAddrs, ",") { - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if allowedIP.Equal(tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -func gssExchangeToken(gssapiConfig *GSSAPIWithMICConfig, firstToken []byte, s *connection, - sessionID []byte, userAuthReq userAuthRequestMsg) (authErr error, perms 
*Permissions, err error) { - gssAPIServer := gssapiConfig.Server - defer gssAPIServer.DeleteSecContext() - var srcName string - for { - var ( - outToken []byte - needContinue bool - ) - outToken, srcName, needContinue, err = gssAPIServer.AcceptSecContext(firstToken) - if err != nil { - return err, nil, nil - } - if len(outToken) != 0 { - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIToken{ - Token: outToken, - })); err != nil { - return nil, nil, err - } - } - if !needContinue { - break - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, nil, err - } - } - packet, err := s.transport.readPacket() - if err != nil { - return nil, nil, err - } - userAuthGSSAPIMICReq := &userAuthGSSAPIMIC{} - if err := Unmarshal(packet, userAuthGSSAPIMICReq); err != nil { - return nil, nil, err - } - mic := buildMIC(string(sessionID), userAuthReq.User, userAuthReq.Service, userAuthReq.Method) - if err := gssAPIServer.VerifyMIC(mic, userAuthGSSAPIMICReq.MIC); err != nil { - return err, nil, nil - } - perms, authErr = gssapiConfig.AllowLogin(s, srcName) - return authErr, perms, nil -} - -// ServerAuthError represents server authentication errors and is -// sometimes returned by NewServerConn. It appends any authentication -// errors that may occur, and is returned if all of the authentication -// methods provided by the user failed to authenticate. -type ServerAuthError struct { - // Errors contains authentication errors returned by the authentication - // callback methods. The first entry is typically ErrNoAuth. - Errors []error -} - -func (l ServerAuthError) Error() string { - var errs []string - for _, err := range l.Errors { - errs = append(errs, err.Error()) - } - return "[" + strings.Join(errs, ", ") + "]" -} - -// ErrNoAuth is the error value returned if no -// authentication method has been passed yet. This happens as a normal -// part of the authentication loop, since the client first tries -// 'none' authentication to discover available methods. -// It is returned in ServerAuthError.Errors from NewServerConn. 
-var ErrNoAuth = errors.New("ssh: no auth passed yet") - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - sessionID := s.transport.getSessionID() - var cache pubKeyCache - var perms *Permissions - - authFailures := 0 - var authErrs []error - var displayedBanner bool - -userAuthLoop: - for { - if authFailures >= config.MaxAuthTries && config.MaxAuthTries > 0 { - discMsg := &disconnectMsg{ - Reason: 2, - Message: "too many authentication failures", - } - - if err := s.transport.writePacket(Marshal(discMsg)); err != nil { - return nil, err - } - - return nil, discMsg - } - - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - if err == io.EOF { - return nil, &ServerAuthError{Errors: authErrs} - } - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - - if !displayedBanner && config.BannerCallback != nil { - displayedBanner = true - msg := config.BannerCallback(s) - if msg != "" { - bannerMsg := &userAuthBannerMsg{ - Message: msg, - } - if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil { - return nil, err - } - } - } - - perms = nil - authErr := ErrNoAuth - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - - // allow initial attempt of 'none' without penalty - if authFailures == 0 { - authFailures-- - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configured") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - 
candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format) - break - } - signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - case "gssapi-with-mic": - if config.GSSAPIWithMICConfig == nil { - authErr = errors.New("ssh: gssapi-with-mic auth not configured") - break - } - gssapiConfig := config.GSSAPIWithMICConfig - userAuthRequestGSSAPI, err := parseGSSAPIPayload(userAuthReq.Payload) - if err != nil { - return nil, parseError(msgUserAuthRequest) - } - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication. - if userAuthRequestGSSAPI.N == 0 { - authErr = fmt.Errorf("ssh: Mechanism negotiation is not supported") - break - } - var i uint32 - present := false - for i = 0; i < userAuthRequestGSSAPI.N; i++ { - if userAuthRequestGSSAPI.OIDS[i].Equal(krb5Mesh) { - present = true - break - } - } - if !present { - authErr = fmt.Errorf("ssh: GSSAPI authentication must use the Kerberos V5 mechanism") - break - } - // Initial server response, see RFC 4462 section 3.3. - if err := s.transport.writePacket(Marshal(&userAuthGSSAPIResponse{ - SupportMech: krb5OID, - })); err != nil { - return nil, err - } - // Exchange token, see RFC 4462 section 3.4. 
- packet, err := s.transport.readPacket() - if err != nil { - return nil, err - } - userAuthGSSAPITokenReq := &userAuthGSSAPIToken{} - if err := Unmarshal(packet, userAuthGSSAPITokenReq); err != nil { - return nil, err - } - authErr, perms, err = gssExchangeToken(gssapiConfig, userAuthGSSAPITokenReq.Token, s, sessionID, - userAuthReq) - if err != nil { - return nil, err - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - authErrs = append(authErrs, authErr) - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - authFailures++ - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - if config.GSSAPIWithMICConfig != nil && config.GSSAPIWithMICConfig.Server != nil && - config.GSSAPIWithMICConfig.AllowLogin != nil { - failureMsg.Methods = append(failureMsg.Methods, "gssapi-with-mic") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err := s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err := s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. -type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index d3321f6b784b..000000000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. -type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. 
-func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.7. -type ptyWindowChangeMsg struct { - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 -} - -// WindowChange informs the remote host about a terminal window dimension change to h rows and w columns. -func (s *Session) WindowChange(h, w int) error { - req := ptyWindowChangeMsg{ - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - } - _, err := s.ch.SendRequest("window-change", false, Marshal(&req)) - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. -func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. 
If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. 
- if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. -type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. -func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. 
-func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. -type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/ssh_gss.go b/vendor/golang.org/x/crypto/ssh/ssh_gss.go deleted file mode 100644 index 24bd7c8e8304..000000000000 --- a/vendor/golang.org/x/crypto/ssh/ssh_gss.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/asn1" - "errors" -) - -var krb5OID []byte - -func init() { - krb5OID, _ = asn1.Marshal(krb5Mesh) -} - -// GSSAPIClient provides the API to plug-in GSSAPI authentication for client logins. -type GSSAPIClient interface { - // InitSecContext initiates the establishment of a security context for GSS-API between the - // ssh client and ssh server. Initially the token parameter should be specified as nil. - // The routine may return a outputToken which should be transferred to - // the ssh server, where the ssh server will present it to - // AcceptSecContext. If no token need be sent, InitSecContext will indicate this by setting - // needContinue to false. To complete the context - // establishment, one or more reply tokens may be required from the ssh - // server;if so, InitSecContext will return a needContinue which is true. - // In this case, InitSecContext should be called again when the - // reply token is received from the ssh server, passing the reply - // token to InitSecContext via the token parameters. - // See RFC 2743 section 2.2.1 and RFC 4462 section 3.4. - InitSecContext(target string, token []byte, isGSSDelegCreds bool) (outputToken []byte, needContinue bool, err error) - // GetMIC generates a cryptographic MIC for the SSH2 message, and places - // the MIC in a token for transfer to the ssh server. 
- // The contents of the MIC field are obtained by calling GSS_GetMIC() - // over the following, using the GSS-API context that was just - // established: - // string session identifier - // byte SSH_MSG_USERAUTH_REQUEST - // string user name - // string service - // string "gssapi-with-mic" - // See RFC 2743 section 2.3.1 and RFC 4462 3.5. - GetMIC(micFiled []byte) ([]byte, error) - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -// GSSAPIServer provides the API to plug in GSSAPI authentication for server logins. -type GSSAPIServer interface { - // AcceptSecContext allows a remotely initiated security context between the application - // and a remote peer to be established by the ssh client. The routine may return a - // outputToken which should be transferred to the ssh client, - // where the ssh client will present it to InitSecContext. - // If no token need be sent, AcceptSecContext will indicate this - // by setting the needContinue to false. To - // complete the context establishment, one or more reply tokens may be - // required from the ssh client. if so, AcceptSecContext - // will return a needContinue which is true, in which case it - // should be called again when the reply token is received from the ssh - // client, passing the token to AcceptSecContext via the - // token parameters. - // The srcName return value is the authenticated username. - // See RFC 2743 section 2.2.2 and RFC 4462 section 3.4. - AcceptSecContext(token []byte) (outputToken []byte, srcName string, needContinue bool, err error) - // VerifyMIC verifies that a cryptographic MIC, contained in the token parameter, - // fits the supplied message is received from the ssh client. - // See RFC 2743 section 2.3.2. - VerifyMIC(micField []byte, micToken []byte) error - // Whenever possible, it should be possible for - // DeleteSecContext() calls to be successfully processed even - // if other calls cannot succeed, thereby enabling context-related - // resources to be released. - // In addition to deleting established security contexts, - // gss_delete_sec_context must also be able to delete "half-built" - // security contexts resulting from an incomplete sequence of - // InitSecContext()/AcceptSecContext() calls. - // See RFC 2743 section 2.2.3. - DeleteSecContext() error -} - -var ( - // OpenSSH supports Kerberos V5 mechanism only for GSS-API authentication, - // so we also support the krb5 mechanism only. - // See RFC 1964 section 1. - krb5Mesh = asn1.ObjectIdentifier{1, 2, 840, 113554, 1, 2, 2} -) - -// The GSS-API authentication method is initiated when the client sends an SSH_MSG_USERAUTH_REQUEST -// See RFC 4462 section 3.2. 
-type userAuthRequestGSSAPI struct { - N uint32 - OIDS []asn1.ObjectIdentifier -} - -func parseGSSAPIPayload(payload []byte) (*userAuthRequestGSSAPI, error) { - n, rest, ok := parseUint32(payload) - if !ok { - return nil, errors.New("parse uint32 failed") - } - s := &userAuthRequestGSSAPI{ - N: n, - OIDS: make([]asn1.ObjectIdentifier, n), - } - for i := 0; i < int(n); i++ { - var ( - desiredMech []byte - err error - ) - desiredMech, rest, ok = parseString(rest) - if !ok { - return nil, errors.New("parse string failed") - } - if rest, err = asn1.Unmarshal(desiredMech, &s.OIDS[i]); err != nil { - return nil, err - } - - } - return s, nil -} - -// See RFC 4462 section 3.6. -func buildMIC(sessionID string, username string, service string, authMethod string) []byte { - out := make([]byte, 0, 0) - out = appendString(out, sessionID) - out = append(out, msgUserAuthRequest) - out = appendString(out, username) - out = appendString(out, service) - out = appendString(out, authMethod) - return out -} diff --git a/vendor/golang.org/x/crypto/ssh/streamlocal.go b/vendor/golang.org/x/crypto/ssh/streamlocal.go deleted file mode 100644 index b171b330bc38..000000000000 --- a/vendor/golang.org/x/crypto/ssh/streamlocal.go +++ /dev/null @@ -1,116 +0,0 @@ -package ssh - -import ( - "errors" - "io" - "net" -) - -// streamLocalChannelOpenDirectMsg is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "direct-streamlocal@openssh.com" string. -// -// See openssh-portable/PROTOCOL, section 2.4. connection: Unix domain socket forwarding -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL#L235 -type streamLocalChannelOpenDirectMsg struct { - socketPath string - reserved0 string - reserved1 uint32 -} - -// forwardedStreamLocalPayload is a struct used for SSH_MSG_CHANNEL_OPEN message -// with "forwarded-streamlocal@openssh.com" string. -type forwardedStreamLocalPayload struct { - SocketPath string - Reserved0 string -} - -// streamLocalChannelForwardMsg is a struct used for SSH2_MSG_GLOBAL_REQUEST message -// with "streamlocal-forward@openssh.com"/"cancel-streamlocal-forward@openssh.com" string. -type streamLocalChannelForwardMsg struct { - socketPath string -} - -// ListenUnix is similar to ListenTCP but uses a Unix domain socket. -func (c *Client) ListenUnix(socketPath string) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - m := streamLocalChannelForwardMsg{ - socketPath, - } - // send message - ok, _, err := c.SendRequest("streamlocal-forward@openssh.com", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: streamlocal-forward@openssh.com request denied by peer") - } - ch := c.forwards.add(&net.UnixAddr{Name: socketPath, Net: "unix"}) - - return &unixListener{socketPath, c, ch}, nil -} - -func (c *Client) dialStreamLocal(socketPath string) (Channel, error) { - msg := streamLocalChannelOpenDirectMsg{ - socketPath: socketPath, - } - ch, in, err := c.OpenChannel("direct-streamlocal@openssh.com", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type unixListener struct { - socketPath string - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. 
-func (l *unixListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - }, nil -} - -// Close closes the listener. -func (l *unixListener) Close() error { - // this also closes the listener. - l.conn.forwards.remove(&net.UnixAddr{Name: l.socketPath, Net: "unix"}) - m := streamLocalChannelForwardMsg{ - l.socketPath, - } - ok, _, err := l.conn.SendRequest("cancel-streamlocal-forward@openssh.com", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-streamlocal-forward@openssh.com failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *unixListener) Addr() net.Addr { - return &net.UnixAddr{ - Name: l.socketPath, - Net: "unix", - } -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 80d35f5ec187..000000000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,474 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. The listener must be serviced, or the -// SSH connection may hang. -// N must be "tcp", "tcp4", "tcp6", or "unix". -func (c *Client) Listen(n, addr string) (net.Listener, error) { - switch n { - case "tcp", "tcp4", "tcp6": - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) - case "unix": - return c.ListenUnix(addr) - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. 
-func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// handleForwards starts goroutines handling forwarded connections. -// It's called on first use by (*Client).ListenTCP to not launch -// goroutines until needed. -func (c *Client) handleForwards() { - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-tcpip")) - go c.forwards.handleChannels(c.HandleChannelOpen("forwarded-streamlocal@openssh.com")) -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. -func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - c.handleForwardsOnce.Do(c.handleForwards) - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.Addr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. -type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr net.Addr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.Addr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - laddr: addr, - c: make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. 
-func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var ( - laddr net.Addr - raddr net.Addr - err error - ) - switch channelType := ch.ChannelType(); channelType { - case "forwarded-tcpip": - var payload forwardedTCPPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err = parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err = parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - case "forwarded-streamlocal@openssh.com": - var payload forwardedStreamLocalPayload - if err = Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-streamlocal@openssh.com payload: "+err.Error()) - continue - } - laddr = &net.UnixAddr{ - Name: payload.SocketPath, - Net: "unix", - } - raddr = &net.UnixAddr{ - Name: "@", - Net: "unix", - } - default: - panic(fmt.Errorf("ssh: unknown channel type %s", channelType)) - } - if ok := l.forward(laddr, raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.Addr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.Network() == f.laddr.Network() && addr.String() == f.laddr.String() { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.Addr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.Network() == f.laddr.Network() && laddr.String() == f.laddr.String() { - f.c <- forward{newCh: ch, raddr: raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &chanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. 
- l.conn.forwards.remove(l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - var ch Channel - switch n { - case "tcp", "tcp4", "tcp6": - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - ch, err = c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. - zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - return &chanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil - case "unix": - var err error - ch, err = c.dialStreamLocal(addr) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: &net.UnixAddr{ - Name: "@", - Net: "unix", - }, - raddr: &net.UnixAddr{ - Name: addr, - Net: "unix", - }, - }, nil - default: - return nil, fmt.Errorf("ssh: unsupported protocol: %s", n) - } -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &chanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// chanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type chanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *chanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *chanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *chanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. 
-func (t *chanConn) SetReadDeadline(deadline time.Time) error { - // for compatibility with previous version, - // the error message contains "tcpChan" - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. -func (t *chanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index 49ddc2e7de46..000000000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,353 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "bytes" - "errors" - "io" - "log" -) - -// debugTransport if set, will print packet types as they go over the -// wire. No message decoding is done, to minimize the impact on timing. -const debugTransport = false - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection. The read is blocking, - // i.e. if error is nil, then the returned byte slice is - // always non-empty. - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - isClient bool - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writeCipherPacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writeCipherPacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readCipherPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readCipherPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. 
-func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult) - if err != nil { - return err - } - t.reader.pendingKeyChange <- ciph - - ciph, err = newPacketCipher(t.writer.dir, algs.w, kexResult) - if err != nil { - return err - } - t.writer.pendingKeyChange <- ciph - - return nil -} - -func (t *transport) printPacket(p []byte, write bool) { - if len(p) == 0 { - return - } - who := "server" - if t.isClient { - who = "client" - } - what := "read" - if write { - what = "write" - } - - log.Println(what, who, p[0]) -} - -// Read and decrypt next packet. -func (t *transport) readPacket() (p []byte, err error) { - for { - p, err = t.reader.readPacket(t.bufReader) - if err != nil { - break - } - if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) { - break - } - } - if debugTransport { - t.printPacket(p, false) - } - - return p, err -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readCipherPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. - var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. 
- fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - if debugTransport { - t.printPacket(packet, true) - } - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writeCipherPacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - t.isClient = isClient - - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv := make([]byte, cipherMode.ivSize) - key := make([]byte, cipherMode.keySize) - macKey := make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - - return cipherModes[algs.Cipher].create(key, iv, macKey, algs) -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. -func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. 
- if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for length := 0; length < maxVersionStringBytes; length++ { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - if !bytes.HasPrefix(versionString, []byte("SSH-")) { - // RFC 4253 says we need to ignore all version string lines - // except the one containing the SSH version (provided that - // all the lines do not exceed 255 bytes in total). - versionString = versionString[:0] - continue - } - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. - if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index a3c021d3f88e..000000000000 --- a/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. 
-// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 344bd1433450..000000000000 --- a/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, CancelFunc(f) -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, CancelFunc(f) -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index 64d31ecc3ef4..000000000000 --- a/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 5270db5db7db..000000000000 --- a/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. 
The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. -type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. 
If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. - return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. 
-type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index 1f9715341faa..000000000000 --- a/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. 
- // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE deleted file mode 100644 index 6a66aea5eafe..000000000000 --- a/vendor/golang.org/x/sync/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS deleted file mode 100644 index 733099041f84..000000000000 --- a/vendor/golang.org/x/sync/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/README.md b/vendor/golang.org/x/sync/README.md deleted file mode 100644 index 7c1c8f6c82af..000000000000 --- a/vendor/golang.org/x/sync/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Go Sync - -[![Go Reference](https://pkg.go.dev/badge/golang.org/x/sync.svg)](https://pkg.go.dev/golang.org/x/sync) - -This repository provides Go concurrency primitives in addition to the -ones provided by the language and "sync" and "sync/atomic" packages. - -## Download/Install - -The easiest way to install is to run `go get -u golang.org/x/sync`. You can -also manually git clone the repository to `$GOPATH/src/golang.org/x/sync`. - -## Report Issues / Send Patches - -This repository uses Gerrit for code changes. To learn how to submit changes to -this repository, see https://golang.org/doc/contribute.html. - -The main issue tracker for the sync repository is located at -https://github.com/golang/go/issues. Prefix your issue with "x/sync:" in the -subject line, so it is easy to find. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go deleted file mode 100644 index 9857fe53d3c9..000000000000 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. -package errgroup - -import ( - "context" - "sync" -) - -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid and does not cancel on error. -type Group struct { - cancel func() - - wg sync.WaitGroup - - errOnce sync.Once - err error -} - -// WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. 
-func WithContext(ctx context.Context) (*Group, context.Context) { - ctx, cancel := context.WithCancel(ctx) - return &Group{cancel: cancel}, ctx -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. -func (g *Group) Wait() error { - g.wg.Wait() - if g.cancel != nil { - g.cancel() - } - return g.err -} - -// Go calls the given function in a new goroutine. -// -// The first call to return a non-nil error cancels the group; its error will be -// returned by Wait. -func (g *Group) Go(f func() error) { - g.wg.Add(1) - - go func() { - defer g.wg.Done() - - if err := f(); err != nil { - g.errOnce.Do(func() { - g.err = err - if g.cancel != nil { - g.cancel() - } - }) - } - }() -} diff --git a/vendor/golang.org/x/sync/go.mod b/vendor/golang.org/x/sync/go.mod deleted file mode 100644 index 091b76ea3902..000000000000 --- a/vendor/golang.org/x/sync/go.mod +++ /dev/null @@ -1 +0,0 @@ -module golang.org/x/sync diff --git a/vendor/google.golang.org/grpc/health/client.go b/vendor/google.golang.org/grpc/health/client.go deleted file mode 100644 index b5bee4838024..000000000000 --- a/vendor/google.golang.org/grpc/health/client.go +++ /dev/null @@ -1,117 +0,0 @@ -/* - * - * Copyright 2018 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package health - -import ( - "context" - "fmt" - "io" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/connectivity" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/internal/backoff" - "google.golang.org/grpc/status" -) - -var ( - backoffStrategy = backoff.DefaultExponential - backoffFunc = func(ctx context.Context, retries int) bool { - d := backoffStrategy.Backoff(retries) - timer := time.NewTimer(d) - select { - case <-timer.C: - return true - case <-ctx.Done(): - timer.Stop() - return false - } - } -) - -func init() { - internal.HealthCheckFunc = clientHealthCheck -} - -const healthCheckMethod = "/grpc.health.v1.Health/Watch" - -// This function implements the protocol defined at: -// https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { - tryCnt := 0 - -retryConnection: - for { - // Backs off if the connection has failed in some way without receiving a message in the previous retry. - if tryCnt > 0 && !backoffFunc(ctx, tryCnt-1) { - return nil - } - tryCnt++ - - if ctx.Err() != nil { - return nil - } - setConnectivityState(connectivity.Connecting, nil) - rawS, err := newStream(healthCheckMethod) - if err != nil { - continue retryConnection - } - - s, ok := rawS.(grpc.ClientStream) - // Ideally, this should never happen. But if it happens, the server is marked as healthy for LBing purposes. 
- if !ok { - setConnectivityState(connectivity.Ready, nil) - return fmt.Errorf("newStream returned %v (type %T); want grpc.ClientStream", rawS, rawS) - } - - if err = s.SendMsg(&healthpb.HealthCheckRequest{Service: service}); err != nil && err != io.EOF { - // Stream should have been closed, so we can safely continue to create a new stream. - continue retryConnection - } - s.CloseSend() - - resp := new(healthpb.HealthCheckResponse) - for { - err = s.RecvMsg(resp) - - // Reports healthy for the LBing purposes if health check is not implemented in the server. - if status.Code(err) == codes.Unimplemented { - setConnectivityState(connectivity.Ready, nil) - return err - } - - // Reports unhealthy if server's Watch method gives an error other than UNIMPLEMENTED. - if err != nil { - setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but received health check RPC error: %v", err)) - continue retryConnection - } - - // As a message has been received, removes the need for backoff for the next retry by resetting the try count. - tryCnt = 0 - if resp.Status == healthpb.HealthCheckResponse_SERVING { - setConnectivityState(connectivity.Ready, nil) - } else { - setConnectivityState(connectivity.TransientFailure, fmt.Errorf("connection active but health check failed. status=%s", resp.Status)) - } - } - } -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go deleted file mode 100644 index a66024d23e30..000000000000 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ /dev/null @@ -1,313 +0,0 @@ -// Copyright 2015 The gRPC Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// The canonical version of this proto can be found at -// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.25.0 -// protoc v3.14.0 -// source: grpc/health/v1/health.proto - -package grpc_health_v1 - -import ( - proto "github.com/golang/protobuf/proto" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - -type HealthCheckResponse_ServingStatus int32 - -const ( - HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 - HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 - HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 - HealthCheckResponse_SERVICE_UNKNOWN HealthCheckResponse_ServingStatus = 3 // Used only by the Watch method. -) - -// Enum value maps for HealthCheckResponse_ServingStatus. -var ( - HealthCheckResponse_ServingStatus_name = map[int32]string{ - 0: "UNKNOWN", - 1: "SERVING", - 2: "NOT_SERVING", - 3: "SERVICE_UNKNOWN", - } - HealthCheckResponse_ServingStatus_value = map[string]int32{ - "UNKNOWN": 0, - "SERVING": 1, - "NOT_SERVING": 2, - "SERVICE_UNKNOWN": 3, - } -) - -func (x HealthCheckResponse_ServingStatus) Enum() *HealthCheckResponse_ServingStatus { - p := new(HealthCheckResponse_ServingStatus) - *p = x - return p -} - -func (x HealthCheckResponse_ServingStatus) String() string { - return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) -} - -func (HealthCheckResponse_ServingStatus) Descriptor() protoreflect.EnumDescriptor { - return file_grpc_health_v1_health_proto_enumTypes[0].Descriptor() -} - -func (HealthCheckResponse_ServingStatus) Type() protoreflect.EnumType { - return &file_grpc_health_v1_health_proto_enumTypes[0] -} - -func (x HealthCheckResponse_ServingStatus) Number() protoreflect.EnumNumber { - return protoreflect.EnumNumber(x) -} - -// Deprecated: Use HealthCheckResponse_ServingStatus.Descriptor instead. -func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { - return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1, 0} -} - -type HealthCheckRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` -} - -func (x *HealthCheckRequest) Reset() { - *x = HealthCheckRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckRequest) ProtoMessage() {} - -func (x *HealthCheckRequest) ProtoReflect() protoreflect.Message { - mi := &file_grpc_health_v1_health_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckRequest.ProtoReflect.Descriptor instead. 
-func (*HealthCheckRequest) Descriptor() ([]byte, []int) { - return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{0} -} - -func (x *HealthCheckRequest) GetService() string { - if x != nil { - return x.Service - } - return "" -} - -type HealthCheckResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` -} - -func (x *HealthCheckResponse) Reset() { - *x = HealthCheckResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HealthCheckResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthCheckResponse) ProtoMessage() {} - -func (x *HealthCheckResponse) ProtoReflect() protoreflect.Message { - mi := &file_grpc_health_v1_health_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthCheckResponse.ProtoReflect.Descriptor instead. -func (*HealthCheckResponse) Descriptor() ([]byte, []int) { - return file_grpc_health_v1_health_proto_rawDescGZIP(), []int{1} -} - -func (x *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { - if x != nil { - return x.Status - } - return HealthCheckResponse_UNKNOWN -} - -var File_grpc_health_v1_health_proto protoreflect.FileDescriptor - -var file_grpc_health_v1_health_proto_rawDesc = []byte{ - 0x0a, 0x1b, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x31, - 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x22, 0x2e, 0x0a, - 0x12, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x22, 0xb1, 0x01, - 0x0a, 0x13, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x49, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x31, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x22, 0x4f, 0x0a, 0x0d, 0x53, 0x65, 0x72, 0x76, 0x69, 0x6e, 0x67, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0b, - 0x0a, 0x07, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0f, 0x0a, 0x0b, 0x4e, - 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x13, 0x0a, 0x0f, - 0x53, 0x45, 0x52, 0x56, 0x49, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x03, 0x32, 0xae, 0x01, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x50, 0x0a, 0x05, - 0x43, 0x68, 0x65, 0x63, 
0x6b, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, - 0x0a, 0x05, 0x57, 0x61, 0x74, 0x63, 0x68, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x43, - 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x2e, 0x48, 0x65, 0x61, - 0x6c, 0x74, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x30, 0x01, 0x42, 0x61, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x68, 0x65, - 0x61, 0x6c, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, - 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x68, - 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x5f, 0x76, 0x31, 0xaa, 0x02, 0x0e, 0x47, 0x72, 0x70, 0x63, 0x2e, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x2e, 0x56, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_grpc_health_v1_health_proto_rawDescOnce sync.Once - file_grpc_health_v1_health_proto_rawDescData = file_grpc_health_v1_health_proto_rawDesc -) - -func file_grpc_health_v1_health_proto_rawDescGZIP() []byte { - file_grpc_health_v1_health_proto_rawDescOnce.Do(func() { - file_grpc_health_v1_health_proto_rawDescData = protoimpl.X.CompressGZIP(file_grpc_health_v1_health_proto_rawDescData) - }) - return file_grpc_health_v1_health_proto_rawDescData -} - -var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_grpc_health_v1_health_proto_goTypes = []interface{}{ - (HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus - (*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest - (*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse -} -var file_grpc_health_v1_health_proto_depIdxs = []int32{ - 0, // 0: grpc.health.v1.HealthCheckResponse.status:type_name -> grpc.health.v1.HealthCheckResponse.ServingStatus - 1, // 1: grpc.health.v1.Health.Check:input_type -> grpc.health.v1.HealthCheckRequest - 1, // 2: grpc.health.v1.Health.Watch:input_type -> grpc.health.v1.HealthCheckRequest - 2, // 3: grpc.health.v1.Health.Check:output_type -> grpc.health.v1.HealthCheckResponse - 2, // 4: grpc.health.v1.Health.Watch:output_type -> grpc.health.v1.HealthCheckResponse - 3, // [3:5] is the sub-list for method output_type - 1, // [1:3] is the sub-list for method input_type - 1, // [1:1] is the sub-list for extension type_name - 1, // [1:1] is the sub-list for extension extendee - 0, // [0:1] is the sub-list for field type_name -} - -func init() { file_grpc_health_v1_health_proto_init() } -func file_grpc_health_v1_health_proto_init() { - if File_grpc_health_v1_health_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - 
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HealthCheckResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_grpc_health_v1_health_proto_rawDesc, - NumEnums: 1, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_grpc_health_v1_health_proto_goTypes, - DependencyIndexes: file_grpc_health_v1_health_proto_depIdxs, - EnumInfos: file_grpc_health_v1_health_proto_enumTypes, - MessageInfos: file_grpc_health_v1_health_proto_msgTypes, - }.Build() - File_grpc_health_v1_health_proto = out.File - file_grpc_health_v1_health_proto_rawDesc = nil - file_grpc_health_v1_health_proto_goTypes = nil - file_grpc_health_v1_health_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go deleted file mode 100644 index bdc3ae284e7a..000000000000 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ /dev/null @@ -1,201 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.1.0 -// - protoc v3.14.0 -// source: grpc/health/v1/health.proto - -package grpc_health_v1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.32.0 or later. -const _ = grpc.SupportPackageIsVersion7 - -// HealthClient is the client API for Health service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. - Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. - // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. - // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. 
- Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) -} - -type healthClient struct { - cc grpc.ClientConnInterface -} - -func NewHealthClient(cc grpc.ClientConnInterface) HealthClient { - return &healthClient{cc} -} - -func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { - out := new(HealthCheckResponse) - err := c.cc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) { - stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], "/grpc.health.v1.Health/Watch", opts...) - if err != nil { - return nil, err - } - x := &healthWatchClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type Health_WatchClient interface { - Recv() (*HealthCheckResponse, error) - grpc.ClientStream -} - -type healthWatchClient struct { - grpc.ClientStream -} - -func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { - m := new(HealthCheckResponse) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -// HealthServer is the server API for Health service. -// All implementations should embed UnimplementedHealthServer -// for forward compatibility -type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. - Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) - // Performs a watch for the serving status of the requested service. - // The server will immediately send back a message indicating the current - // serving status. It will then subsequently send a new message whenever - // the service's serving status changes. - // - // If the requested service is unknown when the call is received, the - // server will send a message setting the serving status to - // SERVICE_UNKNOWN but will *not* terminate the call. If at some - // future point, the serving status of the service becomes known, the - // server will send a new message with the service's serving status. - // - // If the call terminates with status UNIMPLEMENTED, then clients - // should assume this method is not supported and should not retry the - // call. If the call terminates with any other status (including OK), - // clients should retry the call with appropriate exponential backoff. - Watch(*HealthCheckRequest, Health_WatchServer) error -} - -// UnimplementedHealthServer should be embedded to have forward compatible implementations. -type UnimplementedHealthServer struct { -} - -func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") -} -func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error { - return status.Errorf(codes.Unimplemented, "method Watch not implemented") -} - -// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to HealthServer will -// result in compilation errors. 
-type UnsafeHealthServer interface { - mustEmbedUnimplementedHealthServer() -} - -func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) { - s.RegisterService(&Health_ServiceDesc, srv) -} - -func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthCheckRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(HealthServer).Check(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/grpc.health.v1.Health/Check", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(HealthCheckRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(HealthServer).Watch(m, &healthWatchServer{stream}) -} - -type Health_WatchServer interface { - Send(*HealthCheckResponse) error - grpc.ServerStream -} - -type healthWatchServer struct { - grpc.ServerStream -} - -func (x *healthWatchServer) Send(m *HealthCheckResponse) error { - return x.ServerStream.SendMsg(m) -} - -// Health_ServiceDesc is the grpc.ServiceDesc for Health service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Health_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "grpc.health.v1.Health", - HandlerType: (*HealthServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Check", - Handler: _Health_Check_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Watch", - Handler: _Health_Watch_Handler, - ServerStreams: true, - }, - }, - Metadata: "grpc/health/v1/health.proto", -} diff --git a/vendor/google.golang.org/grpc/health/logging.go b/vendor/google.golang.org/grpc/health/logging.go deleted file mode 100644 index 83c6acf55ef6..000000000000 --- a/vendor/google.golang.org/grpc/health/logging.go +++ /dev/null @@ -1,23 +0,0 @@ -/* - * - * Copyright 2020 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package health - -import "google.golang.org/grpc/grpclog" - -var logger = grpclog.Component("health_service") diff --git a/vendor/google.golang.org/grpc/health/server.go b/vendor/google.golang.org/grpc/health/server.go deleted file mode 100644 index cce6312d77f9..000000000000 --- a/vendor/google.golang.org/grpc/health/server.go +++ /dev/null @@ -1,163 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -// Package health provides a service that exposes server's health and it must be -// imported to enable support for client-side health checks. -package health - -import ( - "context" - "sync" - - "google.golang.org/grpc/codes" - healthgrpc "google.golang.org/grpc/health/grpc_health_v1" - healthpb "google.golang.org/grpc/health/grpc_health_v1" - "google.golang.org/grpc/status" -) - -// Server implements `service Health`. -type Server struct { - healthgrpc.UnimplementedHealthServer - mu sync.RWMutex - // If shutdown is true, it's expected all serving status is NOT_SERVING, and - // will stay in NOT_SERVING. - shutdown bool - // statusMap stores the serving status of the services this Server monitors. - statusMap map[string]healthpb.HealthCheckResponse_ServingStatus - updates map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus -} - -// NewServer returns a new Server. -func NewServer() *Server { - return &Server{ - statusMap: map[string]healthpb.HealthCheckResponse_ServingStatus{"": healthpb.HealthCheckResponse_SERVING}, - updates: make(map[string]map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus), - } -} - -// Check implements `service Health`. -func (s *Server) Check(ctx context.Context, in *healthpb.HealthCheckRequest) (*healthpb.HealthCheckResponse, error) { - s.mu.RLock() - defer s.mu.RUnlock() - if servingStatus, ok := s.statusMap[in.Service]; ok { - return &healthpb.HealthCheckResponse{ - Status: servingStatus, - }, nil - } - return nil, status.Error(codes.NotFound, "unknown service") -} - -// Watch implements `service Health`. -func (s *Server) Watch(in *healthpb.HealthCheckRequest, stream healthgrpc.Health_WatchServer) error { - service := in.Service - // update channel is used for getting service status updates. - update := make(chan healthpb.HealthCheckResponse_ServingStatus, 1) - s.mu.Lock() - // Puts the initial status to the channel. - if servingStatus, ok := s.statusMap[service]; ok { - update <- servingStatus - } else { - update <- healthpb.HealthCheckResponse_SERVICE_UNKNOWN - } - - // Registers the update channel to the correct place in the updates map. - if _, ok := s.updates[service]; !ok { - s.updates[service] = make(map[healthgrpc.Health_WatchServer]chan healthpb.HealthCheckResponse_ServingStatus) - } - s.updates[service][stream] = update - defer func() { - s.mu.Lock() - delete(s.updates[service], stream) - s.mu.Unlock() - }() - s.mu.Unlock() - - var lastSentStatus healthpb.HealthCheckResponse_ServingStatus = -1 - for { - select { - // Status updated. Sends the up-to-date status to the client. - case servingStatus := <-update: - if lastSentStatus == servingStatus { - continue - } - lastSentStatus = servingStatus - err := stream.Send(&healthpb.HealthCheckResponse{Status: servingStatus}) - if err != nil { - return status.Error(codes.Canceled, "Stream has ended.") - } - // Context done. Removes the update channel from the updates map. 
- case <-stream.Context().Done(): - return status.Error(codes.Canceled, "Stream has ended.") - } - } -} - -// SetServingStatus is called when need to reset the serving status of a service -// or insert a new service entry into the statusMap. -func (s *Server) SetServingStatus(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.mu.Lock() - defer s.mu.Unlock() - if s.shutdown { - logger.Infof("health: status changing for %s to %v is ignored because health service is shutdown", service, servingStatus) - return - } - - s.setServingStatusLocked(service, servingStatus) -} - -func (s *Server) setServingStatusLocked(service string, servingStatus healthpb.HealthCheckResponse_ServingStatus) { - s.statusMap[service] = servingStatus - for _, update := range s.updates[service] { - // Clears previous updates, that are not sent to the client, from the channel. - // This can happen if the client is not reading and the server gets flow control limited. - select { - case <-update: - default: - } - // Puts the most recent update to the channel. - update <- servingStatus - } -} - -// Shutdown sets all serving status to NOT_SERVING, and configures the server to -// ignore all future status changes. -// -// This changes serving status for all services. To set status for a particular -// services, call SetServingStatus(). -func (s *Server) Shutdown() { - s.mu.Lock() - defer s.mu.Unlock() - s.shutdown = true - for service := range s.statusMap { - s.setServingStatusLocked(service, healthpb.HealthCheckResponse_NOT_SERVING) - } -} - -// Resume sets all serving status to SERVING, and configures the server to -// accept all future status changes. -// -// This changes serving status for all services. To set status for a particular -// services, call SetServingStatus(). -func (s *Server) Resume() { - s.mu.Lock() - defer s.mu.Unlock() - s.shutdown = false - for service := range s.statusMap { - s.setServingStatusLocked(service, healthpb.HealthCheckResponse_SERVING) - } -}